1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "common.h"
54#include "4965.h"
55
56
57
58
59
60
61
62
63
64
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
66
67#ifdef CONFIG_IWLEGACY_DEBUG
68#define VD "d"
69#else
70#define VD
71#endif
72
73#define DRV_VERSION IWLWIFI_VERSION VD
74
75MODULE_DESCRIPTION(DRV_DESCRIPTION);
76MODULE_VERSION(DRV_VERSION);
77MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
78MODULE_LICENSE("GPL");
79MODULE_ALIAS("iwl4965");
80
81void
82il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
83{
84 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
85 IL_ERR("Tx flush command to flush out all frames\n");
86 if (!test_bit(S_EXIT_PENDING, &il->status))
87 queue_work(il->workqueue, &il->tx_flush);
88 }
89}
90
91
92
93
/*
 * Module parameter defaults for 4965: 8K A-MSDU Rx buffers enabled and
 * automatic firmware restart on error enabled.
 */
struct il_mod_params il4965_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,

};
99
/*
 * il4965_rx_queue_reset - return every Rx buffer to the "used" pool
 *
 * Unmaps and frees any pages still attached to the pool entries, moves
 * all entries onto rx_used, clears the per-slot buffer pointers and
 * resets the read/write indexes.  Runs entirely under the queue lock
 * with interrupts disabled.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Fill the rx_used list with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* Buffers may still hold a mapped page from earlier use,
		 * so unmap and free that storage before recycling */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* No buffer is currently handed to the hardware */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* All buffers are "processed and used"; nothing restocked yet */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
132
/*
 * il4965_rx_init - program the FH49 Rx DMA channel 0 registers
 *
 * Picks the 4K or 8K receive-buffer size (per the amsdu_size_8K module
 * parameter), stops the Rx channel, points the hardware at the RBD
 * circular buffer and the status write-back area, re-enables the
 * channel, and sets the default interrupt-coalescing timeout.
 *
 * Return: always 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;
	u32 rb_timeout = 0;	/* value for the IRQ_RBTH field below */

	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset the device's Rx queue write pointer */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell the device where the RBD circular buffer lives (addr >> 8,
	 * i.e. the buffer must be 256-byte aligned) */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell the device where to write its Rx status (addr >> 4) */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA: interrupt the host on frame reception, single-frame
	 * mode, chosen buffer size, RB-threshold timeout and RBD count */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to the default */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
176
/*
 * il4965_set_pwr_vmain - select Vmain as the device power source
 *
 * Writes the VMAIN value into the power-source field of
 * APMG_PS_CTRL_REG (all other bits preserved via the mask).
 */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
194
195int
196il4965_hw_nic_init(struct il_priv *il)
197{
198 unsigned long flags;
199 struct il_rx_queue *rxq = &il->rxq;
200 int ret;
201
202 spin_lock_irqsave(&il->lock, flags);
203 il_apm_init(il);
204
205 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
206 spin_unlock_irqrestore(&il->lock, flags);
207
208 il4965_set_pwr_vmain(il);
209 il4965_nic_config(il);
210
211
212 if (!rxq->bd) {
213 ret = il_rx_queue_alloc(il);
214 if (ret) {
215 IL_ERR("Unable to initialize Rx queue\n");
216 return -ENOMEM;
217 }
218 } else
219 il4965_rx_queue_reset(il, rxq);
220
221 il4965_rx_replenish(il);
222
223 il4965_rx_init(il, rxq);
224
225 spin_lock_irqsave(&il->lock, flags);
226
227 rxq->need_update = 1;
228 il_rx_queue_update_write_ptr(il, rxq);
229
230 spin_unlock_irqrestore(&il->lock, flags);
231
232
233 if (!il->txq) {
234 ret = il4965_txq_ctx_alloc(il);
235 if (ret)
236 return ret;
237 } else
238 il4965_txq_ctx_reset(il);
239
240 set_bit(S_INIT, &il->status);
241
242 return 0;
243}
244
245
246
247
/*
 * il4965_dma_addr2rbd_ptr - convert a buffer DMA address to an RBD word
 *
 * The receive-buffer descriptor stores the address shifted right by 8
 * (low 8 bits dropped, so buffers must be 256-byte aligned), in
 * little-endian byte order for the hardware.
 */
static inline __le32
il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
{
	return cpu_to_le32((u32) (dma_addr >> 8));
}
253
254
255
256
257
258
259
260
261
262
263
264
/*
 * il4965_rx_queue_restock - move buffers from rx_free into the Rx ring
 *
 * While there is ring space and free buffers, takes a buffer off
 * rx_free, writes its DMA address into the RBD slot at 'write' and
 * advances the index.  Schedules the replenish worker when the free
 * pool drops to the low watermark, and tells the hardware about new
 * buffers once at least 8 slots (write rounded down to a multiple of
 * 8) have been filled.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The slot about to be overwritten must not still own a page */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* If we've added more space for firmware to place data, tell the
	 * replenish worker to refill the free pool */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If buffers were added, update the device write pointer
	 * (only in batches of 8) */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
306
307
308
309
310
311
312
313
314
/*
 * il4965_rx_allocate - allocate and DMA-map pages for the rx_used pool
 *
 * For each entry on rx_used: allocate a page (order per hw_params),
 * map it for device DMA, then move the entry to rx_free.  The queue
 * lock is dropped around the allocation/mapping so this can be called
 * with GFP_KERNEL from process context or GFP_ATOMIC from softirq;
 * the rx_used list is therefore re-checked after re-taking the lock.
 * Stops when rx_used is empty, allocation fails, or mapping fails.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	dma_addr_t page_dma;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation warnings unless we are actually low */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer (lock not held) */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * be called again on the next Rx interrupt or restock */
			return;
		}

		/* Get physical address of the RB for device DMA */
		page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
			__free_pages(page, il->hw_params.rx_page_order);
			break;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* rx_used may have been drained while the lock was dropped */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			pci_unmap_page(il->pci_dev, page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}

		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		BUG_ON(rxb->page);

		rxb->page = page;
		rxb->page_dma = page_dma;
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
396
/*
 * il4965_rx_replenish - refill Rx buffers from process context
 *
 * Allocates with GFP_KERNEL (may sleep), then restocks the hardware
 * ring under the device lock.
 */
void
il4965_rx_replenish(struct il_priv *il)
{
	unsigned long flags;

	il4965_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il4965_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
408
/*
 * il4965_rx_replenish_now - refill Rx buffers from atomic context
 *
 * GFP_ATOMIC variant of il4965_rx_replenish(); note it restocks
 * without taking il->lock (caller context differs -- see replenish).
 */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
416
417
418
419
420
421
/*
 * il4965_rx_queue_free - free all Rx queue resources
 *
 * Unmaps and frees every pooled page, then releases the coherent DMA
 * areas for the RBD ring (4 bytes per entry) and the Rx status block.
 */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
443
/*
 * il4965_rxq_stop - stop Rx DMA and wait for the channel to go idle
 *
 * Disables FH49 Rx channel 0 and polls (up to 1000 us) for the idle
 * status bit.  A timeout is only logged; the function always returns
 * 0 so that shutdown continues regardless.
 */
int
il4965_rxq_stop(struct il_priv *il)
{
	int ret;

	_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   1000);
	if (ret < 0)
		IL_ERR("Can't stop Rx DMA.\n");

	return 0;
}
459
460int
461il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
462{
463 int idx = 0;
464 int band_offset = 0;
465
466
467 if (rate_n_flags & RATE_MCS_HT_MSK) {
468 idx = (rate_n_flags & 0xff);
469 return idx;
470
471 } else {
472 if (band == IEEE80211_BAND_5GHZ)
473 band_offset = IL_FIRST_OFDM_RATE;
474 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
475 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
476 return idx - band_offset;
477 }
478
479 return -1;
480}
481
/*
 * il4965_calc_rssi - compute the received signal strength in dBm
 *
 * Extracts the AGC gain and the per-antenna RSSI readings from the
 * non-configuration PHY data, takes the maximum RSSI over the valid
 * antennas (entries 0, 2, 4 of rssi_info), and converts to dBm by
 * subtracting the AGC value and the fixed 4965 offset.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* Non-config PHY data carries AGC and per-antenna RSSI */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* RSSI values are stored at even offsets (i << 1); take the max
	 * over the antennas flagged valid */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max RSSI - AGC gain - constant 4965 offset */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
516
/*
 * il4965_translate_rx_status - normalize MPDU decryption status bits
 *
 * Translates the per-MPDU decryption status (decrypt_in) into the
 * canonical RX_RES_STATUS_* form used by the rest of the Rx path.
 * Returns early for unencrypted frames, security errors, and frames
 * whose decryption never completed.
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* No further work for unencrypted frames */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* Security-type error: pass through as-is */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* Decryption not finished: nothing more to translate */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* For CCMP the MIC-OK bit decides success vs. bad MIC */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK: report a key error */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* TTAK OK: fall through to the ICV check below */
		/* fall through */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
576
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame to mac80211
 *
 * Drops the frame if the interface is down or (with hw crypto) the
 * decryption flags reject it; otherwise builds an skb that references
 * the Rx page as a fragment (zero copy), attaches the rx status, and
 * passes it up.  Ownership of the page moves to the skb, so rxb->page
 * is cleared and the page accounting decremented.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	/* Frame payload stays in the Rx page; attach it as a fragment */
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
			len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	/* Page ownership transferred to the skb */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
612
613
614
615void
616il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
617{
618 struct ieee80211_hdr *header;
619 struct ieee80211_rx_status rx_status = {};
620 struct il_rx_pkt *pkt = rxb_addr(rxb);
621 struct il_rx_phy_res *phy_res;
622 __le32 rx_pkt_status;
623 struct il_rx_mpdu_res_start *amsdu;
624 u32 len;
625 u32 ampdu_status;
626 u32 rate_n_flags;
627
628
629
630
631
632
633
634
635
636
637 if (pkt->hdr.cmd == N_RX) {
638 phy_res = (struct il_rx_phy_res *)pkt->u.raw;
639 header =
640 (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
641 phy_res->cfg_phy_cnt);
642
643 len = le16_to_cpu(phy_res->byte_count);
644 rx_pkt_status =
645 *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
646 phy_res->cfg_phy_cnt + len);
647 ampdu_status = le32_to_cpu(rx_pkt_status);
648 } else {
649 if (!il->_4965.last_phy_res_valid) {
650 IL_ERR("MPDU frame without cached PHY data\n");
651 return;
652 }
653 phy_res = &il->_4965.last_phy_res;
654 amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
655 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
656 len = le16_to_cpu(amsdu->byte_count);
657 rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
658 ampdu_status =
659 il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
660 }
661
662 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
663 D_DROP("dsp size out of range [0,20]: %d/n",
664 phy_res->cfg_phy_cnt);
665 return;
666 }
667
668 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
669 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
670 D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
671 return;
672 }
673
674
675 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
676
677
678 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
679 rx_status.band =
680 (phy_res->
681 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
682 IEEE80211_BAND_5GHZ;
683 rx_status.freq =
684 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
685 rx_status.band);
686 rx_status.rate_idx =
687 il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
688 rx_status.flag = 0;
689
690
691
692
693
694 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
695
696
697 rx_status.signal = il4965_calc_rssi(il, phy_res);
698
699 D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
700 (unsigned long long)rx_status.mactime);
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715 rx_status.antenna =
716 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
717 RX_RES_PHY_FLAGS_ANTENNA_POS;
718
719
720 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
721 rx_status.flag |= RX_FLAG_SHORTPRE;
722
723
724 if (rate_n_flags & RATE_MCS_HT_MSK)
725 rx_status.flag |= RX_FLAG_HT;
726 if (rate_n_flags & RATE_MCS_HT40_MSK)
727 rx_status.flag |= RX_FLAG_40MHZ;
728 if (rate_n_flags & RATE_MCS_SGI_MSK)
729 rx_status.flag |= RX_FLAG_SHORT_GI;
730
731 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
732
733
734
735
736
737 rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
738 rx_status.ampdu_reference = il->_4965.ampdu_ref;
739 }
740
741 il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
742 &rx_status);
743}
744
745
746
/*
 * il4965_hdl_rx_phy - cache PHY data from an Rx PHY notification
 *
 * Stores the PHY result so the following N_RX_MPDU frames (which
 * carry no PHY data of their own) can use it, and bumps the A-MPDU
 * reference counter that groups those frames.
 */
void
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	il->_4965.last_phy_res_valid = true;
	il->_4965.ampdu_ref++;
	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct il_rx_phy_res));
}
756
/*
 * il4965_get_channels_for_scan - fill the scan command's channel list
 *
 * Walks the channels requested for this scan, keeps those that belong
 * to 'band' and are valid per the EEPROM channel info, and writes one
 * il_scan_channel entry each (type, dwell times, gain settings).
 * Channels that are passive-only (or when is_active is false) get a
 * passive scan type, others an active type plus the probe mask.
 *
 * Return: number of channel entries written at scan_ch.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum ieee80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must strictly exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* DSP attenuation: fixed value for all channels */
		scan_ch->dsp_atten = 110;

		/* Tx gain setting differs per band */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
835
836static void
837il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
838{
839 int i;
840 u8 ind = *ant;
841
842 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
843 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
844 if (valid & BIT(ind)) {
845 *ant = ind;
846 return;
847 }
848 }
849}
850
/*
 * il4965_request_scan - build and send the C_SCAN host command
 *
 * Allocates (once) and assembles the full scan command: quiet-time
 * parameters, suspend/resume timing when associated, direct-probe
 * SSIDs for active scans, per-band Tx rate and antenna selection,
 * Rx chain configuration, the probe request template, and the channel
 * list.  Sends the command synchronously with S_SCAN_HW set (cleared
 * again on failure).  Caller must hold il->mutex.
 *
 * Return: 0 on success, -ENOMEM/-EIO on failure, or the error from
 * il_send_cmd_sync().
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	/* Lazily allocate the (huge) scan command buffer once */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* Encode suspend time as (beacons << 22) | extra-usec/1024 */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* Skip empty (wildcard) SSID entries */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick Tx rate per band; 2.4 GHz uses CCK 1M unless the channel
	 * mode is HT40-only, where only OFDM is possible */
	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/* Active scans require a good CRC threshold; passive never */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate the Tx antenna between scans */
	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);

	/* In power-save, restrict Rx to a single active chain */
	if (test_bit(S_POWER_PMI, &il->status)) {

		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Build the probe request template, then append the channels */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1039
1040int
1041il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1042 bool add)
1043{
1044 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1045
1046 if (add)
1047 return il4965_add_bssid_station(il, vif->bss_conf.bssid,
1048 &vif_priv->ibss_bssid_sta_id);
1049 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1050 vif->bss_conf.bssid);
1051}
1052
1053void
1054il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1055{
1056 lockdep_assert_held(&il->sta_lock);
1057
1058 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1059 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1060 else {
1061 D_TX("free more than tfds_in_queue (%u:%d)\n",
1062 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1063 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1064 }
1065}
1066
1067#define IL_TX_QUEUE_MSK 0xfffff
1068
1069static bool
1070il4965_is_single_rx_stream(struct il_priv *il)
1071{
1072 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1073 il->current_ht_config.single_chain_sufficient;
1074}
1075
1076#define IL_NUM_RX_CHAINS_MULTIPLE 3
1077#define IL_NUM_RX_CHAINS_SINGLE 2
1078#define IL_NUM_IDLE_CHAINS_DUAL 2
1079#define IL_NUM_IDLE_CHAINS_SINGLE 1
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091static int
1092il4965_get_active_rx_chain_count(struct il_priv *il)
1093{
1094
1095 if (il4965_is_single_rx_stream(il))
1096 return IL_NUM_RX_CHAINS_SINGLE;
1097 else
1098 return IL_NUM_RX_CHAINS_MULTIPLE;
1099}
1100
1101
1102
1103
1104
1105static int
1106il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1107{
1108
1109 switch (il->current_ht_config.smps) {
1110 case IEEE80211_SMPS_STATIC:
1111 case IEEE80211_SMPS_DYNAMIC:
1112 return IL_NUM_IDLE_CHAINS_SINGLE;
1113 case IEEE80211_SMPS_OFF:
1114 return active_cnt;
1115 default:
1116 WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
1117 return active_cnt;
1118 }
1119}
1120
1121
1122static u8
1123il4965_count_chain_bitmap(u32 chain_bitmap)
1124{
1125 u8 res;
1126 res = (chain_bitmap & BIT(0)) >> 0;
1127 res += (chain_bitmap & BIT(1)) >> 1;
1128 res += (chain_bitmap & BIT(2)) >> 2;
1129 res += (chain_bitmap & BIT(3)) >> 3;
1130 return res;
1131}
1132
1133
1134
1135
1136
1137
1138
/*
 * il4965_set_rxon_chain - compute and stage the RXON rx_chain field
 *
 * Determines which Rx chains are valid (from chain-noise calibration
 * when available, else hardware capability), how many should be
 * active and how many kept on while idle, clamps both to the number
 * of valid chains, and writes the assembled bitfield (plus the MIMO
 * force flag when appropriate) into il->staging.rx_chain.
 */
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Prefer calibrated active chains; fall back to hw capability */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many chains to use while receiving, and while idle */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* Cannot use more chains than are actually valid */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	il->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only with >=2 chains and no power-save (CAM) */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1189
/*
 * il4965_get_fh_string - name of an FH49 register, for debug dumps
 *
 * The IL_CMD macro expands each case to return the register's name as
 * a string; unknown registers yield "UNKNOWN".
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1207
/*
 * il4965_dump_fh - dump the FH49 register values
 *
 * With CONFIG_IWLEGACY_DEBUG and display=true, allocates *buf
 * (caller frees) and writes the dump into it, returning the number
 * of bytes written.  Otherwise logs each register via IL_ERR and
 * returns 0.  Returns -ENOMEM if the buffer allocation fails.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* 48 bytes per register line plus a header */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      "  %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1252
/*
 * il4965_hdl_missed_beacon - handle a missed-beacon notification
 *
 * When more consecutive beacons were missed than the configured
 * threshold, logs the counters and (unless a scan is running)
 * re-initializes the sensitivity calibration.
 */
void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    il->missed_beacon_threshold) {
		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(S_SCANNING, &il->status))
			il4965_init_sensitivity(il);
	}
}
1271
1272
1273
1274
/*
 * il4965_rx_calc_noise - estimate noise floor from beacon silence
 *
 * Averages the in-band beacon-silence RSSI over the chains that
 * reported a non-zero value and converts to dBm by subtracting 107.
 * The result is only logged here (D_CALIB).
 */
static void
il4965_rx_calc_noise(struct il_priv *il)
{
	struct stats_rx_non_phy *rx_info;
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
	int last_rx_noise;

	rx_info = &(il->_4965.stats.rx.general);
	bcn_silence_a =
	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	bcn_silence_b =
	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	bcn_silence_c =
	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	/* Only chains reporting non-zero silence enter the average */
	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas; -107 converts to dBm */
	if (num_active_rx)
		last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;

	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
		bcn_silence_b, bcn_silence_c, last_rx_noise);
}
1314
1315#ifdef CONFIG_IWLEGACY_DEBUGFS
1316
1317
1318
1319
1320
/*
 * il4965_accumulative_stats - accumulate uCode statistics deltas
 *
 * Walks the new stats block word-by-word against the previously
 * stored copy; where a counter increased, the positive delta is
 * added to the accumulated stats and tracked as a max delta.  The
 * first __le32 (the flags word) is skipped.  Temperature and
 * timestamp are copied verbatim since they are not counters.
 * Debugfs-only helper (CONFIG_IWLEGACY_DEBUGFS).
 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;
	struct stats_tx *tx, *accum_tx;

	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	tx = &il->_4965.stats.tx;
	accum_tx = &il->_4965.accum_stats.tx;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	/* Start past the flags word; only growing counters contribute */
	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* Not counters: carry the latest values over directly */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
1358#endif
1359
/*
 * Handler for the N_STATS notification from the uCode.
 *
 * Detects temperature / HT40-mode changes against the previously cached
 * stats, feeds the debugfs accumulators, caches the new notification,
 * re-arms the periodic stats timer, and — when not scanning — recomputes
 * the noise estimate and schedules the runtime calibration work.
 */
void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
 const int recalib_seconds = 60;
 bool change;
 struct il_rx_pkt *pkt = rxb_addr(rxb);

 D_RX("Statistics notification received (%d vs %d).\n",
      (int)sizeof(struct il_notif_stats),
      le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

 /* Compare against the *old* snapshot before it is overwritten by the
  * memcpy() below. */
 change =
     ((il->_4965.stats.general.common.temperature !=
       pkt->u.stats.general.common.temperature) ||
      ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
       (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
 il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

 /* Cache the new notification, clobbering the previous snapshot. */
 memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

 set_bit(S_STATS, &il->status);

 /* Re-arm the periodic stats timer to fire recalib_seconds from now. */
 mod_timer(&il->stats_periodic,
    jiffies + msecs_to_jiffies(recalib_seconds * 1000));

 /* Noise/calibration only when not scanning and this really is a
  * stats notification (this handler is shared via il4965_hdl_c_stats). */
 if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
     (pkt->hdr.cmd == N_STATS)) {
  il4965_rx_calc_noise(il);
  queue_work(il->workqueue, &il->run_time_calib_work);
 }

 if (change)
  il4965_temperature_calib(il);
}
1401
1402void
1403il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
1404{
1405 struct il_rx_pkt *pkt = rxb_addr(rxb);
1406
1407 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
1408#ifdef CONFIG_IWLEGACY_DEBUGFS
1409 memset(&il->_4965.accum_stats, 0,
1410 sizeof(struct il_notif_stats));
1411 memset(&il->_4965.delta_stats, 0,
1412 sizeof(struct il_notif_stats));
1413 memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
1414#endif
1415 D_RX("Statistics have been cleared\n");
1416 }
1417 il4965_hdl_stats(il, rxb);
1418}
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
/* Map of 802.11 QoS TID (0..7, array index) to mac80211 access category. */
static const u8 tid_to_ac[] = {
 IEEE80211_AC_BE,
 IEEE80211_AC_BK,
 IEEE80211_AC_BK,
 IEEE80211_AC_BE,
 IEEE80211_AC_VI,
 IEEE80211_AC_VI,
 IEEE80211_AC_VO,
 IEEE80211_AC_VO
};
1457
1458static inline int
1459il4965_get_ac_from_tid(u16 tid)
1460{
1461 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1462 return tid_to_ac[tid];
1463
1464
1465 return -EINVAL;
1466}
1467
1468static inline int
1469il4965_get_fifo_from_tid(u16 tid)
1470{
1471 const u8 ac_to_fifo[] = {
1472 IL_TX_FIFO_VO,
1473 IL_TX_FIFO_VI,
1474 IL_TX_FIFO_BE,
1475 IL_TX_FIFO_BK,
1476 };
1477
1478 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1479 return ac_to_fifo[tid_to_ac[tid]];
1480
1481
1482 return -EINVAL;
1483}
1484
1485
1486
1487
/*
 * Fill the "basic" portion of a Tx command: ACK / sequence-control /
 * TSF flags, station id, TID, protection flags and the power-management
 * frame timeout.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
     struct il_tx_cmd *tx_cmd,
     struct ieee80211_tx_info *info,
     struct ieee80211_hdr *hdr, u8 std_id)
{
 __le16 fc = hdr->frame_control;
 __le32 tx_flags = tx_cmd->tx_flags;

 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
  tx_flags |= TX_CMD_FLG_ACK_MSK;
  if (ieee80211_is_mgmt(fc))
   tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
  /* TSF flag only on the first fragment of a probe response
   * (fragment number in the low 4 bits of seq_ctrl is zero). */
  if (ieee80211_is_probe_resp(fc) &&
      !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
   tx_flags |= TX_CMD_FLG_TSF_MSK;
 } else {
  /* no ACK expected for this frame */
  tx_flags &= (~TX_CMD_FLG_ACK_MSK);
  tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 }

 /* BlockAck requests want an ACK and an immediate BA response. */
 if (ieee80211_is_back_req(fc))
  tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

 tx_cmd->sta_id = std_id;
 if (ieee80211_has_morefrags(fc))
  tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

 if (ieee80211_is_data_qos(fc)) {
  /* QoS data: carry the TID and let the driver manage seq numbers. */
  u8 *qc = ieee80211_get_qos_ctl(hdr);
  tx_cmd->tid_tspec = qc[0] & 0xf;
  tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 } else {
  tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 }

 /* RTS/CTS protection flags as configured for this frame. */
 il_tx_cmd_protection(il, info, fc, &tx_flags);

 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 if (ieee80211_is_mgmt(fc)) {
  /* (re)assoc requests get a slightly longer PM timeout (3 vs 2). */
  if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
   tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
  else
   tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
 } else {
  tx_cmd->timeout.pm_frame_timeout = 0;
 }

 tx_cmd->driver_txop = 0;
 tx_cmd->tx_flags = tx_flags;
 tx_cmd->next_frame_len = 0;
}
1541
/*
 * Choose retry limits and the Tx rate for a frame. Data frames get
 * TX_CMD_FLG_STA_RATE_MSK with initial_rate_idx 0 (rate selection
 * deferred — presumably to the per-station rate table; confirm); all
 * other frames are sent at a fixed legacy rate derived from
 * info->control.rates[0], with CCK and antenna-toggle flags applied.
 */
static void
il4965_tx_cmd_build_rate(struct il_priv *il,
    struct il_tx_cmd *tx_cmd,
    struct ieee80211_tx_info *info,
    struct ieee80211_sta *sta,
    __le16 fc)
{
 const u8 rts_retry_limit = 60;
 u32 rate_flags;
 int rate_idx;
 u8 data_retry_limit;
 u8 rate_plcp;

 /* Probe responses retry less aggressively than other frames. */
 if (ieee80211_is_probe_resp(fc))
  data_retry_limit = 3;
 else
  data_retry_limit = IL4965_DEFAULT_TX_RETRY;
 tx_cmd->data_retry_limit = data_retry_limit;

 /* RTS retries never exceed the data retry budget. */
 tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);

 if (ieee80211_is_data(fc)) {
  tx_cmd->initial_rate_idx = 0;
  tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
  return;
 }

 /* Sanitize the mac80211-supplied rate index: MCS or out-of-range
  * values fall back to the lowest supported rate for this band.
  * NOTE(review): the bound accepts rate_idx == RATE_COUNT_LEGACY;
  * confirm il_rates has that entry (this matches the upstream code). */
 rate_idx = info->control.rates[0].idx;
 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
     || rate_idx > RATE_COUNT_LEGACY)
  rate_idx = rate_lowest_index(&il->bands[info->band], sta);

 /* 5 GHz band has no CCK rates, so its indices are offset by the
  * first OFDM rate. */
 if (info->band == IEEE80211_BAND_5GHZ)
  rate_idx += IL_FIRST_OFDM_RATE;

 /* PLCP value the hardware understands for this rate. */
 rate_plcp = il_rates[rate_idx].plcp;

 rate_flags = 0;

 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
  rate_flags |= RATE_MCS_CCK_MSK;

 /* Round-robin the management-frame Tx antenna across valid ones. */
 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
 rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;

 tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
}
1601
/*
 * Program the Tx command's hardware-crypto fields from the mac80211
 * hw key: cipher selector, key material and, for CCMP aggregation, the
 * AGG_CCMP flag.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
        struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
        int sta_id)
{
 struct ieee80211_key_conf *keyconf = info->control.hw_key;

 switch (keyconf->cipher) {
 case WLAN_CIPHER_SUITE_CCMP:
  tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
  memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
  if (info->flags & IEEE80211_TX_CTL_AMPDU)
   tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
  D_TX("tx_cmd with AES hwcrypto\n");
  break;

 case WLAN_CIPHER_SUITE_TKIP:
  /* TKIP: hardware gets the per-packet phase-2 key. */
  tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
  ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
  D_TX("tx_cmd with tkip hwcrypto\n");
  break;

 case WLAN_CIPHER_SUITE_WEP104:
  tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
  /* fall through - WEP104 only adds the 128-bit key-size bit,
   * the rest of the setup is shared with WEP40 */
 case WLAN_CIPHER_SUITE_WEP40:
  tx_cmd->sec_ctl |=
      (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
       TX_CMD_SEC_SHIFT);

  /* WEP key material starts at offset 3 in the key field. */
  memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

  D_TX("Configuring packet for WEP encryption " "with key %d\n",
       keyconf->keyidx);
  break;

 default:
  IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
  break;
 }
}
1643
1644
1645
1646
/*
 * Queue a frame for transmission: build the Tx command, DMA-map the
 * command+header and the payload, attach both to a TFD on the chosen Tx
 * queue and advance the hardware write pointer.
 *
 * Takes il->lock (irqsave) for the whole operation and briefly nests
 * il->sta_lock for QoS sequence-number / aggregation bookkeeping.
 * Returns 0 on success, -1 when the frame is dropped.
 */
int
il4965_tx_skb(struct il_priv *il,
       struct ieee80211_sta *sta,
       struct sk_buff *skb)
{
 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 struct il_station_priv *sta_priv = NULL;
 struct il_tx_queue *txq;
 struct il_queue *q;
 struct il_device_cmd *out_cmd;
 struct il_cmd_meta *out_meta;
 struct il_tx_cmd *tx_cmd;
 int txq_id;
 dma_addr_t phys_addr;
 dma_addr_t txcmd_phys;
 dma_addr_t scratch_phys;
 u16 len, firstlen, secondlen;
 u16 seq_number = 0;
 __le16 fc;
 u8 hdr_len;
 u8 sta_id;
 u8 wait_write_ptr = 0;
 u8 tid = 0;
 u8 *qc = NULL;
 unsigned long flags;
 bool is_agg = false;

 spin_lock_irqsave(&il->lock, flags);
 if (il_is_rfkill(il)) {
  D_DROP("Dropping - RF KILL\n");
  goto drop_unlock;
 }

 fc = hdr->frame_control;

#ifdef CONFIG_IWLEGACY_DEBUG
 if (ieee80211_is_auth(fc))
  D_TX("Sending AUTH frame\n");
 else if (ieee80211_is_assoc_req(fc))
  D_TX("Sending ASSOC frame\n");
 else if (ieee80211_is_reassoc_req(fc))
  D_TX("Sending REASSOC frame\n");
#endif

 hdr_len = ieee80211_hdrlen(fc);

 /* Non-data frames use the broadcast station entry; data frames must
  * resolve to a real (or broadcast) station. */
 if (!ieee80211_is_data(fc))
  sta_id = il->hw_params.bcast_id;
 else {
  sta_id = il_sta_id_or_broadcast(il, sta);

  if (sta_id == IL_INVALID_STATION) {
   D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
   goto drop_unlock;
  }
 }

 D_TX("station Id %d\n", sta_id);

 if (sta)
  sta_priv = (void *)sta->drv_priv;

 /* Station asleep but mac80211 wants this frame out anyway (e.g. a
  * PS-poll response): allow exactly one frame through. */
 if (sta_priv && sta_priv->asleep &&
     (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
  il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
 }

 /* DTIM-deferred frames are not supported on this path. */
 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

 txq_id = skb_get_queue_mapping(skb);

 /* sta_lock protects the per-station TID state below. */
 spin_lock(&il->sta_lock);

 if (ieee80211_is_data_qos(fc)) {
  qc = ieee80211_get_qos_ctl(hdr);
  tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
  if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
   spin_unlock(&il->sta_lock);
   goto drop_unlock;
  }
  /* Driver-managed sequence number: stamp it into the header. */
  seq_number = il->stations[sta_id].tid[tid].seq_number;
  seq_number &= IEEE80211_SCTL_SEQ;
  hdr->seq_ctrl =
      hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
  hdr->seq_ctrl |= cpu_to_le16(seq_number);
  seq_number += 0x10;

  /* Aggregation active for this TID: redirect to the agg queue. */
  if (info->flags & IEEE80211_TX_CTL_AMPDU &&
      il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
   txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
   is_agg = true;
  }
 }

 txq = &il->txq[txq_id];
 q = &txq->q;

 if (unlikely(il_queue_space(q) < q->high_mark)) {
  spin_unlock(&il->sta_lock);
  goto drop_unlock;
 }

 if (ieee80211_is_data_qos(fc)) {
  il->stations[sta_id].tid[tid].tfds_in_queue++;
  /* Only advance the stored seq number on the final fragment. */
  if (!ieee80211_has_morefrags(fc))
   il->stations[sta_id].tid[tid].seq_number = seq_number;
 }

 spin_unlock(&il->sta_lock);

 /* Remember the skb so the reclaim path can complete it later. */
 txq->skbs[q->write_ptr] = skb;

 /* Set up first empty entry in queue's array of Tx/cmd buffers. */
 out_cmd = txq->cmd[q->write_ptr];
 out_meta = &txq->meta[q->write_ptr];
 tx_cmd = &out_cmd->cmd.tx;
 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));

 /* Command header: C_TX plus a sequence encoding queue and index so
  * the completion can be matched back to this slot. */
 out_cmd->hdr.cmd = C_TX;
 out_cmd->hdr.sequence =
     cpu_to_le16((u16)
   (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));

 /* Copy the 802.11 MAC header into the Tx command. */
 memcpy(tx_cmd->hdr, hdr, hdr_len);

 /* Total frame length, header included. */
 tx_cmd->len = cpu_to_le16((u16) skb->len);

 if (info->control.hw_key)
  il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);

 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);

 il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);

 /* First TFD entry covers command header + Tx command + MAC header,
  * rounded up to a 4-byte boundary. */
 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
 firstlen = (len + 3) & ~3;

 /* Tell the uCode about the padding we introduced, if any. */
 if (firstlen != len)
  tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

 /* Map the command buffer; bidirectional because scratch fields are
  * written back below after the sync-for-cpu. */
 txcmd_phys =
     pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
      PCI_DMA_BIDIRECTIONAL);
 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
  goto drop_unlock;

 /* Second TFD entry: the frame payload (everything after the MAC
  * header), if there is any. */
 secondlen = skb->len - hdr_len;
 if (secondlen > 0) {
  phys_addr =
      pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
       PCI_DMA_TODEVICE);
  if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
   goto drop_unlock;
 }

 /* Attach both buffers to the TFD and record unmap info for reclaim. */
 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 dma_unmap_len_set(out_meta, len, firstlen);
 if (secondlen)
  il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
            0, 0);

 /* Fragmented frames defer the write-pointer update to the last
  * fragment. */
 if (!ieee80211_has_morefrags(hdr->frame_control)) {
  txq->need_update = 1;
 } else {
  wait_write_ptr = 1;
  txq->need_update = 0;
 }

 scratch_phys =
     txcmd_phys + sizeof(struct il_cmd_header) +
     offsetof(struct il_tx_cmd, scratch);

 /* Sync for CPU so the scratch pointers can be patched in, then sync
  * back for the device below. */
 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
        PCI_DMA_BIDIRECTIONAL);
 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);

 il_update_stats(il, true, fc, skb->len);

 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);

 /* Aggregated frames also feed the scheduler's byte-count table. */
 if (info->flags & IEEE80211_TX_CTL_AMPDU)
  il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));

 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
           PCI_DMA_BIDIRECTIONAL);

 /* Hand the slot to the hardware. */
 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
 il_txq_update_write_ptr(il, txq);
 spin_unlock_irqrestore(&il->lock, flags);

 /* Track in-flight non-aggregated frames per client station so the
  * completion path can unblock a sleeping station. */
 if (sta_priv && sta_priv->client && !is_agg)
  atomic_inc(&sta_priv->pending_frames);

 /* Queue nearly full: either push the deferred write pointer now or
  * stop the mac80211 queue until space is reclaimed. */
 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
  if (wait_write_ptr) {
   spin_lock_irqsave(&il->lock, flags);
   txq->need_update = 1;
   il_txq_update_write_ptr(il, txq);
   spin_unlock_irqrestore(&il->lock, flags);
  } else {
   il_stop_queue(il, txq);
  }
 }

 return 0;

drop_unlock:
 /* NOTE(review): failures after txq->skbs[q->write_ptr] = skb leave
  * that slot set; write_ptr was not advanced so the next attempt
  * overwrites it — verify no reclaim path reads the stale entry. */
 spin_unlock_irqrestore(&il->lock, flags);
 return -1;
}
1920
1921static inline int
1922il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1923{
1924 ptr->addr =
1925 dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
1926 if (!ptr->addr)
1927 return -ENOMEM;
1928 ptr->size = size;
1929 return 0;
1930}
1931
1932static inline void
1933il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1934{
1935 if (unlikely(!ptr->addr))
1936 return;
1937
1938 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1939 memset(ptr, 0, sizeof(*ptr));
1940}
1941
1942
1943
1944
1945
1946
1947void
1948il4965_hw_txq_ctx_free(struct il_priv *il)
1949{
1950 int txq_id;
1951
1952
1953 if (il->txq) {
1954 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1955 if (txq_id == il->cmd_queue)
1956 il_cmd_queue_free(il);
1957 else
1958 il_tx_queue_free(il, txq_id);
1959 }
1960 il4965_free_dma_ptr(il, &il->kw);
1961
1962 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1963
1964
1965 il_free_txq_mem(il);
1966}
1967
1968
1969
1970
1971
1972
1973
1974
/*
 * Allocate and initialize the full Tx queue context: scheduler
 * byte-count tables, keep-warm buffer and every Tx queue.
 *
 * Returns 0 on success or a negative errno. On failure everything
 * acquired so far is released; the extra il4965_free_dma_ptr() calls in
 * the 'error' path are safe after il4965_hw_txq_ctx_free() because the
 * free helper clears the descriptor it releases.
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
 int ret, txq_id;
 unsigned long flags;

 /* Free all tx/cmd queues and keep-warm buffer first. */
 il4965_hw_txq_ctx_free(il);

 ret =
     il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
     il->hw_params.scd_bc_tbls_size);
 if (ret) {
  IL_ERR("Scheduler BC Table allocation failed\n");
  goto error_bc_tbls;
 }

 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
 if (ret) {
  IL_ERR("Keep Warm allocation failed\n");
  goto error_kw;
 }

 /* Queue bookkeeping memory. */
 ret = il_alloc_txq_mem(il);
 if (ret)
  goto error;

 spin_lock_irqsave(&il->lock, flags);

 /* Turn off all Tx DMA fifos while the queues are set up. */
 il4965_txq_set_sched(il, 0);

 /* Tell the hardware where the keep-warm buffer lives. */
 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

 spin_unlock_irqrestore(&il->lock, flags);

 /* Allocate and initialize every Tx/cmd queue. */
 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
  ret = il_tx_queue_init(il, txq_id);
  if (ret) {
   IL_ERR("Tx %d queue init failed\n", txq_id);
   goto error;
  }
 }

 return ret;

error:
 il4965_hw_txq_ctx_free(il);
 il4965_free_dma_ptr(il, &il->kw);
error_kw:
 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
 return ret;
}
2032
2033void
2034il4965_txq_ctx_reset(struct il_priv *il)
2035{
2036 int txq_id;
2037 unsigned long flags;
2038
2039 spin_lock_irqsave(&il->lock, flags);
2040
2041
2042 il4965_txq_set_sched(il, 0);
2043
2044 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2045
2046 spin_unlock_irqrestore(&il->lock, flags);
2047
2048
2049 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2050 il_tx_queue_reset(il, txq_id);
2051}
2052
2053void
2054il4965_txq_ctx_unmap(struct il_priv *il)
2055{
2056 int txq_id;
2057
2058 if (!il->txq)
2059 return;
2060
2061
2062 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2063 if (txq_id == il->cmd_queue)
2064 il_cmd_queue_unmap(il);
2065 else
2066 il_tx_queue_unmap(il, txq_id);
2067}
2068
2069
2070
2071
/*
 * Stop Tx DMA: disable the scheduler, then turn off each DMA channel
 * and poll (up to 1 ms per channel) for it to report idle.
 *
 * NOTE(review): uses the low-level _il_wr/_il_poll_bit accessors —
 * confirm callers provide whatever device-access/locking state these
 * primitives require.
 */
void
il4965_txq_ctx_stop(struct il_priv *il)
{
 int ch, ret;

 _il_wr_prph(il, IL49_SCD_TXFACT, 0);

 /* Stop each Tx DMA channel and wait for it to go idle. */
 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
  _il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
  ret =
      _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
     FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
     FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
     1000);
  if (ret < 0)
   IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
          ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
 }
}
2092
2093
2094
2095
2096
2097
2098
2099static int
2100il4965_txq_ctx_activate_free(struct il_priv *il)
2101{
2102 int txq_id;
2103
2104 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2105 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2106 return txq_id;
2107 return -1;
2108}
2109
2110
2111
2112
/*
 * Detach Tx queue @txq_id from the scheduler by clearing its ACTIVE
 * status bit while writing the SCD_ACT_EN bit in the same register.
 */
static void
il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
{
 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
     (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
     (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
2122
2123
2124
2125
2126static int
2127il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2128{
2129 u32 tbl_dw_addr;
2130 u32 tbl_dw;
2131 u16 scd_q2ratid;
2132
2133 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2134
2135 tbl_dw_addr =
2136 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2137
2138 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2139
2140 if (txq_id & 0x1)
2141 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2142 else
2143 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2144
2145 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2146
2147 return 0;
2148}
2149
2150
2151
2152
2153
2154
2155
/*
 * Configure hardware queue @txq_id for aggregation on (sta_id, tid):
 * validate the queue number, enable the TID at the station level, then
 * (under il->lock) detach the queue from the scheduler, program its
 * RA/TID mapping, chain mode, read/write pointers, window size and
 * frame limit, and finally re-activate it on @tx_fifo.
 *
 * Returns 0 on success or a negative errno.
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
        int tid, u16 ssn_idx)
{
 unsigned long flags;
 u16 ra_tid;
 int ret;

 /* Only the dedicated AMPDU queue range may be configured here. */
 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
     (IL49_FIRST_AMPDU_QUEUE +
      il->cfg->num_of_ampdu_queues <= txq_id)) {
  IL_WARN("queue number out of range: %d, must be %d to %d\n",
   txq_id, IL49_FIRST_AMPDU_QUEUE,
   IL49_FIRST_AMPDU_QUEUE +
   il->cfg->num_of_ampdu_queues - 1);
  return -EINVAL;
 }

 ra_tid = BUILD_RAxTID(sta_id, tid);

 /* Modify device's station table to Tx this TID. */
 ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
 if (ret)
  return ret;

 spin_lock_irqsave(&il->lock, flags);

 /* Stop this Tx queue before configuring it. */
 il4965_tx_queue_stop_scheduler(il, txq_id);

 /* Map receiver-address / traffic-ID to this queue. */
 il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

 /* Set this queue as a chain-building queue. */
 il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

 /* Place first TFD at idx corresponding to start sequence number
  * (low 8 bits of the SSN). */
 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 il4965_set_wr_ptrs(il, txq_id, ssn_idx);

 /* Set up this queue's aggregation window size in SRAM context. */
 il_write_targ_mem(il,
     il->scd_base_addr +
     IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
     (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
     & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

 /* ... and its frame limit in the second context word. */
 il_write_targ_mem(il,
     il->scd_base_addr +
     IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
     (SCD_FRAME_LIMIT <<
      IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
     IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

 il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

 /* Re-activate the queue on the requested FIFO, aggregation enabled. */
 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

 spin_unlock_irqrestore(&il->lock, flags);

 return 0;
}
2221
/*
 * mac80211 entry point to start a Tx BA (aggregation) session for
 * (sta, tid): claim a free aggregation queue, record the starting
 * sequence number in *ssn, program the hardware queue, then either
 * complete the ADDBA immediately (HW queue empty) or defer completion
 * until the pending frames drain (IL_EMPTYING_HW_QUEUE_ADDBA).
 *
 * Returns 0 on success or a negative errno.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
      struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
 int sta_id;
 int tx_fifo;
 int txq_id;
 int ret;
 unsigned long flags;
 struct il_tid_data *tid_data;

 /* Also bounds-checks the TID (returns -EINVAL when out of range). */
 tx_fifo = il4965_get_fifo_from_tid(tid);
 if (unlikely(tx_fifo < 0))
  return tx_fifo;

 D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

 sta_id = il_sta_id(sta);
 if (sta_id == IL_INVALID_STATION) {
  IL_ERR("Start AGG on invalid station\n");
  return -ENXIO;
 }
 if (unlikely(tid >= MAX_TID_COUNT))
  return -EINVAL;

 if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
  IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
  return -ENXIO;
 }

 /* Claim one of the dedicated aggregation queues. */
 txq_id = il4965_txq_ctx_activate_free(il);
 if (txq_id == -1) {
  IL_ERR("No free aggregation queue available\n");
  return -ENXIO;
 }

 spin_lock_irqsave(&il->sta_lock, flags);
 tid_data = &il->stations[sta_id].tid[tid];
 *ssn = SEQ_TO_SN(tid_data->seq_number);
 tid_data->agg.txq_id = txq_id;
 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
 spin_unlock_irqrestore(&il->sta_lock, flags);

 ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
 if (ret)
  return ret;

 spin_lock_irqsave(&il->sta_lock, flags);
 tid_data = &il->stations[sta_id].tid[tid];
 if (tid_data->tfds_in_queue == 0) {
  /* Nothing pending: the BA session can start right away. */
  D_HT("HW queue is empty\n");
  tid_data->agg.state = IL_AGG_ON;
  ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 } else {
  /* Frames still queued: finish the ADDBA from
   * il4965_txq_check_empty() once they drain. */
  D_HT("HW queue is NOT empty: %d packets in HW queue\n",
       tid_data->tfds_in_queue);
  tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
 }
 spin_unlock_irqrestore(&il->sta_lock, flags);
 return ret;
}
2284
2285
2286
2287
2288
/*
 * Undo il4965_txq_agg_enable() for @txq_id: validate the queue number,
 * detach it from the scheduler, drop chain mode, reset the read/write
 * pointers to @ssn_idx, deactivate the queue context and restore it to
 * @tx_fifo with aggregation disabled.
 *
 * Returns 0 on success or -EINVAL for a queue outside the AMPDU range.
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
     (IL49_FIRST_AMPDU_QUEUE +
      il->cfg->num_of_ampdu_queues <= txq_id)) {
  IL_WARN("queue number out of range: %d, must be %d to %d\n",
   txq_id, IL49_FIRST_AMPDU_QUEUE,
   IL49_FIRST_AMPDU_QUEUE +
   il->cfg->num_of_ampdu_queues - 1);
  return -EINVAL;
 }

 il4965_tx_queue_stop_scheduler(il, txq_id);

 /* Take the queue out of chain-building mode. */
 il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 /* Supposes that ssn_idx is valid (!= 0xFFF) */
 il4965_set_wr_ptrs(il, txq_id, ssn_idx);

 il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
 il_txq_ctx_deactivate(il, txq_id);
 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

 return 0;
}
2317
/*
 * mac80211 entry point to stop a Tx BA session for (sta, tid). If the
 * hardware queue still holds frames, only flag the session as
 * IL_EMPTYING_HW_QUEUE_DELBA and let il4965_txq_check_empty() finish
 * the teardown; otherwise disable the aggregation queue immediately and
 * notify mac80211.
 *
 * Returns 0 on success or a negative errno.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
     struct ieee80211_sta *sta, u16 tid)
{
 int tx_fifo_id, txq_id, sta_id, ssn;
 struct il_tid_data *tid_data;
 int write_ptr, read_ptr;
 unsigned long flags;

 /* Also bounds-checks the TID (returns -EINVAL when out of range). */
 tx_fifo_id = il4965_get_fifo_from_tid(tid);
 if (unlikely(tx_fifo_id < 0))
  return tx_fifo_id;

 sta_id = il_sta_id(sta);

 if (sta_id == IL_INVALID_STATION) {
  IL_ERR("Invalid station for AGG tid %d\n", tid);
  return -ENXIO;
 }

 spin_lock_irqsave(&il->sta_lock, flags);

 tid_data = &il->stations[sta_id].tid[tid];
 /* SSN of the next frame; low 4 bits of seq_number are the fragment. */
 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
 txq_id = tid_data->agg.txq_id;

 switch (il->stations[sta_id].tid[tid].agg.state) {
 case IL_EMPTYING_HW_QUEUE_ADDBA:
  /* ADDBA was still pending; no HW queue to drain, just turn
   * the session off. */
  D_HT("AGG stop before setup done\n");
  goto turn_off;
 case IL_AGG_ON:
  break;
 default:
  IL_WARN("Stopping AGG while state not ON or starting\n");
 }

 write_ptr = il->txq[txq_id].q.write_ptr;
 read_ptr = il->txq[txq_id].q.read_ptr;

 /* The queue is not empty: defer the teardown until it drains. */
 if (write_ptr != read_ptr) {
  D_HT("Stopping a non empty AGG HW QUEUE\n");
  il->stations[sta_id].tid[tid].agg.state =
      IL_EMPTYING_HW_QUEUE_DELBA;
  spin_unlock_irqrestore(&il->sta_lock, flags);
  return 0;
 }

 D_HT("HW queue is empty\n");
turn_off:
 il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

 /* NOTE(review): the irq flags saved with sta_lock are restored via
  * il->lock below after a bare unlock/lock swap — this matches the
  * upstream driver, but verify the irq-state handoff is intended. */
 spin_unlock(&il->sta_lock);
 spin_lock(&il->lock);

 /* Disable the hardware aggregation queue (return value ignored:
  * the range was validated when the queue was enabled). */
 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
 spin_unlock_irqrestore(&il->lock, flags);

 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

 return 0;
}
2395
/*
 * Called when frames drain from a Tx queue: complete a deferred
 * aggregation transition for (sta_id, tid). A pending DELBA finishes
 * once the queue's read and write pointers meet; a pending ADDBA
 * finishes once no TFDs remain queued for the TID.
 *
 * Caller must hold il->sta_lock. Always returns 0.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
 struct il_queue *q = &il->txq[txq_id].q;
 u8 *addr = il->stations[sta_id].sta.sta.addr;
 struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

 lockdep_assert_held(&il->sta_lock);

 switch (il->stations[sta_id].tid[tid].agg.state) {
 case IL_EMPTYING_HW_QUEUE_DELBA:
  /* Queue empty and this is the session's agg queue: finish
   * the teardown started in il4965_tx_agg_stop(). */
  if (txq_id == tid_data->agg.txq_id &&
      q->read_ptr == q->write_ptr) {
   u16 ssn = SEQ_TO_SN(tid_data->seq_number);
   int tx_fifo = il4965_get_fifo_from_tid(tid);
   D_HT("HW queue empty: continue DELBA flow\n");
   il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
   tid_data->agg.state = IL_AGG_OFF;
   ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
  }
  break;
 case IL_EMPTYING_HW_QUEUE_ADDBA:
  /* All pre-ADDBA frames flushed: the session may start. */
  if (tid_data->tfds_in_queue == 0) {
   D_HT("HW queue empty: continue ADDBA flow\n");
   tid_data->agg.state = IL_AGG_ON;
   ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
  }
  break;
 }

 return 0;
}
2431
2432static void
2433il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
2434{
2435 struct ieee80211_sta *sta;
2436 struct il_station_priv *sta_priv;
2437
2438 rcu_read_lock();
2439 sta = ieee80211_find_sta(il->vif, addr1);
2440 if (sta) {
2441 sta_priv = (void *)sta->drv_priv;
2442
2443 if (sta_priv->client &&
2444 atomic_dec_return(&sta_priv->pending_frames) == 0)
2445 ieee80211_sta_block_awake(il->hw, sta, false);
2446 }
2447 rcu_read_unlock();
2448}
2449
2450static void
2451il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
2452{
2453 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2454
2455 if (!is_agg)
2456 il4965_non_agg_tx_status(il, hdr->addr1);
2457
2458 ieee80211_tx_status_irqsafe(il->hw, skb);
2459}
2460
/*
 * Reclaim completed entries on Tx queue @txq_id up to (and including)
 * @idx: report each skb to mac80211, clear its slot and free its TFD.
 *
 * Returns the number of reclaimed QoS-data frames (used by callers to
 * decrement tfds_in_queue), or 0 when @idx is out of range.
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
 struct il_tx_queue *txq = &il->txq[txq_id];
 struct il_queue *q = &txq->q;
 int nfreed = 0;
 struct ieee80211_hdr *hdr;
 struct sk_buff *skb;

 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
  IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
         "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
         q->write_ptr, q->read_ptr);
  return 0;
 }

 /* Walk read_ptr forward until it passes idx (inclusive, hence the
  * pre-increment of idx before the loop condition). */
 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
      q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

  skb = txq->skbs[txq->q.read_ptr];

  if (WARN_ON_ONCE(skb == NULL))
   continue;

  hdr = (struct ieee80211_hdr *) skb->data;
  /* Only QoS-data frames count toward tfds_in_queue. */
  if (ieee80211_is_data_qos(hdr->frame_control))
   nfreed++;

  /* Queues at/above the first AMPDU queue carry aggregated
   * traffic. */
  il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

  txq->skbs[txq->q.read_ptr] = NULL;
  il->ops->txq_free_tfd(il, txq);
 }
 return nfreed;
}
2496
2497
2498
2499
2500
2501
2502
/*
 * Process a compressed block-ack response for aggregation state @agg:
 * align the BA bitmap with the aggregation's start index, count the
 * acknowledged frames and fill the mac80211 tx_info of the first frame
 * in the batch with the AMPDU ACK results.
 *
 * Returns 0 on success, -EINVAL for an unexpected BA, -1 when the BA
 * window does not cover all transmitted frames.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
         struct il_compressed_ba_resp *ba_resp)
{
 int i, sh, ack;
 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
 int successes = 0;
 struct ieee80211_tx_info *info;
 u64 bitmap, sent_bitmap;

 if (unlikely(!agg->wait_for_ba)) {
  if (unlikely(ba_resp->bitmap))
   IL_ERR("Received BA when not expected\n");
  return -EINVAL;
 }

 /* Mark that the expected block-ack response arrived. */
 agg->wait_for_ba = 0;
 D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

 /* Shift between the BA window start (from seq_ctl) and our
  * aggregation start index, wrapped to the 256-entry seq space. */
 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
 if (sh < 0)
  sh += 0x100;

 if (agg->frame_count > (64 - sh)) {
  /* The 64-bit BA bitmap cannot cover every frame we sent. */
  D_TX_REPLY("more frames than bitmap size");
  return -1;
 }

 /* Align the acknowledgement bitmap with agg->start_idx. */
 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

 /* Consider only the frames we actually transmitted. */
 sent_bitmap = bitmap & agg->bitmap;

 /* Count the acknowledged frames bit by bit. */
 i = 0;
 while (sent_bitmap) {
  ack = sent_bitmap & 1ULL;
  successes += ack;
  D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
      i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
  sent_bitmap >>= 1;
  ++i;
 }

 D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

 /* Report the aggregate result on the first frame of the batch. */
 info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
 memset(&info->status, 0, sizeof(info->status));
 info->flags |= IEEE80211_TX_STAT_ACK;
 info->flags |= IEEE80211_TX_STAT_AMPDU;
 info->status.ampdu_ack_len = successes;
 info->status.ampdu_len = agg->frame_count;
 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

 return 0;
}
2565
2566static inline bool
2567il4965_is_tx_success(u32 status)
2568{
2569 status &= TX_STATUS_MSK;
2570 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
2571}
2572
/*
 * Look up the driver station-table index for MAC address @addr.
 *
 * Broadcast addresses map straight to the broadcast entry; in IBSS mode
 * the search starts at IL_STA_ID to skip the reserved entries. A match
 * is only returned if the station is fully set up in the uCode
 * (UCODE_ACTIVE and not still INPROGRESS); otherwise, or when no match
 * exists, IL_INVALID_STATION is returned.
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
 int i;
 int start = 0;
 int ret = IL_INVALID_STATION;
 unsigned long flags;

 if (il->iw_mode == NL80211_IFTYPE_ADHOC)
  start = IL_STA_ID;

 if (is_broadcast_ether_addr(addr))
  return il->hw_params.bcast_id;

 spin_lock_irqsave(&il->sta_lock, flags);
 for (i = start; i < il->hw_params.max_stations; i++)
  if (il->stations[i].used &&
      ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
   ret = i;
   goto out;
  }

 D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
 /* Reject entries the uCode does not consider ready yet: either not
  * ACTIVE at all, or ACTIVE with a setup still INPROGRESS. */
 if (ret != IL_INVALID_STATION &&
     (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
      ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
       (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
  IL_ERR("Requested station info for sta %d before ready.\n",
         ret);
  ret = IL_INVALID_STATION;
 }
 spin_unlock_irqrestore(&il->sta_lock, flags);
 return ret;
}
2614
2615static int
2616il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2617{
2618 if (il->iw_mode == NL80211_IFTYPE_STATION)
2619 return IL_AP_ID;
2620 else {
2621 u8 *da = ieee80211_get_DA(hdr);
2622
2623 return il4965_find_station(il, da);
2624 }
2625}
2626
/*
 * Extract the scheduler SSN from a Tx response: it is the 32-bit word
 * that follows the frame_count per-frame status entries, masked down to
 * a sequence number.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
}
2632
2633static inline u32
2634il4965_tx_status_to_mac80211(u32 status)
2635{
2636 status &= TX_STATUS_MSK;
2637
2638 switch (status) {
2639 case TX_STATUS_SUCCESS:
2640 case TX_STATUS_DIRECT_DONE:
2641 return IEEE80211_TX_STAT_ACK;
2642 case TX_STATUS_FAIL_DEST_PS:
2643 return IEEE80211_TX_STAT_TX_FILTERED;
2644 default:
2645 return 0;
2646 }
2647}
2648
2649
2650
2651
/*
 * il4965_tx_status_reply_tx - record TX response state for an agg queue
 *
 * Stores the per-frame transmit status of an aggregated TX command into
 * @agg (frame count, start index, rate, attempted-frame bitmap) so the
 * later compressed block-ack notification can be matched against it.
 *
 * Returns 0 on success, -1 when the response does not match the frames
 * currently queued (missing skb or sequence mismatch).
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* One frame only: no block-ack expected, report status directly. */
	if (agg->frame_count == 1) {

		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Multiple frames: build the bitmap of attempted frames
		 * that the compressed BA will be checked against. */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* Walk the per-frame status entries from the uCode. */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* Skip frames the uCode never actually attempted. */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			/* Queue index must agree with the 802.11 sequence
			 * number modulo 256, else state is corrupted. */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   SEQ_TO_SN(sc));

			/* Fold this frame into the bitmap, re-anchoring
			 * 'start' when the index wraps around the queue. */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		/* Any attempted frame means a block-ack is expected. */
		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
2760
2761
2762
2763
/*
 * il4965_hdl_tx - handle the uCode's TX-complete notification
 *
 * Reclaims completed TFDs, fills in mac80211 TX status for the reclaimed
 * frame(s), and wakes the TX queue once enough space frees up.  Station
 * and aggregation bookkeeping is done under il->sta_lock.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	int uninitialized_var(tid);
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	/* Ignore responses that point outside the queue's in-use window. */
	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		/* NOTE(review): if this WARN fires (non-QoS frame on a
		 * scheduled-retry queue), 'tid' below is read while still
		 * uninitialized -- the warning is the only guard. */
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* Single failed frame on an agg queue: no BA will arrive. */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			/* Reclaim every TFD up to the scheduler's SSN. */
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* Plain queue: report the status for this frame directly. */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2867
2868
2869
2870
2871void
2872il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2873 struct ieee80211_tx_info *info)
2874{
2875 struct ieee80211_tx_rate *r = &info->status.rates[0];
2876
2877 info->status.antenna =
2878 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2879 if (rate_n_flags & RATE_MCS_HT_MSK)
2880 r->flags |= IEEE80211_TX_RC_MCS;
2881 if (rate_n_flags & RATE_MCS_GF_MSK)
2882 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2883 if (rate_n_flags & RATE_MCS_HT40_MSK)
2884 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2885 if (rate_n_flags & RATE_MCS_DUP_MSK)
2886 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2887 if (rate_n_flags & RATE_MCS_SGI_MSK)
2888 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2889 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2890}
2891
2892
2893
2894
2895
2896
2897
/*
 * il4965_hdl_compressed_ba - handle N_COMPRESSED_BA notification
 *
 * The uCode sends this after an aggregated TX to report which frames
 * the peer block-acked.  Match it against the pending aggregation
 * state, reclaim the acked TFDs and wake the queue if space freed up.
 */
void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx win, corresponds to idx
	 * (in Tx queue's circular buffer) of first TFD/frame in win */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * A stale BA for an aggregation session that was already
		 * torn down (or moved to another queue); nothing to
		 * reconcile, so just drop it.
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* Last reclaimable index: one before the BA window start. */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* Update per-frame status from the BA bitmap. */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/*
	 * Release all TFDs before the BA window start: they were either
	 * acked or will be retried by the rate-scaling logic.
	 */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {

		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2974
2975#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il4965_get_tx_fail_reason - render a TX status code as a short string
 *
 * Debug-only helper (CONFIG_IWLEGACY_DEBUG): the two macros expand each
 * status constant into a switch case returning its stringified suffix.
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
3013#endif
3014
3015static struct il_link_quality_cmd *
3016il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
3017{
3018 int i, r;
3019 struct il_link_quality_cmd *link_cmd;
3020 u32 rate_flags = 0;
3021 __le32 rate_n_flags;
3022
3023 link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
3024 if (!link_cmd) {
3025 IL_ERR("Unable to allocate memory for LQ cmd.\n");
3026 return NULL;
3027 }
3028
3029
3030 if (il->band == IEEE80211_BAND_5GHZ)
3031 r = RATE_6M_IDX;
3032 else
3033 r = RATE_1M_IDX;
3034
3035 if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
3036 rate_flags |= RATE_MCS_CCK_MSK;
3037
3038 rate_flags |=
3039 il4965_first_antenna(il->hw_params.
3040 valid_tx_ant) << RATE_MCS_ANT_POS;
3041 rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
3042 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
3043 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
3044
3045 link_cmd->general_params.single_stream_ant_msk =
3046 il4965_first_antenna(il->hw_params.valid_tx_ant);
3047
3048 link_cmd->general_params.dual_stream_ant_msk =
3049 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
3050 valid_tx_ant);
3051 if (!link_cmd->general_params.dual_stream_ant_msk) {
3052 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
3053 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
3054 link_cmd->general_params.dual_stream_ant_msk =
3055 il->hw_params.valid_tx_ant;
3056 }
3057
3058 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3059 link_cmd->agg_params.agg_time_limit =
3060 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
3061
3062 link_cmd->sta_id = sta_id;
3063
3064 return link_cmd;
3065}
3066
3067
3068
3069
3070
3071
/*
 * il4965_add_bssid_station - add a station entry for @addr and set up
 * its initial rate scaling
 *
 * On success, stores the new station index in *@sta_id_r (if non-NULL)
 * and attaches an initial link-quality command to it.  Returns 0 on
 * success or a negative errno.  NOTE(review): on LQ allocation failure
 * the already-added station is left in the table -- presumably cleaned
 * up elsewhere; confirm against callers.
 */
int
il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IL_INVALID_STATION;

	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].used |= IL_STA_LOCAL;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
		       addr);
		return -ENOMEM;
	}

	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
	if (ret)
		IL_ERR("Link quality command failed (%d)\n", ret);

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3114
/*
 * il4965_static_wepkey_cmd - push all default (static) WEP keys to uCode
 *
 * Builds a C_WEPKEY command covering every default-key slot.  Slots with
 * no key get WEP_INVALID_OFFSET.  The command is only sent when at least
 * one key is configured, unless @send_if_empty forces it (used when the
 * last key was removed).  Sleeps (CMD_SYNC); returns il_send_cmd status.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
	int i;
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = C_WEPKEY,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};
	bool not_empty = false;

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		u8 key_size = il->_4965.wep_keys[i].key_size;

		wep_cmd->key[i].key_idx = i;
		if (key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = true;
		} else
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;

		wep_cmd->key[i].key_size = key_size;
		/* key material starts at offset 3 -- presumably the uCode's
		 * expected layout; confirm against the command format. */
		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
3160
/*
 * Re-send the cached default WEP keys to the uCode (e.g. after a
 * firmware restart).  Skips the command when no keys are configured.
 * Caller must hold il->mutex.
 */
int
il4965_restore_default_wep_keys(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, false);
}
3168
3169int
3170il4965_remove_default_wep_key(struct il_priv *il,
3171 struct ieee80211_key_conf *keyconf)
3172{
3173 int ret;
3174 int idx = keyconf->keyidx;
3175
3176 lockdep_assert_held(&il->mutex);
3177
3178 D_WEP("Removing default WEP key: idx=%d\n", idx);
3179
3180 memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
3181 if (il_is_rfkill(il)) {
3182 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
3183
3184 return 0;
3185 }
3186 ret = il4965_static_wepkey_cmd(il, 1);
3187 D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
3188
3189 return ret;
3190}
3191
/*
 * il4965_set_default_wep_key - install a default (static) WEP key
 *
 * Validates the key length (40- or 104-bit), caches the key material,
 * and sends the full default-key table to the uCode.  IV generation is
 * left to the device (GENERATE_IV cleared).  Caller must hold il->mutex.
 * Returns 0 on success or a negative errno.
 */
int
il4965_set_default_wep_key(struct il_priv *il,
			   struct ieee80211_key_conf *keyconf)
{
	int ret;
	int len = keyconf->keylen;
	int idx = keyconf->keyidx;

	lockdep_assert_held(&il->mutex);

	if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;

	il->_4965.wep_keys[idx].key_size = len;
	memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);

	ret = il4965_static_wepkey_cmd(il, false);

	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
	return ret;
}
3219
/*
 * il4965_set_wep_dynamic_key_info - install a pairwise (mapped) WEP key
 *
 * Fills the station's key info under sta_lock and sends a synchronous
 * ADD_STA command with STA_MODIFY_KEY_MASK to program the key in uCode.
 * Caller must hold il->mutex.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Device generates no IV for WEP; mac80211 must not either. */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* key material starts at offset 3 -- presumably the uCode's
	 * expected layout; confirm against the ADD_STA key format. */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* Only grab a fresh uCode key slot if none is assigned yet. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot under the lock; send after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3273
/*
 * il4965_set_ccmp_dynamic_key_info - install a pairwise CCMP (AES) key
 *
 * Programs the key into the station's ADD_STA key block and sends a
 * synchronous ADD_STA command.  mac80211 generates the IV (GENERATE_IV
 * set).  Caller must hold il->mutex.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Only grab a fresh uCode key slot if none is assigned yet. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot under the lock; send after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3321
/*
 * il4965_set_tkip_dynamic_key_info - cache a pairwise TKIP key
 *
 * Unlike the WEP/CCMP variants, no ADD_STA command is sent here: only
 * the driver-side key state is filled in.  NOTE(review): presumably the
 * device is programmed later, once phase-1 key material is available
 * via il4965_update_tkip_key() -- confirm.  mac80211 generates IV and
 * Michael MIC.  Always returns 0.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* TKIP key proper is always 16 bytes (rest is MIC keys). */
	il->stations[sta_id].keyinfo.keylen = 16;

	/* Only grab a fresh uCode key slot if none is assigned yet. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is actually not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3366
/*
 * il4965_update_tkip_key - push TKIP phase-1 (TTAK) key material to uCode
 *
 * Called by mac80211 when the TKIP phase-1 key changes (new iv32).
 * Updates the station's RX TTAK words and asynchronously re-sends the
 * ADD_STA key command.  Bails out if a scan had to be cancelled first
 * (key will be pushed on reassociation) or the station is unknown.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* TTAK is five 16-bit words of phase-1 key material. */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Async send: this path may run in atomic context. */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
3400
/*
 * il4965_remove_dynamic_key - remove a pairwise key from driver and uCode
 *
 * Clears the station's key state, releases its uCode key-table slot and
 * sends an ADD_STA command marking the key invalid.  Stale removals
 * (key index no longer installed) and RF-kill are treated as success.
 * Caller must hold il->mutex.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok. */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Release the uCode key-table slot this key occupied. */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Snapshot under the lock; send after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3461
3462int
3463il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3464 u8 sta_id)
3465{
3466 int ret;
3467
3468 lockdep_assert_held(&il->mutex);
3469
3470 il->_4965.key_mapping_keys++;
3471 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3472
3473 switch (keyconf->cipher) {
3474 case WLAN_CIPHER_SUITE_CCMP:
3475 ret =
3476 il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
3477 break;
3478 case WLAN_CIPHER_SUITE_TKIP:
3479 ret =
3480 il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
3481 break;
3482 case WLAN_CIPHER_SUITE_WEP40:
3483 case WLAN_CIPHER_SUITE_WEP104:
3484 ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
3485 break;
3486 default:
3487 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3488 keyconf->cipher);
3489 ret = -EINVAL;
3490 }
3491
3492 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3493 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3494
3495 return ret;
3496}
3497
3498
3499
3500
3501
3502
3503
3504
/*
 * il4965_alloc_bcast_station - set up the driver's broadcast station entry
 *
 * Reserves the broadcast slot in the station table, marks it active,
 * and attaches an initial link-quality (rate scaling) command.
 * Returns 0 on success or a negative errno.
 */
int
il4965_alloc_bcast_station(struct il_priv *il)
{
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);

		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* LQ allocation may sleep, so it happens outside the lock. */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR
		    ("Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3538
3539
3540
3541
3542
3543
3544
3545static int
3546il4965_update_bcast_station(struct il_priv *il)
3547{
3548 unsigned long flags;
3549 struct il_link_quality_cmd *link_cmd;
3550 u8 sta_id = il->hw_params.bcast_id;
3551
3552 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3553 if (!link_cmd) {
3554 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3555 return -ENOMEM;
3556 }
3557
3558 spin_lock_irqsave(&il->sta_lock, flags);
3559 if (il->stations[sta_id].lq)
3560 kfree(il->stations[sta_id].lq);
3561 else
3562 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3563 il->stations[sta_id].lq = link_cmd;
3564 spin_unlock_irqrestore(&il->sta_lock, flags);
3565
3566 return 0;
3567}
3568
/*
 * Refresh rate scaling for all broadcast stations.  The 4965 has a
 * single context, so this just updates the one broadcast entry.
 */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
3574
3575
3576
3577
/*
 * il4965_sta_tx_modify_enable_tid - re-enable TX for one TID of a station
 *
 * Clears the TID's bit in the station's tid_disable_tx mask and sends a
 * synchronous ADD_STA modify command.  Caller must hold il->mutex.
 */
int
il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under the lock; send after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3597
/*
 * il4965_sta_rx_agg_start - tell uCode an RX BA session starts
 *
 * Programs the station entry with the TID and starting sequence number
 * of the new block-ack session via a synchronous ADD_STA command.
 * Caller must hold il->mutex.  Returns 0 or a negative errno.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under the lock; send after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3624
/*
 * il4965_sta_rx_agg_stop - tell uCode an RX BA session is torn down
 *
 * Mirrors il4965_sta_rx_agg_start(): marks the TID for immediate BA
 * removal and sends a synchronous ADD_STA command.  Caller must hold
 * il->mutex.  Returns 0 or a negative errno.
 */
int
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under the lock; send after dropping it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3651
/*
 * il4965_sta_modify_sleep_tx_count - allow @cnt frames to a sleeping STA
 *
 * Marks the station as in power-save and programs how many frames the
 * uCode may release to it (PS-Poll / U-APSD service period).  Sent
 * asynchronously since this can run from the RX path.
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3668
3669void
3670il4965_update_chain_flags(struct il_priv *il)
3671{
3672 if (il->ops->set_rxon_chain) {
3673 il->ops->set_rxon_chain(il);
3674 if (il->active.rx_chain != il->staging.rx_chain)
3675 il_commit_rxon(il);
3676 }
3677}
3678
3679static void
3680il4965_clear_free_frames(struct il_priv *il)
3681{
3682 struct list_head *element;
3683
3684 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3685
3686 while (!list_empty(&il->free_frames)) {
3687 element = il->free_frames.next;
3688 list_del(element);
3689 kfree(list_entry(element, struct il_frame, list));
3690 il->frames_count--;
3691 }
3692
3693 if (il->frames_count) {
3694 IL_WARN("%d frames still in use. Did we lose one?\n",
3695 il->frames_count);
3696 il->frames_count = 0;
3697 }
3698}
3699
3700static struct il_frame *
3701il4965_get_free_frame(struct il_priv *il)
3702{
3703 struct il_frame *frame;
3704 struct list_head *element;
3705 if (list_empty(&il->free_frames)) {
3706 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3707 if (!frame) {
3708 IL_ERR("Could not allocate frame!\n");
3709 return NULL;
3710 }
3711
3712 il->frames_count++;
3713 return frame;
3714 }
3715
3716 element = il->free_frames.next;
3717 list_del(element);
3718 return list_entry(element, struct il_frame, list);
3719}
3720
/* Return a frame to the free list after scrubbing its contents. */
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
3727
3728static u32
3729il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3730 int left)
3731{
3732 lockdep_assert_held(&il->mutex);
3733
3734 if (!il->beacon_skb)
3735 return 0;
3736
3737 if (il->beacon_skb->len > left)
3738 return 0;
3739
3740 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3741
3742 return il->beacon_skb->len;
3743}
3744
3745
/*
 * Locate the TIM information element inside a beacon frame and record its
 * offset and size in the TX beacon command, so the uCode can update the
 * TIM on its own.
 */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * Index of the first information element: right after the fixed
	 * beacon fields (mac80211's u.beacon.variable marker).
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Walk the IE list (each IE: id byte, length byte, payload) until
	 * the TIM element is found or there is no room left for another
	 * id/length pair. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* A beacon without a TIM element is malformed; warn but carry on
	 * (tim_idx/tim_size stay zeroed by the caller's memset). */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
3772
/*
 * Build a C_TX_BEACON command in @frame from the cached beacon skb.
 *
 * Returns the total command size (header + beacon payload) to send,
 * or 0 on any failure (beaconing disabled, no/oversized beacon).
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/* il->beacon_skb and il->beacon_enabled are protected by the
	 * mutex; il4965_fill_beacon_frame() asserts it as well. */
	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Start from a clean command; zeroed tim_idx/tim_size are the
	 * fallback if no TIM element is found below. */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Copy the cached beacon into the space after the command header;
	 * the available room is the frame union minus the header. */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Fixed TX parameters: broadcast station, infinite lifetime,
	 * sequence/TSF handled by uCode, station-rate selection. */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Point the uCode at the TIM element inside the copied beacon. */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Lowest mandatory rate, alternating TX antenna; CCK rates need
	 * the CCK flag set in rate_n_flags. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3827
3828int
3829il4965_send_beacon_cmd(struct il_priv *il)
3830{
3831 struct il_frame *frame;
3832 unsigned int frame_size;
3833 int rc;
3834
3835 frame = il4965_get_free_frame(il);
3836 if (!frame) {
3837 IL_ERR("Could not obtain free frame buffer for beacon "
3838 "command.\n");
3839 return -ENOMEM;
3840 }
3841
3842 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3843 if (!frame_size) {
3844 IL_ERR("Error configuring the beacon command\n");
3845 il4965_free_frame(il, frame);
3846 return -EINVAL;
3847 }
3848
3849 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3850
3851 il4965_free_frame(il, frame);
3852
3853 return rc;
3854}
3855
3856static inline dma_addr_t
3857il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
3858{
3859 struct il_tfd_tb *tb = &tfd->tbs[idx];
3860
3861 dma_addr_t addr = get_unaligned_le32(&tb->lo);
3862 if (sizeof(dma_addr_t) > sizeof(u32))
3863 addr |=
3864 ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
3865 16;
3866
3867 return addr;
3868}
3869
3870static inline u16
3871il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3872{
3873 struct il_tfd_tb *tb = &tfd->tbs[idx];
3874
3875 return le16_to_cpu(tb->hi_n_len) >> 4;
3876}
3877
3878static inline void
3879il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
3880{
3881 struct il_tfd_tb *tb = &tfd->tbs[idx];
3882 u16 hi_n_len = len << 4;
3883
3884 put_unaligned_le32(addr, &tb->lo);
3885 if (sizeof(dma_addr_t) > sizeof(u32))
3886 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
3887
3888 tb->hi_n_len = cpu_to_le16(hi_n_len);
3889
3890 tfd->num_tbs = idx + 1;
3891}
3892
3893static inline u8
3894il4965_tfd_get_num_tbs(struct il_tfd *tfd)
3895{
3896 return tfd->num_tbs & 0x1f;
3897}
3898
3899
3900
3901
3902
3903
3904
3905
3906
/*
 * Unmap and free everything attached to the TFD at the queue's read
 * pointer: the DMA mappings of all its transfer buffers and, for data
 * queues, the associated skb.
 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity: a valid TFD never claims IL_NUM_OF_TBS or more TBs. */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* TB 0 (the command/header buffer) was mapped via the meta entry,
	 * so it is unmapped from there rather than from the TFD itself. */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Remaining TBs carry payload fragments; their DMA addresses and
	 * lengths are read back out of the TFD. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* Command queues have no skbs array; data queues release the skb
	 * slot at the read pointer. */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* Can be called from IRQ context. */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
3951
3952int
3953il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
3954 dma_addr_t addr, u16 len, u8 reset, u8 pad)
3955{
3956 struct il_queue *q;
3957 struct il_tfd *tfd, *tfd_tmp;
3958 u32 num_tbs;
3959
3960 q = &txq->q;
3961 tfd_tmp = (struct il_tfd *)txq->tfds;
3962 tfd = &tfd_tmp[q->write_ptr];
3963
3964 if (reset)
3965 memset(tfd, 0, sizeof(*tfd));
3966
3967 num_tbs = il4965_tfd_get_num_tbs(tfd);
3968
3969
3970 if (num_tbs >= IL_NUM_OF_TBS) {
3971 IL_ERR("Error can not send more than %d chunks\n",
3972 IL_NUM_OF_TBS);
3973 return -EINVAL;
3974 }
3975
3976 BUG_ON(addr & ~DMA_BIT_MASK(36));
3977 if (unlikely(addr & ~IL_TX_DMA_MASK))
3978 IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
3979
3980 il4965_tfd_set_tb(tfd, num_tbs, addr, len);
3981
3982 return 0;
3983}
3984
3985
3986
3987
3988
3989
3990
3991
3992int
3993il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
3994{
3995 int txq_id = txq->q.id;
3996
3997
3998 il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
3999
4000 return 0;
4001}
4002
4003
4004
4005
4006
4007
4008static void
4009il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
4010{
4011 struct il_rx_pkt *pkt = rxb_addr(rxb);
4012 struct il_alive_resp *palive;
4013 struct delayed_work *pwork;
4014
4015 palive = &pkt->u.alive_frame;
4016
4017 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
4018 palive->is_valid, palive->ver_type, palive->ver_subtype);
4019
4020 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
4021 D_INFO("Initialization Alive received.\n");
4022 memcpy(&il->card_alive_init, &pkt->u.alive_frame,
4023 sizeof(struct il_init_alive_resp));
4024 pwork = &il->init_alive_start;
4025 } else {
4026 D_INFO("Runtime Alive received.\n");
4027 memcpy(&il->card_alive, &pkt->u.alive_frame,
4028 sizeof(struct il_alive_resp));
4029 pwork = &il->alive_start;
4030 }
4031
4032
4033
4034 if (palive->is_valid == UCODE_VALID_OK)
4035 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
4036 else
4037 IL_WARN("uCode did not respond OK.\n");
4038}
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050static void
4051il4965_bg_stats_periodic(unsigned long data)
4052{
4053 struct il_priv *il = (struct il_priv *)data;
4054
4055 if (test_bit(S_EXIT_PENDING, &il->status))
4056 return;
4057
4058
4059 if (!il_is_ready_rf(il))
4060 return;
4061
4062 il_send_stats_request(il, CMD_ASYNC, false);
4063}
4064
/*
 * Handle a beacon notification from the uCode: record whether we are
 * currently the IBSS beacon manager (and dump TX details when debugging).
 */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
4082
/*
 * React to the uCode entering critical-temperature (CT) kill: stop all
 * TX queues and put the device into a low-activity state until it cools.
 */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* Tell the uCode we want CT-kill exit notification; the read-back
	 * flushes the posted write. */
	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	_il_rd(il, CSR_UCODE_DRV_GP1);

	/* NOTE(review): grab-then-release of NIC access under reg_lock
	 * appears to be used purely to force the device awake briefly so
	 * the GP1 write lands — confirm against other il_grab_nic_access
	 * call sites. */
	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
4102
4103
4104
/*
 * Handle a CARD_STATE notification: track HW/SW/CT kill-switch changes,
 * block or unblock host commands accordingly, and propagate HW rfkill
 * state to mac80211.
 */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of status bits before we modify them, used below to
	 * detect an actual RFKILL transition. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
		/* Any disable reason: block further host commands. */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ...but if RXON itself is not disabled, immediately clear
		 * the command block again. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* Notify mac80211 only on an actual RFKILL state change;
	 * otherwise wake anyone blocked on the command queue. */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
/*
 * Populate the RX notification dispatch table (indexed by the command id
 * in the packet header, consumed by il4965_rx_handle()).
 */
static void
il4965_setup_handlers(struct il_priv *il)
{
	/* Generic uCode notifications shared with the common layer. */
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/* Statistics: both the explicit request reply and the periodic
	 * notification. */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* Rfkill / card-state changes. */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;

	/* RX path: PHY info precedes the MPDU; legacy N_RX shares the
	 * same handler as N_RX_MPDU. */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	il->handlers[N_RX] = il4965_hdl_rx;

	/* Aggregation block-ack and TX completion. */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;

	il->handlers[C_TX] = il4965_hdl_tx;
}
4194
4195
4196
4197
4198
4199
4200
4201
/*
 * Drain the RX ring: for every closed receive buffer, unmap it, dispatch
 * the contained notification to its handler, reclaim command buffers,
 * and recycle the page back into the ring (re-mapping it for DMA).
 */
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* 'r' = hardware's closed-buffer index (from the status page),
	 * 'i' = our software read index; process [i, r). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Nothing pending. */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* Count how far the write pointer lags; if more than half the
	 * ring is empty, replenish aggressively inside the loop. */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* A closed slot must have a buffer attached. */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		/* Give the CPU ownership of the page before reading it. */
		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		/* NOTE(review): 'len' (frame length + len_n_flags word) is
		 * computed but not used below. */
		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);

		/* A packet is a command response to reclaim unless it is
		 * an unsolicited RX frame or one of the async notifications
		 * listed here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
		    (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
		    (pkt->hdr.cmd != N_RX_MPDU) &&
		    (pkt->hdr.cmd != N_COMPRESSED_BA) &&
		    (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);

		/* Dispatch to the handler registered in
		 * il4965_setup_handlers(), if any. */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed. */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/* Complete the host command so the TX command queue slot
		 * can be reused; a handler may have stolen the page
		 * (rxb->page set to NULL). */
		if (reclaim) {
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Recycle the buffer: re-map the page for DMA and put it
		 * on rx_free, or park it on rx_used if the page is gone
		 * or mapping failed. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);

			if (unlikely(pci_dma_mapping_error(il->pci_dev,
							   rxb->page_dma))) {
				__il_free_pages(il, rxb->page);
				rxb->page = NULL;
				list_add_tail(&rxb->list, &rxq->rx_used);
			} else {
				list_add_tail(&rxb->list, &rxq->rx_free);
				rxq->free_count++;
			}
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;

		/* When the ring is running dry, restock every 8 buffers
		 * instead of waiting until the loop finishes. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
4339
4340
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* Wait for any in-flight hard IRQ handler to finish, then kill
	 * the bottom-half tasklet; this order matters because the IRQ
	 * handler is what schedules the tasklet. */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4348
/*
 * Bottom half of the interrupt handler: read and acknowledge the
 * interrupt cause registers, then service each asserted condition
 * (hardware error, rfkill, CT kill, SW error, wakeup, RX, FH TX).
 */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets serviced below. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts; error
		 * handling takes over and the tasklet exits early. */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed. */
		if (!test_bit(S_ALIVE, &il->status)) {
			if (hw_rf_kill)
				set_bit(S_RFKILL, &il->status);
			else
				clear_bit(S_RFKILL, &il->status);
			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep. */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;

		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4533
4534
4535
4536
4537
4538
4539
4540#ifdef CONFIG_IWLEGACY_DEBUG
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553static ssize_t
4554il4965_show_debug_level(struct device *d, struct device_attribute *attr,
4555 char *buf)
4556{
4557 struct il_priv *il = dev_get_drvdata(d);
4558 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
4559}
4560
4561static ssize_t
4562il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4563 const char *buf, size_t count)
4564{
4565 struct il_priv *il = dev_get_drvdata(d);
4566 unsigned long val;
4567 int ret;
4568
4569 ret = strict_strtoul(buf, 0, &val);
4570 if (ret)
4571 IL_ERR("%s is not in hex or decimal form.\n", buf);
4572 else
4573 il->debug_level = val;
4574
4575 return strnlen(buf, count);
4576}
4577
/* sysfs: "debug_level" (rw) — read/modify the driver debug bitmask. */
static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
		   il4965_store_debug_level);
4580
4581#endif
4582
4583static ssize_t
4584il4965_show_temperature(struct device *d, struct device_attribute *attr,
4585 char *buf)
4586{
4587 struct il_priv *il = dev_get_drvdata(d);
4588
4589 if (!il_is_alive(il))
4590 return -EAGAIN;
4591
4592 return sprintf(buf, "%d\n", il->temperature);
4593}
4594
/* sysfs: "temperature" (ro) — current device temperature. */
static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
4596
4597static ssize_t
4598il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4599{
4600 struct il_priv *il = dev_get_drvdata(d);
4601
4602 if (!il_is_ready_rf(il))
4603 return sprintf(buf, "off\n");
4604 else
4605 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4606}
4607
4608static ssize_t
4609il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4610 const char *buf, size_t count)
4611{
4612 struct il_priv *il = dev_get_drvdata(d);
4613 unsigned long val;
4614 int ret;
4615
4616 ret = strict_strtoul(buf, 10, &val);
4617 if (ret)
4618 IL_INFO("%s is not in decimal form.\n", buf);
4619 else {
4620 ret = il_set_tx_power(il, val, false);
4621 if (ret)
4622 IL_ERR("failed setting tx power (0x%d).\n", ret);
4623 else
4624 ret = count;
4625 }
4626 return ret;
4627}
4628
/* sysfs: "tx_power" (rw) — user TX power limit in dBm. */
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
		   il4965_store_tx_power);
4631
/* Attributes exported under the PCI device's sysfs directory; registered
 * as one group from il4965_ucode_callback(). */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL			/* sentinel */
};

static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put attributes in the device directory itself */
	.attrs = il_sysfs_entries,
};
4645
4646
4647
4648
4649
4650
4651
/*
 * Free all PCI-coherent buffers holding the uCode images (runtime code
 * and data, data backup, init code and data, bootstrap).
 */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4662
4663static void
4664il4965_nic_start(struct il_priv *il)
4665{
4666
4667 _il_wr(il, CSR_RESET, 0);
4668}
4669
/* Forward declarations: async firmware-load completion callback and
 * mac80211 registration helper, both defined later in this file. */
static void il4965_ucode_callback(const struct firmware *ucode_raw,
				  void *context);
static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4673
4674static int __must_check
4675il4965_request_firmware(struct il_priv *il, bool first)
4676{
4677 const char *name_pre = il->cfg->fw_name_pre;
4678 char tag[8];
4679
4680 if (first) {
4681 il->fw_idx = il->cfg->ucode_api_max;
4682 sprintf(tag, "%d", il->fw_idx);
4683 } else {
4684 il->fw_idx--;
4685 sprintf(tag, "%d", il->fw_idx);
4686 }
4687
4688 if (il->fw_idx < il->cfg->ucode_api_min) {
4689 IL_ERR("no suitable firmware found!\n");
4690 return -ENOENT;
4691 }
4692
4693 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4694
4695 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4696
4697 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4698 &il->pci_dev->dev, GFP_KERNEL, il,
4699 il4965_ucode_callback);
4700}
4701
/* Pointers into (and sizes of) the five uCode images carved out of a
 * single firmware file by il4965_load_firmware(). */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4706
/*
 * Parse the raw firmware file: read the header, validate the overall
 * size against the five declared image sizes, and set up the pointers
 * in @pieces.  Returns 0 on success or -EINVAL on a malformed file.
 */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	/* Caller guarantees ucode_raw->size >= 4, so reading 'ver' is
	 * safe before the full header-size check below. */
	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* All API versions this driver knows use the v1 header:
		 * 24 bytes (version + five image sizes). */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* The file must contain exactly header + the five images; any
	 * mismatch means a truncated or corrupt download. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Images are laid out back to back in declaration order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4760
4761
4762
4763
4764
4765
4766
4767static void
4768il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
4769{
4770 struct il_priv *il = context;
4771 struct il_ucode_header *ucode;
4772 int err;
4773 struct il4965_firmware_pieces pieces;
4774 const unsigned int api_max = il->cfg->ucode_api_max;
4775 const unsigned int api_min = il->cfg->ucode_api_min;
4776 u32 api_ver;
4777
4778 u32 max_probe_length = 200;
4779 u32 standard_phy_calibration_size =
4780 IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
4781
4782 memset(&pieces, 0, sizeof(pieces));
4783
4784 if (!ucode_raw) {
4785 if (il->fw_idx <= il->cfg->ucode_api_max)
4786 IL_ERR("request for firmware file '%s' failed.\n",
4787 il->firmware_name);
4788 goto try_again;
4789 }
4790
4791 D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
4792 ucode_raw->size);
4793
4794
4795 if (ucode_raw->size < 4) {
4796 IL_ERR("File size way too small!\n");
4797 goto try_again;
4798 }
4799
4800
4801 ucode = (struct il_ucode_header *)ucode_raw->data;
4802
4803 err = il4965_load_firmware(il, ucode_raw, &pieces);
4804
4805 if (err)
4806 goto try_again;
4807
4808 api_ver = IL_UCODE_API(il->ucode_ver);
4809
4810
4811
4812
4813
4814
4815 if (api_ver < api_min || api_ver > api_max) {
4816 IL_ERR("Driver unable to support your firmware API. "
4817 "Driver supports v%u, firmware is v%u.\n", api_max,
4818 api_ver);
4819 goto try_again;
4820 }
4821
4822 if (api_ver != api_max)
4823 IL_ERR("Firmware has old API version. Expected v%u, "
4824 "got v%u. New firmware can be obtained "
4825 "from http://www.intellinuxwireless.org.\n", api_max,
4826 api_ver);
4827
4828 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
4829 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
4830 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
4831
4832 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
4833 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
4834 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
4835 IL_UCODE_SERIAL(il->ucode_ver));
4836
4837
4838
4839
4840
4841
4842
4843 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
4844 D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
4845 D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
4846 D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
4847 D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
4848 D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
4849
4850
4851 if (pieces.inst_size > il->hw_params.max_inst_size) {
4852 IL_ERR("uCode instr len %Zd too large to fit in\n",
4853 pieces.inst_size);
4854 goto try_again;
4855 }
4856
4857 if (pieces.data_size > il->hw_params.max_data_size) {
4858 IL_ERR("uCode data len %Zd too large to fit in\n",
4859 pieces.data_size);
4860 goto try_again;
4861 }
4862
4863 if (pieces.init_size > il->hw_params.max_inst_size) {
4864 IL_ERR("uCode init instr len %Zd too large to fit in\n",
4865 pieces.init_size);
4866 goto try_again;
4867 }
4868
4869 if (pieces.init_data_size > il->hw_params.max_data_size) {
4870 IL_ERR("uCode init data len %Zd too large to fit in\n",
4871 pieces.init_data_size);
4872 goto try_again;
4873 }
4874
4875 if (pieces.boot_size > il->hw_params.max_bsm_size) {
4876 IL_ERR("uCode boot instr len %Zd too large to fit in\n",
4877 pieces.boot_size);
4878 goto try_again;
4879 }
4880
4881
4882
4883
4884
4885
4886 il->ucode_code.len = pieces.inst_size;
4887 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
4888
4889 il->ucode_data.len = pieces.data_size;
4890 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
4891
4892 il->ucode_data_backup.len = pieces.data_size;
4893 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
4894
4895 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
4896 !il->ucode_data_backup.v_addr)
4897 goto err_pci_alloc;
4898
4899
4900 if (pieces.init_size && pieces.init_data_size) {
4901 il->ucode_init.len = pieces.init_size;
4902 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
4903
4904 il->ucode_init_data.len = pieces.init_data_size;
4905 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
4906
4907 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
4908 goto err_pci_alloc;
4909 }
4910
4911
4912 if (pieces.boot_size) {
4913 il->ucode_boot.len = pieces.boot_size;
4914 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
4915
4916 if (!il->ucode_boot.v_addr)
4917 goto err_pci_alloc;
4918 }
4919
4920
4921
4922 il->sta_key_max_num = STA_KEY_MAX_NUM;
4923
4924
4925
4926
4927 D_INFO("Copying (but not loading) uCode instr len %Zd\n",
4928 pieces.inst_size);
4929 memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
4930
4931 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4932 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
4933
4934
4935
4936
4937
4938 D_INFO("Copying (but not loading) uCode data len %Zd\n",
4939 pieces.data_size);
4940 memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
4941 memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
4942
4943
4944 if (pieces.init_size) {
4945 D_INFO("Copying (but not loading) init instr len %Zd\n",
4946 pieces.init_size);
4947 memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
4948 }
4949
4950
4951 if (pieces.init_data_size) {
4952 D_INFO("Copying (but not loading) init data len %Zd\n",
4953 pieces.init_data_size);
4954 memcpy(il->ucode_init_data.v_addr, pieces.init_data,
4955 pieces.init_data_size);
4956 }
4957
4958
4959 D_INFO("Copying (but not loading) boot instr len %Zd\n",
4960 pieces.boot_size);
4961 memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
4962
4963
4964
4965
4966
4967 il->_4965.phy_calib_chain_noise_reset_cmd =
4968 standard_phy_calibration_size;
4969 il->_4965.phy_calib_chain_noise_gain_cmd =
4970 standard_phy_calibration_size + 1;
4971
4972
4973
4974
4975
4976
4977 err = il4965_mac_setup_register(il, max_probe_length);
4978 if (err)
4979 goto out_unbind;
4980
4981 err = il_dbgfs_register(il, DRV_NAME);
4982 if (err)
4983 IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
4984 err);
4985
4986 err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
4987 if (err) {
4988 IL_ERR("failed to create sysfs device attributes\n");
4989 goto out_unbind;
4990 }
4991
4992
4993 release_firmware(ucode_raw);
4994 complete(&il->_4965.firmware_loading_complete);
4995 return;
4996
4997try_again:
4998
4999 if (il4965_request_firmware(il, false))
5000 goto out_unbind;
5001 release_firmware(ucode_raw);
5002 return;
5003
5004err_pci_alloc:
5005 IL_ERR("failed to allocate pci memory\n");
5006 il4965_dealloc_ucode_pci(il);
5007out_unbind:
5008 complete(&il->_4965.firmware_loading_complete);
5009 device_release_driver(&il->pci_dev->dev);
5010 release_firmware(ucode_raw);
5011}
5012
/* Human-readable names for small uCode error codes; indexed directly by
 * the error code in il4965_desc_lookup(). */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
5043
/* Sparse error-code -> name table for large codes, scanned linearly by
 * il4965_desc_lookup(); the final "ADVANCED_SYSASSERT" entry doubles as
 * the catch-all when no code matches. */
static struct {
	char *name;
	u8 num;
} advanced_lookup[] = {
	{
	"NMI_INTERRUPT_WDG", 0x34}, {
	"SYSASSERT", 0x35}, {
	"UCODE_VERSION_MISMATCH", 0x37}, {
	"BAD_COMMAND", 0x38}, {
	"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
	"FATAL_ERROR", 0x3D}, {
	"NMI_TRM_HW_ERR", 0x46}, {
	"NMI_INTERRUPT_TRM", 0x4C}, {
	"NMI_INTERRUPT_BREAK_POINT", 0x54}, {
	"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
	"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
	"NMI_INTERRUPT_HOST", 0x66}, {
	"NMI_INTERRUPT_ACTION_PT", 0x7C}, {
	"NMI_INTERRUPT_UNKNOWN", 0x84}, {
	"NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
"ADVANCED_SYSASSERT", 0},};
5065
5066static const char *
5067il4965_desc_lookup(u32 num)
5068{
5069 int i;
5070 int max = ARRAY_SIZE(desc_lookup_text);
5071
5072 if (num < max)
5073 return desc_lookup_text[num];
5074
5075 max = ARRAY_SIZE(advanced_lookup) - 1;
5076 for (i = 0; i < max; i++) {
5077 if (advanced_lookup[i].num == num)
5078 break;
5079 }
5080 return advanced_lookup[i].name;
5081}
5082
/* Layout of the uCode error log in device SRAM: one leading count word,
 * followed by 7-word entries. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
5085
/*
 * il4965_dump_nic_error_log - dump the uCode error event table to the log
 *
 * The table lives in device SRAM; its base address is reported in the
 * "alive" response of whichever image (init or runtime) is loaded.  The
 * fixed word offsets read below follow the uCode's error table layout.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* pick the error table of the currently loaded uCode image */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* first word of the table is the number of logged errors */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* fixed word offsets into the error table */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;	/* remember last error for stats */
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
5133
5134static void
5135il4965_rf_kill_ct_config(struct il_priv *il)
5136{
5137 struct il_ct_kill_config cmd;
5138 unsigned long flags;
5139 int ret = 0;
5140
5141 spin_lock_irqsave(&il->lock, flags);
5142 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
5143 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
5144 spin_unlock_irqrestore(&il->lock, flags);
5145
5146 cmd.critical_temperature_R =
5147 cpu_to_le32(il->hw_params.ct_kill_threshold);
5148
5149 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
5150 if (ret)
5151 IL_ERR("C_CT_KILL_CONFIG failed\n");
5152 else
5153 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
5154 "critical temperature is %d\n",
5155 il->hw_params.ct_kill_threshold);
5156}
5157
/*
 * Default Tx queue -> HW FIFO map consumed by il4965_alive_notify():
 * queues 0-3 carry the four mac80211 ACs, queue 4 is the host command
 * queue, queues 5-6 are unused.
 */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
5167
/* Build a contiguous bitmask covering bits [lo, hi] inclusive */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
5169
/*
 * il4965_alive_notify - finish bringing up the Tx scheduler after "alive"
 *
 * Clears the scheduler's SRAM context, points it at the byte-count tables,
 * enables the Tx DMA channels and initializes/activates every Tx queue
 * according to default_queue_to_tx_fifo[].  Runs entirely under il->lock.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear the scheduler's context data area in device SRAM */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	/* ...then the Tx status bitmap area... */
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	/* ...then the translate table for all configured Tx queues */
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell the scheduler where the byte-count tables live in DRAM */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA and credit flow on every Tx DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Enable scheduler-driven automatic DMA retries */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* Reset scheduler read pointer and driver write pointer */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Window size: first u32 of the per-queue SRAM context */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit: second u32 of the per-queue SRAM context */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* Enable scheduler interrupts for all configured queues */
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate queues 0-6 in the scheduler */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* Reset driver-side queue bookkeeping */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	il->txq_ctx_active_msk = 0;

	/* The queue->FIFO map must cover exactly 7 queues */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
5269
5270
5271
5272
5273
5274
/*
 * il4965_alive_start - handle the runtime uCode "alive" notification
 *
 * Verifies the running image, finishes scheduler setup, restores (or
 * re-initializes) the RXON configuration and marks the driver READY.
 * On any failure the restart worker is queued to reload firmware.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* uCode declared itself non-operational */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/*
	 * Verify the image actually running on the device; guards against
	 * a partial or corrupted uCode load.
	 */
	if (il4965_verify_ucode(il)) {
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After this point, the NIC is initialized and operational */
	set_bit(S_ALIVE, &il->status);

	/* Enable the stuck-queue watchdog timer */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* apply staging changes while re-allowing a new association */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data from scratch */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter with the (possibly updated) RXON */
	il_commit_rxon(il);

	/* Program the critical-temperature kill threshold */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5356
5357static void il4965_cancel_deferred_work(struct il_priv *il);
5358
/*
 * __il4965_down - tear the NIC down; caller must hold il->mutex
 *
 * Cancels scans, stops the watchdog, drops all station/key state, stops
 * Tx/Rx DMA and powers the device down.  Status bits that must survive
 * the reset (RFKILL, GEO_CONFIGURED, FW_ERROR, EXIT_PENDING) are
 * carefully preserved across the status-word rewrite below.
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* remember whether EXIT_PENDING was already set by someone else */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop the stuck-queue watchdog before freeing its queues */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/*
	 * Drop key state under sta_lock; the uCode-side keys become
	 * stale once the device is reset below.
	 */
	spin_lock_irq(&il->sta_lock);
	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any callers waiting on host commands */
	wake_up_all(&il->wait_command_queue);

	/*
	 * Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module (we set it ourselves above).
	 */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/*
	 * If HW was never initialized there is nothing more to tear down;
	 * keep only the sticky bits and bail out.
	 */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear everything except the bits listed here */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/*
	 * Stop Tx/Rx DMA and power the device down, all under reg_lock so
	 * no other register access races with the teardown.
	 */
	spin_lock_irq(&il->reg_lock);

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);

	/* Power-down the device's busmaster DMA clocks */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Release our request to keep the MAC awake */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device and put it in low power state */
	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il4965_clear_free_frames(il);
}
5461
/*
 * il4965_down - take the NIC down, serialized by il->mutex
 *
 * Deferred work is cancelled only AFTER dropping the mutex: the work
 * handlers in this file take il->mutex themselves, so a synchronous
 * cancel under the mutex could deadlock against a running handler.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5471
5472
5473static void
5474il4965_set_hw_ready(struct il_priv *il)
5475{
5476 int ret;
5477
5478 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5479 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5480
5481
5482 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5483 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5484 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5485 100);
5486 if (ret >= 0)
5487 il->hw_ready = true;
5488
5489 D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
5490}
5491
5492static void
5493il4965_prepare_card_hw(struct il_priv *il)
5494{
5495 int ret;
5496
5497 il->hw_ready = false;
5498
5499 il4965_set_hw_ready(il);
5500 if (il->hw_ready)
5501 return;
5502
5503
5504 il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
5505
5506 ret =
5507 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5508 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
5509 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
5510
5511
5512 if (ret != -ETIMEDOUT)
5513 il4965_set_hw_ready(il);
5514}
5515
/* Max attempts __il4965_up() makes to load the bootstrap uCode */
#define MAX_HW_RESTARTS 5
5517
/*
 * __il4965_up - bring the NIC up; caller must hold il->mutex
 *
 * Checks preconditions (no pending exit, uCode present, HW ready,
 * rfkill not asserted), initializes the NIC and tries up to
 * MAX_HW_RESTARTS times to load the bootstrap uCode and start the card.
 * Returns 0 on success (or when rfkill blocks bringup), -errno on error.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* If platform's RF kill switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* must be initialized before the first firmware interrupt */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared
	 * (intentionally written twice) */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy the original uCode data image into the backup cache, used
	 * to re-initialize the processor's data SRAM for a clean start
	 * each time the runtime program loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load the bootstrap program into the processor's memory;
		 * it will then fetch the "initialize" uCode */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* start card; the "initialize" image loads the runtime one */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* all attempts failed: take everything back down */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5616
5617
5618
5619
5620
5621
5622
5623static void
5624il4965_bg_init_alive_start(struct work_struct *data)
5625{
5626 struct il_priv *il =
5627 container_of(data, struct il_priv, init_alive_start.work);
5628
5629 mutex_lock(&il->mutex);
5630 if (test_bit(S_EXIT_PENDING, &il->status))
5631 goto out;
5632
5633 il->ops->init_alive_start(il);
5634out:
5635 mutex_unlock(&il->mutex);
5636}
5637
5638static void
5639il4965_bg_alive_start(struct work_struct *data)
5640{
5641 struct il_priv *il =
5642 container_of(data, struct il_priv, alive_start.work);
5643
5644 mutex_lock(&il->mutex);
5645 if (test_bit(S_EXIT_PENDING, &il->status))
5646 goto out;
5647
5648 il4965_alive_start(il);
5649out:
5650 mutex_unlock(&il->mutex);
5651}
5652
5653static void
5654il4965_bg_run_time_calib_work(struct work_struct *work)
5655{
5656 struct il_priv *il = container_of(work, struct il_priv,
5657 run_time_calib_work);
5658
5659 mutex_lock(&il->mutex);
5660
5661 if (test_bit(S_EXIT_PENDING, &il->status) ||
5662 test_bit(S_SCANNING, &il->status)) {
5663 mutex_unlock(&il->mutex);
5664 return;
5665 }
5666
5667 if (il->start_calib) {
5668 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5669 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5670 }
5671
5672 mutex_unlock(&il->mutex);
5673}
5674
/*
 * il4965_bg_restart - firmware/device restart worker
 *
 * If S_FW_ERROR is set the interface is torn down and mac80211 is asked
 * to restart the hardware (it will call our start callback again);
 * otherwise a plain down/up cycle is performed here.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		/* mac80211 will re-open the interface for us */
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* bail if a teardown started while we were down */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5705
5706static void
5707il4965_bg_rx_replenish(struct work_struct *data)
5708{
5709 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
5710
5711 if (test_bit(S_EXIT_PENDING, &il->status))
5712 return;
5713
5714 mutex_lock(&il->mutex);
5715 il4965_rx_replenish(il);
5716 mutex_unlock(&il->mutex);
5717}
5718
5719
5720
5721
5722
5723
5724
/* How long il4965_mac_start() waits for the runtime uCode ALIVE handshake */
#define UCODE_READY_TIMEOUT (4 * HZ)
5726
5727
5728
5729
5730
/*
 * il4965_mac_setup_register - register the device with mac80211
 *
 * Fills in hw/wiphy capabilities (flags, interface modes, bands, queue
 * count, scan limits) and calls ieee80211_register_hw().
 *
 * @max_probe_length: largest probe request the uCode accepts; used to
 *	derive wiphy->max_scan_ie_len.
 * Returns 0 on success or the ieee80211_register_hw() error code.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our hardware characteristics */
	hw->flags =
	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
	    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
	    IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
	    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
	if (il->cfg->sku & IL_SKU_N)
		hw->flags |=
		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
	    WIPHY_FLAG_IBSS_RSN;

	/* Powersave is off by default */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* probe IE budget: total minus the 24-byte 802.11 header and the
	 * 2-byte SSID element header */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* four EDCA QoS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* publish only bands that actually have channels */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];
	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5793
/*
 * il4965_mac_start - mac80211 start callback
 *
 * Brings the NIC up and, unless rfkill is asserted, waits up to
 * UCODE_READY_TIMEOUT for the runtime-uCode ALIVE handshake (S_READY).
 * Returns 0 on success, -errno on bringup failure or ALIVE timeout.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* rfkill: __il4965_up() returned 0 without loading uCode */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for ALIVE from the runtime uCode; until then other
	 * mac80211 callbacks cannot run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* re-check: S_READY may have been set right at timeout */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5835
/*
 * il4965_mac_stop - mac80211 stop callback
 *
 * Takes the device down, flushes all driver work, then re-enables the
 * rfkill interrupt so user space still sees rfkill state changes while
 * the interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* User space may expect rfkill change notifications even while
	 * the interface is down: ack stale interrupts, re-arm rfkill */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5859
5860void
5861il4965_mac_tx(struct ieee80211_hw *hw,
5862 struct ieee80211_tx_control *control,
5863 struct sk_buff *skb)
5864{
5865 struct il_priv *il = hw->priv;
5866
5867 D_MACDUMP("enter\n");
5868
5869 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
5870 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5871
5872 if (il4965_tx_skb(il, control->sta, skb))
5873 dev_kfree_skb_any(skb);
5874
5875 D_MACDUMP("leave\n");
5876}
5877
5878void
5879il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5880 struct ieee80211_key_conf *keyconf,
5881 struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
5882{
5883 struct il_priv *il = hw->priv;
5884
5885 D_MAC80211("enter\n");
5886
5887 il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);
5888
5889 D_MAC80211("leave\n");
5890}
5891
/*
 * il4965_mac_set_key - mac80211 set_key callback
 *
 * Programs or removes a hardware crypto key.  Legacy "default" WEP
 * group keys use a dedicated host command; everything else goes through
 * the dynamic key-mapping path.  Returns 0 or a negative error
 * (-EOPNOTSUPP when hw crypto is disabled or unsupported for the case).
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS - the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - ad-hoc group key\n");
		return -EOPNOTSUPP;
	}

	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/*
	 * If this is a WEP group key (no sta) and no key-mapping keys
	 * have been installed, we are in legacy WEP mode (group key
	 * only); legacy WEP uses a different host command than the
	 * dynamic key-mapping path.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5967
/*
 * il4965_mac_ampdu_action - mac80211 A-MPDU aggregation callback
 *
 * Starts/stops Rx and Tx aggregation sessions for @sta/@tid; requires
 * an 802.11n-capable SKU.  While the driver is tearing down
 * (S_EXIT_PENDING) stop requests report success so mac80211 can finish
 * its own cleanup.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
			u8 buf_size)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* nothing to do for this device */
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
6015
/*
 * il4965_mac_sta_add - mac80211 sta_add callback
 *
 * Registers the station with the uCode and initializes rate scaling
 * for it.  Returns 0 on success or the il_add_station_common() error.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	/* on a STATION vif the peer being added is our AP */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	/* mark invalid until the uCode assigns an id */
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* keep sta_id invalid so other paths ignore this station */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
6051
/*
 * il4965_mac_channel_switch - mac80211 channel_switch callback
 *
 * Validates the target channel, mirrors the current HT configuration
 * into the staging RXON, and asks the device to perform the switch.
 * S_CHANNEL_SWITCH_PENDING stays set until the switch completes; on
 * immediate failure it is cleared and mac80211 is notified.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	/* refuse while exiting, scanning, or with a switch in flight */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* already on the requested channel - nothing to do */
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Mirror the current HT configuration into driver state */
	il->ht.enabled = conf_is_ht(conf);
	if (il->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			il->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			il->ht.is_40mhz = true;
		} else {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_NONE;
			il->ht.is_40mhz = false;
		}
	} else
		il->ht.is_40mhz = false;

	/* if we are switching from ht to 2.4 clear flags on a new channel */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);

	/* Ask the device to switch; on immediate failure undo the
	 * pending state and tell mac80211 the switch did not happen. */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
6140
/*
 * il4965_configure_filter - mac80211 configure_filter callback
 *
 * Translates the requested FIF_* flags into RXON filter flags in the
 * staging RXON.  The change is not committed to the device here; it
 * takes effect with the next RXON commit.  On return *total_flags is
 * reduced to the subset of filters this driver supports.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* accumulate flags to set (filter_or) and to clear (filter_nand) */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Report back which filters we actually honor.  Receiving all
	 * multicast frames is always on via the default RXON flags since
	 * we do not program multicast filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
6187
6188
6189
6190
6191
6192
6193
6194static void
6195il4965_bg_txpower_work(struct work_struct *work)
6196{
6197 struct il_priv *il = container_of(work, struct il_priv,
6198 txpower_work);
6199
6200 mutex_lock(&il->mutex);
6201
6202
6203
6204
6205
6206 if (test_bit(S_EXIT_PENDING, &il->status) ||
6207 test_bit(S_SCANNING, &il->status))
6208 goto out;
6209
6210
6211
6212
6213 il->ops->send_tx_power(il);
6214
6215
6216
6217 il->last_temperature = il->temperature;
6218out:
6219 mutex_unlock(&il->mutex);
6220}
6221
/*
 * il4965_setup_deferred_work - create the driver workqueue and attach
 * all work items, timers and the interrupt tasklet.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	/* NOTE(review): allocation result is not checked for NULL here —
	 * confirm callers guarantee/verify it before queueing work */
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	/* periodic statistics timer */
	init_timer(&il->stats_periodic);
	il->stats_periodic.data = (unsigned long)il;
	il->stats_periodic.function = il4965_bg_stats_periodic;

	/* stuck-queue watchdog */
	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
6251
/*
 * il4965_cancel_deferred_work - cancel the work items and timers set up
 * in il4965_setup_deferred_work().  Called without il->mutex held (the
 * handlers take the mutex themselves).
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	/* NOTE(review): non-sync cancel, unlike init_alive_start above —
	 * a late run is harmless since il4965_bg_alive_start() re-checks
	 * S_EXIT_PENDING under the mutex; confirm this is intentional */
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
6264
6265static void
6266il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
6267{
6268 int i;
6269
6270 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
6271 rates[i].bitrate = il_rates[i].ieee * 5;
6272 rates[i].hw_value = i;
6273 rates[i].hw_value_short = i;
6274 rates[i].flags = 0;
6275 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
6276
6277
6278
6279 rates[i].flags |=
6280 (il_rates[i].plcp ==
6281 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6282 }
6283 }
6284}
6285
6286
6287
6288
/*
 * il4965_set_wr_ptrs - move both the driver (HBUS) write pointer and the
 * scheduler read pointer of Tx queue @txq_id to @idx.
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	/* queue id lives in bits 8+ of the HBUS write-pointer register */
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6295
6296void
6297il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
6298 int tx_fifo_id, int scd_retry)
6299{
6300 int txq_id = txq->q.id;
6301
6302
6303 int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
6304
6305
6306 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
6307 (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
6308 (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
6309 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
6310 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
6311 IL49_SCD_QUEUE_STTS_REG_MSK);
6312
6313 txq->sched_retry = scd_retry;
6314
6315 D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
6316 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
6317}
6318
/*
 * mac80211 callbacks for the 4965.  il4965_* handlers are defined in
 * this file; the il_mac_* handlers are the shared iwlegacy
 * implementations.
 */
const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
	.flush = il_mac_flush,
};
6341
/*
 * il4965_init_drv - one-time driver state initialization
 *
 * Sets up locks, lists, default operating parameters, the regulatory
 * channel map and the ieee80211 geo/rate tables.  On failure unwinds
 * already-acquired resources via goto-cleanup.  Returns 0 or -errno.
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
6391
/*
 * il4965_uninit_drv - undo il4965_init_drv
 * @il: device context
 *
 * Frees the geo (band/rate) tables, the regulatory channel map, and any
 * allocated scan command buffer.  kfree() tolerates a NULL scan_cmd.
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6399
/*
 * il4965_hw_detect - cache hardware revision identifiers
 * @il: device context
 *
 * Reads the CSR hardware revision and workaround-revision registers and
 * the PCI revision ID, storing them in the device context for later use.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6408
/*
 * Default sensitivity-calibration ranges for the 4965.  Installed into
 * il->hw_params.sens by il4965_set_hw_params(); presumably consumed by
 * the shared sensitivity-calibration code -- TODO confirm against
 * common.c usage.
 */
static struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
6435
6436static void
6437il4965_set_hw_params(struct il_priv *il)
6438{
6439 il->hw_params.bcast_id = IL4965_BROADCAST_ID;
6440 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
6441 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
6442 if (il->cfg->mod_params->amsdu_size_8K)
6443 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
6444 else
6445 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
6446
6447 il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
6448
6449 if (il->cfg->mod_params->disable_11n)
6450 il->cfg->sku &= ~IL_SKU_N;
6451
6452 if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
6453 il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
6454 il->cfg->num_of_queues =
6455 il->cfg->mod_params->num_of_queues;
6456
6457 il->hw_params.max_txq_num = il->cfg->num_of_queues;
6458 il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
6459 il->hw_params.scd_bc_tbls_size =
6460 il->cfg->num_of_queues *
6461 sizeof(struct il4965_scd_bc_tbl);
6462
6463 il->hw_params.tfd_size = sizeof(struct il_tfd);
6464 il->hw_params.max_stations = IL4965_STATION_COUNT;
6465 il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
6466 il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
6467 il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
6468 il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
6469
6470 il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
6471
6472 il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
6473 il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
6474 il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
6475 il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
6476
6477 il->hw_params.ct_kill_threshold =
6478 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
6479
6480 il->hw_params.sens = &il4965_sensitivity;
6481 il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
6482}
6483
/*
 * il4965_pci_probe - PCI probe: bring one 4965 device up
 * @pdev: the PCI device being probed
 * @ent:  matching entry from il4965_hw_card_ids (driver_data is il_cfg)
 *
 * Allocates the mac80211 hw / il_priv pair, maps BAR0, resets and
 * identifies the hardware, reads the EEPROM (for MAC address and
 * calibration), initializes driver state and the interrupt path, then
 * starts asynchronous firmware loading.  mac80211 registration happens
 * later from the firmware-loaded callback (outside this function).
 *
 * Return: 0 on success, negative errno on failure.  All partially
 * acquired resources are released through the goto-unwind chain below.
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocating HW data
	 ************************/

	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/**************************
	 * 2. Initializing PCI bus
	 **************************/
	/* Disable all PCIe link power-save states (ASPM and clock PM);
	 * NOTE(review): presumably the hardware misbehaves with these
	 * enabled -- confirm against the device errata. */
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Prefer a 36-bit DMA mask; fall back to 32-bit if unsupported */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err =
			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/***********************
	 * 3. Read REV register
	 ***********************/
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* These spin locks will be used in apm_init and EEPROM access
	 * and should be initialized before that */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/* Assert a "Nevo" soft reset before touching the NIC further;
	 * NOTE(review): presumably needed so the device is in a known
	 * state before hw detect / EEPROM reads -- confirm. */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* Clear the PCI retry timeout register; NOTE(review): looks like
	 * this prevents PCI Tx retries from interfering with low-power
	 * states -- confirm against original driver comments. */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* extract MAC Address from the EEPROM */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Setup HW constants
	 ************************/
	il4965_set_hw_params(il);

	/*******************
	 * 6. Setup il
	 *******************/
	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;

	/********************
	 * 7. Setup services
	 ********************/
	/* Interrupts stay masked until the firmware-load path enables them */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*********************************************
	 * 8. Enable interrupts
	 *********************************************/
	/* Re-enable legacy INTx in PCI_COMMAND in case some other code
	 * (or the MSI enable above) left it disabled */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* If RF-kill switch is on, tell mac80211 radio is disabled */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	init_completion(&il->_4965.firmware_loading_complete);

	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
6695
/*
 * il4965_pci_remove - PCI remove: tear down one 4965 device
 * @pdev: the PCI device being removed
 *
 * Reverses il4965_pci_probe: waits for any in-flight firmware load,
 * unregisters debugfs/sysfs and mac80211, stops the hardware, releases
 * DMA/IRQ/PCI resources and finally frees the mac80211 hw structure.
 */
static void
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Firmware loading is asynchronous; don't tear down under it */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* ieee80211_unregister_hw call wil cause il_mac_stop to
	 * be called and il4965_down since we are removing the device
	 * we need to set S_EXIT_PENDING bit. */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/* Make sure device is reset to low power before unloading driver.
	 * NOTE(review): il4965_down above presumably leaves the device
	 * stopped; the explicit il_apm_stop guarantees it -- confirm. */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/* flush any queued work before destroying the workqueue */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw above already flushed mac80211's work;
	 * now the workqueue itself can go away */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il4965_uninit_drv(il);

	/* dev_kfree_skb tolerates a NULL skb */
	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6775
6776
6777
6778
6779
/*
 * il4965_txq_set_sched - write the Tx-scheduler queue-enable mask
 * @il:   device context
 * @mask: bitmask of Tx queues the scheduler should service
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6785
6786
6787
6788
6789
6790
6791
6792
6793static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
6794 {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
6795 {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
6796 {0}
6797};
6798MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6799
/* PCI driver glue: probe/remove plus optional PM ops (suspend/resume) */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = il4965_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};
6807
6808static int __init
6809il4965_init(void)
6810{
6811
6812 int ret;
6813 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6814 pr_info(DRV_COPYRIGHT "\n");
6815
6816 ret = il4965_rate_control_register();
6817 if (ret) {
6818 pr_err("Unable to register rate control algorithm: %d\n", ret);
6819 return ret;
6820 }
6821
6822 ret = pci_register_driver(&il4965_driver);
6823 if (ret) {
6824 pr_err("Unable to initialize PCI module\n");
6825 goto error_register;
6826 }
6827
6828 return ret;
6829
6830error_register:
6831 il4965_rate_control_unregister();
6832 return ret;
6833}
6834
/*
 * il4965_exit - module exit point
 *
 * Unregisters the PCI driver first (triggering remove for bound
 * devices), then the rate-control algorithm.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6841
module_exit(il4965_exit);
module_init(il4965_init);

#ifdef CONFIG_IWLEGACY_DEBUG
/* debug mask is runtime-writable (S_IWUSR) via sysfs */
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Load-time options; defaults are set in il4965_mod_params above */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
		   S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
6861