1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "common.h"
54#include "4965.h"
55
56
57
58
59
60
61
62
63
64
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
66
67#ifdef CONFIG_IWLEGACY_DEBUG
68#define VD "d"
69#else
70#define VD
71#endif
72
73#define DRV_VERSION IWLWIFI_VERSION VD
74
75MODULE_DESCRIPTION(DRV_DESCRIPTION);
76MODULE_VERSION(DRV_VERSION);
77MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
78MODULE_LICENSE("GPL");
79MODULE_ALIAS("iwl4965");
80
81void
82il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
83{
84 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
85 IL_ERR("Tx flush command to flush out all frames\n");
86 if (!test_bit(S_EXIT_PENDING, &il->status))
87 queue_work(il->workqueue, &il->tx_flush);
88 }
89}
90
91
92
93
/* Default module parameters; restart_fw enabled by default
 * (presumably: restart firmware after a uCode error — confirm against
 * the il_mod_params definition in common.h). */
struct il_mod_params il4965_mod_params = {
	.restart_fw = 1,

};
98
/*
 * il4965_rx_queue_reset - return all Rx buffers to the "used" pool
 *
 * Unmaps and frees any page still attached to a pool entry, moves every
 * pool entry onto rx_used, clears the queue[] shadow array and resets the
 * read/write pointers.  Runs entirely under rxq->lock.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Walk the whole pool (free + queued buffers). */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {

		/* A non-NULL page means the buffer is still DMA-mapped:
		 * unmap and release it before recycling the pool entry. */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Drop all references from the descriptor shadow array. */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers. */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
131
/*
 * il4965_rx_init - program the Rx DMA channel registers
 *
 * Stops the Rx DMA channel, points the hardware at the RBD ring and the
 * Rx status area, then re-enables the channel with the configured receive
 * buffer size.  Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs == 2^8 — TODO confirm value in common.h */
	u32 rb_timeout = 0;	/* no interrupt coalescing timeout on RBs */

	/* Receive buffer size follows the amsdu_size_8K module parameter. */
	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA before reprogramming (same write il4965_rxq_stop uses). */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find the RBD circular buffer (256-byte aligned). */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status (16-byte aligned). */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/*
	 * Enable Rx DMA:
	 *  - interrupts directed to the host,
	 *  - single-frame mode,
	 *  - chosen RB size and RBD-count log2,
	 *  - rb_timeout in the IRQ-threshold field.
	 */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs — TODO confirm) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
175
/*
 * il4965_set_pwr_vmain - select VMAIN as the device power source
 *
 * Sets the power-source field of APMG_PS_CTRL_REG to VMAIN while leaving
 * the other bits of the register untouched (the inverted mask limits the
 * update to the power-source bits).
 */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
193
/*
 * il4965_hw_nic_init - bring the NIC hardware to an initialized state
 *
 * Performs APM init, power-source and NIC configuration, then allocates
 * (first call) or resets (subsequent calls) the Rx and Tx queues.  Sets
 * S_INIT in il->status on success.
 *
 * Returns 0 on success, -ENOMEM if the Rx queue cannot be allocated, or
 * the error from il4965_txq_ctx_alloc().
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	spin_lock_irqsave(&il->lock, flags);
	il_apm_init(il);
	/* Use shorter interrupt coalescing during calibration. */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);
	il4965_nic_config(il);

	/* Allocate the RX queue on first init, otherwise just reset it. */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate or reset Tx queue context, mirroring the Rx path above. */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
243
244
245
246
247static inline __le32
248il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
249{
250 return cpu_to_le32((u32) (dma_addr >> 8));
251}
252
253
254
255
256
257
258
259
260
261
262
263
/*
 * il4965_rx_queue_restock - move buffers from rx_free into the RBD ring
 *
 * Fills empty ring slots with buffers from the rx_free list so the device
 * can DMA received frames into them.  Schedules the replenish worker when
 * the free pool drops to the low watermark, and updates the device write
 * pointer — apparently only in bursts of 8 slots (write & ~0x7).
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* If the pool is getting low, schedule background replenishment. */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
305
306
307
308
309
310
311
312
313
/*
 * il4965_rx_allocate - allocate and DMA-map pages for the rx_used entries
 *
 * Repeatedly takes an entry off rx_used, backs it with a freshly allocated
 * and DMA-mapped page, and moves it to rx_free, until rx_used is empty or
 * allocation/mapping fails.  @priority is GFP_KERNEL (process context) or
 * GFP_ATOMIC (interrupt context).
 *
 * The queue lock is dropped while allocating; rx_used is re-checked after
 * the lock is retaken since another context may have drained it.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	dma_addr_t page_dma;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation-failure warnings while we still have
		 * plenty of free buffers. */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		/* Multi-page buffers need __GFP_COMP for compound pages. */
		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer (allocation may sleep for
		 * GFP_KERNEL, hence the lock is not held here). */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			/* Failure while already low on buffers is serious:
			 * report it (rate-limited). */
			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		/* Get physical address of the RB for device DMA. */
		page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
			/* Mapping failed: free the page and give up. */
			__free_pages(page, il->hw_params.rx_page_order);
			break;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* rx_used may have been drained while the lock was dropped:
		 * if so, undo the map/alloc and stop. */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			pci_unmap_page(il->pci_dev, page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}

		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		BUG_ON(rxb->page);

		rxb->page = page;
		rxb->page_dma = page_dma;
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
395
/*
 * il4965_rx_replenish - allocate Rx buffers and restock the ring
 *
 * Process-context variant: uses GFP_KERNEL (may sleep) and restocks the
 * hardware ring under il->lock.
 */
void
il4965_rx_replenish(struct il_priv *il)
{
	unsigned long flags;

	il4965_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il4965_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
407
/*
 * il4965_rx_replenish_now - atomic-context variant of il4965_rx_replenish
 *
 * Uses GFP_ATOMIC and restocks without taking il->lock (safe to call from
 * the Rx interrupt path).
 */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
415
416
417
418
419
420
/*
 * il4965_rx_queue_free - free all Rx queue resources
 *
 * Unmaps and frees every buffer page still in the pool, then releases the
 * coherent DMA memory used for the RBD ring and the Rx status area.
 * Counterpart of il_rx_queue_alloc().
 */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* RBD ring: RX_QUEUE_SIZE descriptors of 4 bytes each. */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
442
/*
 * il4965_rxq_stop - stop the Rx DMA channel
 *
 * Writes 0 to the channel config register and polls for the channel-idle
 * status bit (timeout argument 1000 — presumably microseconds, confirm
 * against _il_poll_bit).  Note: a timeout is only logged; the function
 * always returns 0.
 */
int
il4965_rxq_stop(struct il_priv *il)
{
	int ret;

	_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   1000);
	if (ret < 0)
		IL_ERR("Can't stop Rx DMA.\n");

	return 0;
}
458
459int
460il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
461{
462 int idx = 0;
463 int band_offset = 0;
464
465
466 if (rate_n_flags & RATE_MCS_HT_MSK) {
467 idx = (rate_n_flags & 0xff);
468 return idx;
469
470 } else {
471 if (band == NL80211_BAND_5GHZ)
472 band_offset = IL_FIRST_OFDM_RATE;
473 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
474 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
475 return idx - band_offset;
476 }
477
478 return -1;
479}
480
/*
 * il4965_calc_rssi - compute signal strength (dBm) from a PHY result
 *
 * Takes the maximum RSSI over the antennas flagged valid, subtracts the
 * AGC gain and the fixed 4965 offset.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* Non-config PHY data lives in the raw buffer of the PHY result. */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	/* AGC gain in dB, extracted from the agc_info field. */
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	/* Bitmap of antennas with valid RSSI readings. */
	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* RSSI values are stored at even offsets (i << 1) of rssi_info;
	 * take the maximum over antennas A/B/C. */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max rssi - agc - constant offset */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
515
/*
 * il4965_translate_rx_status - normalize MPDU decryption status bits
 *
 * MPDU frames report decryption status in a different bit layout than
 * plain N_RX frames; translate @decrypt_in into the N_RX-style status
 * word consumed by il_set_decrypted_flag().
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* No security: nothing more to translate. */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* Security-type error: pass through as-is. */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* Decryption not finished: no OK/failure bit to set. */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* CCMP: MIC check result decides good/bad. */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_TKIP:	/* unreachable label? — see below */
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through - TTAK OK, check ICV like WEP below */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
575
576#define SMALL_PACKET_SIZE 256
577
/*
 * il4965_pass_packet_to_mac80211 - deliver a received frame to mac80211
 *
 * Builds an skb around the frame: small frames (<= SMALL_PACKET_SIZE) are
 * copied into the skb, larger frames attach the Rx page as a fragment and
 * transfer page ownership (rxb->page is set to NULL so the caller will
 * not recycle it).  Drops the frame if the interface is closed or if
 * hardware decryption reported a failure.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* Receiving a frame on a passive channel proves Tx is allowed:
	 * wake any queues stopped for that reason. */
	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (len <= SMALL_PACKET_SIZE) {
		/* Small frame: copy payload into the skb head. */
		skb_put_data(skb, hdr, len);
	} else {
		/* Large frame: attach the Rx page as a fragment and take
		 * over ownership of the page. */
		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
				len, PAGE_SIZE << il->hw_params.rx_page_order);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
}
622
623
624
/*
 * il4965_hdl_rx - handle an Rx notification from the uCode
 *
 * Two layouts arrive here: N_RX carries its PHY result in-line, while the
 * MPDU variant relies on the PHY result cached by il4965_hdl_rx_phy().
 * Validates the frame (PHY size, CRC/FIFO status), fills in an
 * ieee80211_rx_status and passes the frame to mac80211.
 */
static void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/*
	 * Locate header, payload and trailing status word inside the raw
	 * packet buffer; the layout differs between the two command types.
	 */
	if (pkt->hdr.cmd == N_RX) {
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		/* 802.11 header follows the PHY result and its variable
		 * config area (cfg_phy_cnt bytes). */
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		/* Status word sits right after the frame payload. */
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		/* MPDU frame: needs the PHY result cached by a preceding
		 * N_RX_PHY notification. */
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		/* MPDU status bits use a different layout; translate. */
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d\n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	/* Drop frames with CRC error or Rx FIFO overflow. */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries the info to be passed to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
	    NL80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* Cache the uCode beacon timestamp for later use. */
	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/* Antenna bitmap extracted from the PHY flags. */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* Short preamble flag */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;

	/* HT encoding, channel width and short GI from the rate word. */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.encoding = RX_ENC_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.bw = RATE_INFO_BW_40;
	else
		rx_status.bw = RATE_INFO_BW_20;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
		/* Frame is part of an aggregate: tag it with the A-MPDU
		 * reference counter maintained by il4965_hdl_rx_phy(). */
		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status.ampdu_reference = il->_4965.ampdu_ref;
	}

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
756
757
758
/*
 * il4965_hdl_rx_phy - cache the PHY result for a following MPDU frame
 *
 * MPDU notifications carry no PHY data of their own; stash this result
 * so il4965_hdl_rx() can use it.  Also bumps the A-MPDU reference
 * counter used to group aggregated frames.
 */
static void
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	il->_4965.last_phy_res_valid = true;
	il->_4965.ampdu_ref++;
	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct il_rx_phy_res));
}
768
/*
 * il4965_get_channels_for_scan - fill the scan command's channel list
 *
 * Walks the cfg80211 scan request's channels for @band, validates each
 * against the EEPROM channel info, and writes one il_scan_channel entry
 * per usable channel into @scan_ch.  Returns the number of entries added.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum nl80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must exceed active dwell. */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* Active scan only where regulatory rules and the request
		 * allow it; otherwise scan passively. */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_NO_IR))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* Gain encoding differs per band — values presumably taken
		 * from the 4965 hardware spec; TODO confirm. */
		if (band == NL80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
847
848static void
849il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
850{
851 int i;
852 u8 ind = *ant;
853
854 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
855 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
856 if (valid & BIT(ind)) {
857 *ant = ind;
858 return;
859 }
860 }
861}
862
/*
 * il4965_request_scan - build and send the C_SCAN host command
 *
 * Assembles the scan command from the pending cfg80211 scan request:
 * dwell/suspend timing, direct-probe SSIDs, Tx rate per band, Rx chain
 * selection and the per-channel list, then issues it synchronously.
 * Must be called with il->mutex held.
 *
 * Returns 0 on success, -ENOMEM if the scan buffer cannot be allocated,
 * -EIO on an invalid band or empty channel list, or the error from
 * il_send_cmd_sync().
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum nl80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	/* Scan buffer is allocated once and reused for later scans. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* suspend_time encoded as quotient (upper bits) plus
		 * remainder in 1024-usec units — TODO confirm encoding
		 * against the uCode API docs. */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Choose probe Tx rate per band; CCK 1M on 2.4 GHz unless the
	 * channel mode is pure-40MHz (no CCK there). */
	switch (il->scan_band) {
	case NL80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case NL80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/* Require good CRC on received beacons only for active scans;
	 * passive scans never abort on CRC. */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate the Tx antenna used for probes on each scan. */
	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Probe request frame (with requested IEs) goes into scan->data. */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	/* Channel list follows the probe request inside scan->data. */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Total command length = header + probe request + channel list. */
	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1051
/*
 * il4965_manage_ibss_station - add or remove the IBSS BSSID station
 *
 * @add: true to add the station for vif's BSSID (storing its station id
 * in the vif private data), false to remove the previously added one.
 * Returns the result of the underlying add/remove call.
 */
int
il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
			   bool add)
{
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return il4965_add_bssid_station(il, vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
				 vif->bss_conf.bssid);
}
1064
1065void
1066il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1067{
1068 lockdep_assert_held(&il->sta_lock);
1069
1070 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1071 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1072 else {
1073 D_TX("free more than tfds_in_queue (%u:%d)\n",
1074 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1075 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1076 }
1077}
1078
1079#define IL_TX_QUEUE_MSK 0xfffff
1080
1081static bool
1082il4965_is_single_rx_stream(struct il_priv *il)
1083{
1084 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1085 il->current_ht_config.single_chain_sufficient;
1086}
1087
1088#define IL_NUM_RX_CHAINS_MULTIPLE 3
1089#define IL_NUM_RX_CHAINS_SINGLE 2
1090#define IL_NUM_IDLE_CHAINS_DUAL 2
1091#define IL_NUM_IDLE_CHAINS_SINGLE 1
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103static int
1104il4965_get_active_rx_chain_count(struct il_priv *il)
1105{
1106
1107 if (il4965_is_single_rx_stream(il))
1108 return IL_NUM_RX_CHAINS_SINGLE;
1109 else
1110 return IL_NUM_RX_CHAINS_MULTIPLE;
1111}
1112
1113
1114
1115
1116
/*
 * il4965_get_idle_rx_chain_count - number of Rx chains kept on while idle
 *
 * Static/dynamic SMPS keep a single chain when idle; SMPS off keeps all
 * active chains.  An unexpected SMPS mode triggers a WARN and falls back
 * to @active_cnt.
 */
static int
il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
{
	switch (il->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
		return active_cnt;
	}
}
1132
1133
1134static u8
1135il4965_count_chain_bitmap(u32 chain_bitmap)
1136{
1137 u8 res;
1138 res = (chain_bitmap & BIT(0)) >> 0;
1139 res += (chain_bitmap & BIT(1)) >> 1;
1140 res += (chain_bitmap & BIT(2)) >> 2;
1141 res += (chain_bitmap & BIT(3)) >> 3;
1142 return res;
1143}
1144
1145
1146
1147
1148
1149
1150
/*
 * il4965_set_rxon_chain - fill the rx_chain field of the staging RXON
 *
 * Computes the valid / active / idle chain counts from the chain-noise
 * calibration results and the SMPS configuration, and decides whether to
 * force MIMO reception (only outside power-save, with enough chains).
 */
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	/* CAM = continuously aware mode, i.e. power management off. */
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Prefer chains validated by chain-noise calibration; fall back
	 * to all hardware-valid chains before calibration has run. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* Counts cannot exceed the number of actually valid chains. */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	il->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only when multiple streams are wanted, enough chains
	 * exist, and we are not in power-save. */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1201
/*
 * il4965_get_fh_string - FH register offset -> register name for dumps
 *
 * IL_CMD presumably expands to "case REG: return #REG;" — see its
 * definition in common.h.  Unknown offsets map to "UNKNOWN".
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1219
/*
 * il4965_dump_fh - dump flow-handler (FH) register values
 *
 * With debug support and @display set, formats the dump into a freshly
 * kmalloc'ed buffer stored in *@buf (caller must kfree it) and returns
 * the number of bytes written.  Otherwise logs the registers via IL_ERR
 * and returns 0.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* 48 bytes per register line plus a header. */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      "  %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	/* No display buffer requested: dump to the kernel log instead. */
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1264
/*
 * il4965_hdl_missed_beacon - handle a missed-beacon notification
 *
 * When the number of consecutively missed beacons exceeds the configured
 * threshold, restart sensitivity calibration (unless a scan is running,
 * which would skew the measurements).
 */
static void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    il->missed_beacon_threshold) {
		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(S_SCANNING, &il->status))
			il4965_init_sensitivity(il);
	}
}
1283
1284
1285
1286
/*
 * il4965_rx_calc_noise - estimate in-band noise from beacon silence RSSI
 *
 * Averages the beacon-silence RSSI over the Rx chains that reported a
 * non-zero value and converts to dBm with a fixed -107 offset.  The
 * result is currently only logged via D_CALIB.
 */
static void
il4965_rx_calc_noise(struct il_priv *il)
{
	struct stats_rx_non_phy *rx_info;
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
	int last_rx_noise;

	rx_info = &(il->_4965.stats.rx.general);
	bcn_silence_a =
	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	bcn_silence_b =
	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	bcn_silence_c =
	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	/* Only chains with a non-zero reading contribute to the average. */
	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas */
	if (num_active_rx)
		last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;

	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
		bcn_silence_b, bcn_silence_c, last_rx_noise);
}
1326
1327#ifdef CONFIG_IWLEGACY_DEBUGFS
1328
1329
1330
1331
1332
/*
 * il4965_accumulative_stats - maintain accumulated/delta/max statistics
 *
 * Walks the new stats notification word-by-word against the previously
 * stored copy; for every counter that increased, records the delta, adds
 * it to the running accumulator and tracks the maximum delta seen.  The
 * first word is skipped (loop starts at sizeof(__le32) — presumably the
 * flags/header word, confirm against struct il_notif_stats).
 *
 * Debugfs-only (CONFIG_IWLEGACY_DEBUGFS).
 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;

	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		/* Only growing counters are accumulated; a decrease
		 * (e.g. after a firmware restart) is ignored. */
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* Temperature and timestamp are snapshots, not counters. */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
1367#endif
1368
/*
 * il4965_hdl_stats - handle a uCode statistics notification
 *
 * Snapshots the new statistics, re-arms the periodic recalibration
 * timer, and kicks off noise calculation / runtime calibration work
 * when appropriate.
 */
static void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
 const int recalib_seconds = 60;
 bool change;
 struct il_rx_pkt *pkt = rxb_addr(rxb);

 D_RX("Statistics notification received (%d vs %d).\n",
      (int)sizeof(struct il_notif_stats),
      le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

 /* Detect a temperature change or an HT40-mode flip; either one
  * requires a temperature recalibration below. */
 change =
     ((il->_4965.stats.general.common.temperature !=
       pkt->u.stats.general.common.temperature) ||
      ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
       (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
 il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

 /* TODO: reading some of stats is unneeded */
 memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

 set_bit(S_STATS, &il->status);

 /* Re-enable the periodic recalibration timer: if a statistics
  * notification doesn't arrive again within recalib_seconds, the
  * timer fires and requests one. */
 mod_timer(&il->stats_periodic,
    jiffies + msecs_to_jiffies(recalib_seconds * 1000));

 /* Noise calculation and runtime calibration only make sense on
  * an unsolicited (periodic) stats notification while not scanning. */
 if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
     (pkt->hdr.cmd == N_STATS)) {
  il4965_rx_calc_noise(il);
  queue_work(il->workqueue, &il->run_time_calib_work);
 }

 if (change)
  il4965_temperature_calib(il);
}
1410
/*
 * il4965_hdl_c_stats - handle a stats notification that may carry the
 * "counters were cleared" flag, then process it as a normal update.
 */
static void
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
 struct il_rx_pkt *pkt = rxb_addr(rxb);

 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
  /* uCode reset its internal counters; mirror that by
   * clearing the debugfs accumulators. */
  memset(&il->_4965.accum_stats, 0,
         sizeof(struct il_notif_stats));
  memset(&il->_4965.delta_stats, 0,
         sizeof(struct il_notif_stats));
  memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
#endif
  D_RX("Statistics have been cleared\n");
 }
 il4965_hdl_stats(il, rxb);
}
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
/* 802.11 TID (user priority 0..7) -> mac80211 access category mapping,
 * per the standard 802.1D-to-AC table. */
static const u8 tid_to_ac[] = {
 IEEE80211_AC_BE,
 IEEE80211_AC_BK,
 IEEE80211_AC_BK,
 IEEE80211_AC_BE,
 IEEE80211_AC_VI,
 IEEE80211_AC_VI,
 IEEE80211_AC_VO,
 IEEE80211_AC_VO
};
1466
1467static inline int
1468il4965_get_ac_from_tid(u16 tid)
1469{
1470 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1471 return tid_to_ac[tid];
1472
1473
1474 return -EINVAL;
1475}
1476
1477static inline int
1478il4965_get_fifo_from_tid(u16 tid)
1479{
1480 static const u8 ac_to_fifo[] = {
1481 IL_TX_FIFO_VO,
1482 IL_TX_FIFO_VI,
1483 IL_TX_FIFO_BE,
1484 IL_TX_FIFO_BK,
1485 };
1486
1487 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1488 return ac_to_fifo[tid_to_ac[tid]];
1489
1490
1491 return -EINVAL;
1492}
1493
1494
1495
1496
/*
 * il4965_tx_cmd_build_basic - fill in the non-rate, non-crypto parts of
 * a Tx command: ACK/sequence-control flags, TID, protection flags and
 * power-management frame timeout.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
     struct il_tx_cmd *tx_cmd,
     struct ieee80211_tx_info *info,
     struct ieee80211_hdr *hdr, u8 std_id)
{
 __le16 fc = hdr->frame_control;
 __le32 tx_flags = tx_cmd->tx_flags;

 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
  tx_flags |= TX_CMD_FLG_ACK_MSK;
  if (ieee80211_is_mgmt(fc))
   tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
  /* First fragment of a probe response gets a TSF stamp. */
  if (ieee80211_is_probe_resp(fc) &&
      !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
   tx_flags |= TX_CMD_FLG_TSF_MSK;
 } else {
  /* No-ACK frames: uCode assigns the sequence number. */
  tx_flags &= (~TX_CMD_FLG_ACK_MSK);
  tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 }

 /* BlockAck requests expect an immediate BA response. */
 if (ieee80211_is_back_req(fc))
  tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

 tx_cmd->sta_id = std_id;
 if (ieee80211_has_morefrags(fc))
  tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

 if (ieee80211_is_data_qos(fc)) {
  /* QoS data: driver maintains the sequence number per TID. */
  u8 *qc = ieee80211_get_qos_ctl(hdr);
  tx_cmd->tid_tspec = qc[0] & 0xf;
  tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 } else {
  tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 }

 /* RTS/CTS or CTS-to-self protection flags, as mac80211 requests. */
 il_tx_cmd_protection(il, info, fc, &tx_flags);

 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 if (ieee80211_is_mgmt(fc)) {
  /* (re)assoc requests get a slightly longer PM frame timeout
   * than other management frames. */
  if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
   tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
  else
   tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
 } else {
  tx_cmd->timeout.pm_frame_timeout = 0;
 }

 tx_cmd->driver_txop = 0;
 tx_cmd->tx_flags = tx_flags;
 tx_cmd->next_frame_len = 0;
}
1550
/*
 * il4965_tx_cmd_build_rate - choose retry limits and the Tx rate.
 *
 * Data frames are left to the uCode/rate-scaling (STA_RATE flag);
 * everything else is sent at an explicitly chosen legacy rate with
 * antenna toggling.
 */
static void
il4965_tx_cmd_build_rate(struct il_priv *il,
    struct il_tx_cmd *tx_cmd,
    struct ieee80211_tx_info *info,
    struct ieee80211_sta *sta,
    __le16 fc)
{
 const u8 rts_retry_limit = 60;
 u32 rate_flags;
 int rate_idx;
 u8 data_retry_limit;
 u8 rate_plcp;

 /* Probe responses are retried only a few times. */
 if (ieee80211_is_probe_resp(fc))
  data_retry_limit = 3;
 else
  data_retry_limit = IL4965_DEFAULT_TX_RETRY;
 tx_cmd->data_retry_limit = data_retry_limit;
 /* Set retry limit on RTS packets */
 tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);

 /* DATA packets will use the uCode station table for rate/antenna
  * selection */
 if (ieee80211_is_data(fc)) {
  tx_cmd->initial_rate_idx = 0;
  tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
  return;
 }

 /* If the current TX rate stored in mac80211 has the MCS bit set,
  * it's not a valid legacy rate: fall back to the lowest legacy
  * rate for this band.
  * NOTE(review): the bound uses '>' so rate_idx == RATE_COUNT_LEGACY
  * is accepted; confirm that index is valid in il_rates[] (this
  * matches the historical upstream check). */
 rate_idx = info->control.rates[0].idx;
 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
     || rate_idx > RATE_COUNT_LEGACY)
  rate_idx = rate_lowest_index(&il->bands[info->band], sta);
 /* For 5 GHz, skip over the CCK rates at the start of the table. */
 if (info->band == NL80211_BAND_5GHZ)
  rate_idx += IL_FIRST_OFDM_RATE;
 /* Get PLCP rate for tx_cmd->rate_n_flags */
 rate_plcp = il_rates[rate_idx].plcp;
 /* Zero out flags for this packet */
 rate_flags = 0;

 /* Set CCK flag as needed */
 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
  rate_flags |= RATE_MCS_CCK_MSK;

 /* Set up antennas: rotate through valid antennas for management
  * frames to even out wear. */
 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
 rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;

 /* Set the rate in the TX cmd */
 tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
}
1610
/*
 * il4965_tx_cmd_build_hwcrypto - program the Tx command's security
 * fields (cipher type, key material) for hardware encryption.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
        struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
        int sta_id)
{
 struct ieee80211_key_conf *keyconf = info->control.hw_key;

 switch (keyconf->cipher) {
 case WLAN_CIPHER_SUITE_CCMP:
  tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
  memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
  /* Aggregated CCMP frames need the dedicated flag. */
  if (info->flags & IEEE80211_TX_CTL_AMPDU)
   tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
  D_TX("tx_cmd with AES hwcrypto\n");
  break;

 case WLAN_CIPHER_SUITE_TKIP:
  tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
  /* Per-packet phase-2 key derived from the TKIP TSC. */
  ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
  D_TX("tx_cmd with tkip hwcrypto\n");
  break;

 case WLAN_CIPHER_SUITE_WEP104:
  tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
  /* fall through - WEP104 is WEP40 plus the 128-bit key flag */
 case WLAN_CIPHER_SUITE_WEP40:
  tx_cmd->sec_ctl |=
      (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
       TX_CMD_SEC_SHIFT);

  /* WEP key goes after the 3-byte IV space in the key field. */
  memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

  D_TX("Configuring packet for WEP encryption " "with key %d\n",
       keyconf->keyidx);
  break;

 default:
  IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
  break;
 }
}
1652
1653
1654
1655
1656int
1657il4965_tx_skb(struct il_priv *il,
1658 struct ieee80211_sta *sta,
1659 struct sk_buff *skb)
1660{
1661 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1662 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1663 struct il_station_priv *sta_priv = NULL;
1664 struct il_tx_queue *txq;
1665 struct il_queue *q;
1666 struct il_device_cmd *out_cmd;
1667 struct il_cmd_meta *out_meta;
1668 struct il_tx_cmd *tx_cmd;
1669 int txq_id;
1670 dma_addr_t phys_addr;
1671 dma_addr_t txcmd_phys;
1672 dma_addr_t scratch_phys;
1673 u16 len, firstlen, secondlen;
1674 u16 seq_number = 0;
1675 __le16 fc;
1676 u8 hdr_len;
1677 u8 sta_id;
1678 u8 wait_write_ptr = 0;
1679 u8 tid = 0;
1680 u8 *qc = NULL;
1681 unsigned long flags;
1682 bool is_agg = false;
1683
1684 spin_lock_irqsave(&il->lock, flags);
1685 if (il_is_rfkill(il)) {
1686 D_DROP("Dropping - RF KILL\n");
1687 goto drop_unlock;
1688 }
1689
1690 fc = hdr->frame_control;
1691
1692#ifdef CONFIG_IWLEGACY_DEBUG
1693 if (ieee80211_is_auth(fc))
1694 D_TX("Sending AUTH frame\n");
1695 else if (ieee80211_is_assoc_req(fc))
1696 D_TX("Sending ASSOC frame\n");
1697 else if (ieee80211_is_reassoc_req(fc))
1698 D_TX("Sending REASSOC frame\n");
1699#endif
1700
1701 hdr_len = ieee80211_hdrlen(fc);
1702
1703
1704 if (!ieee80211_is_data(fc))
1705 sta_id = il->hw_params.bcast_id;
1706 else {
1707
1708 sta_id = il_sta_id_or_broadcast(il, sta);
1709
1710 if (sta_id == IL_INVALID_STATION) {
1711 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
1712 goto drop_unlock;
1713 }
1714 }
1715
1716 D_TX("station Id %d\n", sta_id);
1717
1718 if (sta)
1719 sta_priv = (void *)sta->drv_priv;
1720
1721 if (sta_priv && sta_priv->asleep &&
1722 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1733 }
1734
1735
1736 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1737
1738
1739 txq_id = skb_get_queue_mapping(skb);
1740
1741
1742 spin_lock(&il->sta_lock);
1743
1744 if (ieee80211_is_data_qos(fc)) {
1745 qc = ieee80211_get_qos_ctl(hdr);
1746 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1747 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1748 spin_unlock(&il->sta_lock);
1749 goto drop_unlock;
1750 }
1751 seq_number = il->stations[sta_id].tid[tid].seq_number;
1752 seq_number &= IEEE80211_SCTL_SEQ;
1753 hdr->seq_ctrl =
1754 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
1755 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1756 seq_number += 0x10;
1757
1758 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1759 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1760 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1761 is_agg = true;
1762 }
1763 }
1764
1765 txq = &il->txq[txq_id];
1766 q = &txq->q;
1767
1768 if (unlikely(il_queue_space(q) < q->high_mark)) {
1769 spin_unlock(&il->sta_lock);
1770 goto drop_unlock;
1771 }
1772
1773 if (ieee80211_is_data_qos(fc)) {
1774 il->stations[sta_id].tid[tid].tfds_in_queue++;
1775 if (!ieee80211_has_morefrags(fc))
1776 il->stations[sta_id].tid[tid].seq_number = seq_number;
1777 }
1778
1779 spin_unlock(&il->sta_lock);
1780
1781 txq->skbs[q->write_ptr] = skb;
1782
1783
1784 out_cmd = txq->cmd[q->write_ptr];
1785 out_meta = &txq->meta[q->write_ptr];
1786 tx_cmd = &out_cmd->cmd.tx;
1787 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1788 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1789
1790
1791
1792
1793
1794
1795
1796 out_cmd->hdr.cmd = C_TX;
1797 out_cmd->hdr.sequence =
1798 cpu_to_le16((u16)
1799 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
1800
1801
1802 memcpy(tx_cmd->hdr, hdr, hdr_len);
1803
1804
1805 tx_cmd->len = cpu_to_le16((u16) skb->len);
1806
1807 if (info->control.hw_key)
1808 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1809
1810
1811 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1812
1813 il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
1825 firstlen = (len + 3) & ~3;
1826
1827
1828 if (firstlen != len)
1829 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1830
1831
1832
1833 txcmd_phys =
1834 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
1835 PCI_DMA_BIDIRECTIONAL);
1836 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
1837 goto drop_unlock;
1838
1839
1840
1841 secondlen = skb->len - hdr_len;
1842 if (secondlen > 0) {
1843 phys_addr =
1844 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
1845 PCI_DMA_TODEVICE);
1846 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
1847 goto drop_unlock;
1848 }
1849
1850
1851
1852 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
1853 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1854 dma_unmap_len_set(out_meta, len, firstlen);
1855 if (secondlen)
1856 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
1857 0, 0);
1858
1859 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1860 txq->need_update = 1;
1861 } else {
1862 wait_write_ptr = 1;
1863 txq->need_update = 0;
1864 }
1865
1866 scratch_phys =
1867 txcmd_phys + sizeof(struct il_cmd_header) +
1868 offsetof(struct il_tx_cmd, scratch);
1869
1870
1871 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
1872 PCI_DMA_BIDIRECTIONAL);
1873 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1874 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1875
1876 il_update_stats(il, true, fc, skb->len);
1877
1878 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
1879 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1880 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
1881 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
1882
1883
1884 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1885 il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
1886
1887 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
1888 PCI_DMA_BIDIRECTIONAL);
1889
1890
1891 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1892 il_txq_update_write_ptr(il, txq);
1893 spin_unlock_irqrestore(&il->lock, flags);
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909 if (sta_priv && sta_priv->client && !is_agg)
1910 atomic_inc(&sta_priv->pending_frames);
1911
1912 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1913 if (wait_write_ptr) {
1914 spin_lock_irqsave(&il->lock, flags);
1915 txq->need_update = 1;
1916 il_txq_update_write_ptr(il, txq);
1917 spin_unlock_irqrestore(&il->lock, flags);
1918 } else {
1919 il_stop_queue(il, txq);
1920 }
1921 }
1922
1923 return 0;
1924
1925drop_unlock:
1926 spin_unlock_irqrestore(&il->lock, flags);
1927 return -1;
1928}
1929
1930static inline int
1931il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1932{
1933 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1934 GFP_KERNEL);
1935 if (!ptr->addr)
1936 return -ENOMEM;
1937 ptr->size = size;
1938 return 0;
1939}
1940
1941static inline void
1942il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1943{
1944 if (unlikely(!ptr->addr))
1945 return;
1946
1947 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1948 memset(ptr, 0, sizeof(*ptr));
1949}
1950
1951
1952
1953
1954
1955
1956void
1957il4965_hw_txq_ctx_free(struct il_priv *il)
1958{
1959 int txq_id;
1960
1961
1962 if (il->txq) {
1963 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1964 if (txq_id == il->cmd_queue)
1965 il_cmd_queue_free(il);
1966 else
1967 il_tx_queue_free(il, txq_id);
1968 }
1969 il4965_free_dma_ptr(il, &il->kw);
1970
1971 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1972
1973
1974 il_free_txq_mem(il);
1975}
1976
1977
1978
1979
1980
1981
1982
1983
/*
 * il4965_txq_ctx_alloc - allocate all Tx queues and their shared DMA
 * resources, then initialize each queue.
 *
 * Returns 0 on success, a negative errno on failure (everything
 * allocated so far is released).
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
 int ret, txq_id;
 unsigned long flags;

 /* Free all tx/cmd queues and keep-warm buffer */
 il4965_hw_txq_ctx_free(il);

 /* Alloc scheduler byte-count tables. */
 ret =
     il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
     il->hw_params.scd_bc_tbls_size);
 if (ret) {
  IL_ERR("Scheduler BC Table allocation failed\n");
  goto error_bc_tbls;
 }
 /* Alloc keep-warm buffer */
 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
 if (ret) {
  IL_ERR("Keep Warm allocation failed\n");
  goto error_kw;
 }

 /* Alloc the queue bookkeeping array. */
 ret = il_alloc_txq_mem(il);
 if (ret)
  goto error;

 spin_lock_irqsave(&il->lock, flags);

 /* Turn off all Tx DMA fifos */
 il4965_txq_set_sched(il, 0);

 /* Tell NIC where to find the "keep warm" buffer */
 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

 spin_unlock_irqrestore(&il->lock, flags);

 /* Alloc and init all Tx queues, including the command queue */
 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
  ret = il_tx_queue_init(il, txq_id);
  if (ret) {
   IL_ERR("Tx %d queue init failed\n", txq_id);
   goto error;
  }
 }

 return ret;

error:
 il4965_hw_txq_ctx_free(il);
 /* NOTE(review): il4965_hw_txq_ctx_free() above already released
  * kw and scd_bc_tbls; the calls below are redundant but harmless
  * since il4965_free_dma_ptr() is a no-op on a cleared descriptor. */
 il4965_free_dma_ptr(il, &il->kw);
error_kw:
 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
 return ret;
}
2041
2042void
2043il4965_txq_ctx_reset(struct il_priv *il)
2044{
2045 int txq_id;
2046 unsigned long flags;
2047
2048 spin_lock_irqsave(&il->lock, flags);
2049
2050
2051 il4965_txq_set_sched(il, 0);
2052
2053 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2054
2055 spin_unlock_irqrestore(&il->lock, flags);
2056
2057
2058 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2059 il_tx_queue_reset(il, txq_id);
2060}
2061
2062static void
2063il4965_txq_ctx_unmap(struct il_priv *il)
2064{
2065 int txq_id;
2066
2067 if (!il->txq)
2068 return;
2069
2070
2071 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2072 if (txq_id == il->cmd_queue)
2073 il_cmd_queue_unmap(il);
2074 else
2075 il_tx_queue_unmap(il, txq_id);
2076}
2077
2078
2079
2080
/*
 * il4965_txq_ctx_stop - stop the Tx scheduler and all Tx DMA channels
 */
void
il4965_txq_ctx_stop(struct il_priv *il)
{
 int ch, ret;

 /* Turn off the scheduler: no queue is serviced any more. */
 _il_wr_prph(il, IL49_SCD_TXFACT, 0);

 /* Stop each Tx DMA channel and wait (up to 1000us) for it to
  * report idle. */
 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
  _il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
  ret =
      _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
     FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
     FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
     1000);
  if (ret < 0)
   IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
          ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
 }
}
2101
2102
2103
2104
2105
2106
2107
2108static int
2109il4965_txq_ctx_activate_free(struct il_priv *il)
2110{
2111 int txq_id;
2112
2113 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2114 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2115 return txq_id;
2116 return -1;
2117}
2118
2119
2120
2121
/*
 * il4965_tx_queue_stop_scheduler - detach a Tx queue from the scheduler
 */
static void
il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
{
 /* Simply stop the queue, but don't change any configuration;
  * the SCD_ACT_EN bit enables the write of the ACTIVE bit. */
 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
     (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
     (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
2131
2132
2133
2134
2135static int
2136il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2137{
2138 u32 tbl_dw_addr;
2139 u32 tbl_dw;
2140 u16 scd_q2ratid;
2141
2142 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2143
2144 tbl_dw_addr =
2145 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2146
2147 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2148
2149 if (txq_id & 0x1)
2150 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2151 else
2152 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2153
2154 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2155
2156 return 0;
2157}
2158
2159
2160
2161
2162
2163
2164
/*
 * il4965_txq_agg_enable - configure a hardware queue for AMPDU
 * aggregation of one <sta,tid> pair, starting at sequence ssn_idx.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
        int tid, u16 ssn_idx)
{
 unsigned long flags;
 u16 ra_tid;
 int ret;

 /* Aggregation may only use the dedicated AMPDU queue range. */
 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
     (IL49_FIRST_AMPDU_QUEUE +
      il->cfg->num_of_ampdu_queues <= txq_id)) {
  IL_WARN("queue number out of range: %d, must be %d to %d\n",
   txq_id, IL49_FIRST_AMPDU_QUEUE,
   IL49_FIRST_AMPDU_QUEUE +
   il->cfg->num_of_ampdu_queues - 1);
  return -EINVAL;
 }

 ra_tid = BUILD_RAxTID(sta_id, tid);

 /* Modify device's station table to Tx this TID */
 ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
 if (ret)
  return ret;

 spin_lock_irqsave(&il->lock, flags);

 /* Stop this Tx queue before configuring it */
 il4965_tx_queue_stop_scheduler(il, txq_id);

 /* Map receiver-address / traffic-ID to this queue */
 il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

 /* Set this queue as a chain-building queue */
 il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

 /* Place first TFD at the idx corresponding to the start sequence
  * number (low byte of ssn). */
 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 il4965_set_wr_ptrs(il, txq_id, ssn_idx);

 /* Set up Tx win size and frame limit for this queue */
 il_write_targ_mem(il,
     il->scd_base_addr +
     IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
     (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
     & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

 il_write_targ_mem(il,
     il->scd_base_addr +
     IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
     (SCD_FRAME_LIMIT <<
      IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
     IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

 il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate it */
 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

 spin_unlock_irqrestore(&il->lock, flags);

 return 0;
}
2230
/*
 * il4965_tx_agg_start - mac80211 ADDBA entry point: allocate and set up
 * an aggregation queue for <sta,tid>, returning the starting SSN via
 * @ssn.  The BA session is confirmed immediately if the HW queue is
 * already empty, otherwise once it drains (see il4965_txq_check_empty).
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
      struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
 int sta_id;
 int tx_fifo;
 int txq_id;
 int ret;
 unsigned long flags;
 struct il_tid_data *tid_data;

 /* Also validates tid range (negative errno on bad tid). */
 tx_fifo = il4965_get_fifo_from_tid(tid);
 if (unlikely(tx_fifo < 0))
  return tx_fifo;

 D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

 sta_id = il_sta_id(sta);
 if (sta_id == IL_INVALID_STATION) {
  IL_ERR("Start AGG on invalid station\n");
  return -ENXIO;
 }
 if (unlikely(tid >= MAX_TID_COUNT))
  return -EINVAL;

 if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
  IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
  return -ENXIO;
 }

 /* Claim one of the dedicated aggregation queues. */
 txq_id = il4965_txq_ctx_activate_free(il);
 if (txq_id == -1) {
  IL_ERR("No free aggregation queue available\n");
  return -ENXIO;
 }

 spin_lock_irqsave(&il->sta_lock, flags);
 tid_data = &il->stations[sta_id].tid[tid];
 *ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 tid_data->agg.txq_id = txq_id;
 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
 spin_unlock_irqrestore(&il->sta_lock, flags);

 ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
 if (ret)
  return ret;

 spin_lock_irqsave(&il->sta_lock, flags);
 tid_data = &il->stations[sta_id].tid[tid];
 if (tid_data->tfds_in_queue == 0) {
  /* Queue already drained: the BA session can start now. */
  D_HT("HW queue is empty\n");
  tid_data->agg.state = IL_AGG_ON;
  ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 } else {
  /* Wait for in-flight frames; il4965_txq_check_empty()
   * completes the ADDBA once the queue empties. */
  D_HT("HW queue is NOT empty: %d packets in HW queue\n",
       tid_data->tfds_in_queue);
  tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
 }
 spin_unlock_irqrestore(&il->sta_lock, flags);
 return ret;
}
2293
2294
2295
2296
2297
/*
 * il4965_txq_agg_disable - undo il4965_txq_agg_enable(): detach the
 * queue from the scheduler and deactivate it.
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
 /* Only the dedicated AMPDU queue range is valid here. */
 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
     (IL49_FIRST_AMPDU_QUEUE +
      il->cfg->num_of_ampdu_queues <= txq_id)) {
  IL_WARN("queue number out of range: %d, must be %d to %d\n",
   txq_id, IL49_FIRST_AMPDU_QUEUE,
   IL49_FIRST_AMPDU_QUEUE +
   il->cfg->num_of_ampdu_queues - 1);
  return -EINVAL;
 }

 il4965_tx_queue_stop_scheduler(il, txq_id);

 /* Remove the queue from chain-building mode. */
 il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

 /* Park read/write pointers at the supplied sequence idx. */
 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 /* supposes that ssn_idx is valid (!= 0xFFF) */
 il4965_set_wr_ptrs(il, txq_id, ssn_idx);

 il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
 /* Mark the queue inactive so it can be re-claimed later. */
 il_txq_ctx_deactivate(il, txq_id);
 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

 return 0;
}
2326
/*
 * il4965_tx_agg_stop - mac80211 DELBA entry point: stop aggregation
 * for <sta,tid>.  If the HW queue still has frames, the actual queue
 * teardown is deferred to il4965_txq_check_empty() once it drains.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
     struct ieee80211_sta *sta, u16 tid)
{
 int tx_fifo_id, txq_id, sta_id, ssn;
 struct il_tid_data *tid_data;
 int write_ptr, read_ptr;
 unsigned long flags;

 /* Also validates tid range (negative errno on bad tid). */
 tx_fifo_id = il4965_get_fifo_from_tid(tid);
 if (unlikely(tx_fifo_id < 0))
  return tx_fifo_id;

 sta_id = il_sta_id(sta);

 if (sta_id == IL_INVALID_STATION) {
  IL_ERR("Invalid station for AGG tid %d\n", tid);
  return -ENXIO;
 }

 spin_lock_irqsave(&il->sta_lock, flags);

 tid_data = &il->stations[sta_id].tid[tid];
 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
 txq_id = tid_data->agg.txq_id;

 switch (il->stations[sta_id].tid[tid].agg.state) {
 case IL_EMPTYING_HW_QUEUE_ADDBA:
  /*
   * This can happen if the peer stops aggregation
   * again before we've had a chance to drain the
   * queue we selected previously, i.e. before the
   * session was really started completely.
   */
  D_HT("AGG stop before setup done\n");
  goto turn_off;
 case IL_AGG_ON:
  break;
 default:
  IL_WARN("Stopping AGG while state not ON or starting\n");
 }

 write_ptr = il->txq[txq_id].q.write_ptr;
 read_ptr = il->txq[txq_id].q.read_ptr;

 /* The queue is not empty: defer teardown until it drains. */
 if (write_ptr != read_ptr) {
  D_HT("Stopping a non empty AGG HW QUEUE\n");
  il->stations[sta_id].tid[tid].agg.state =
      IL_EMPTYING_HW_QUEUE_DELBA;
  spin_unlock_irqrestore(&il->sta_lock, flags);
  return 0;
 }

 D_HT("HW queue is empty\n");
turn_off:
 il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

 /* do not restore/save irqs - hand the saved flags from sta_lock
  * over to il->lock; interrupts stay disabled throughout. */
 spin_unlock(&il->sta_lock);
 spin_lock(&il->lock);

 /*
  * the only reason this call can fail is queue number out of range,
  * which can happen if uCode is reloaded and all the station
  * information are lost. if it is outside the range, there is no need
  * to deactivate the uCode queue, just return "success" to keep the
  * mac80211 cleanup flow
  */
 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
 spin_unlock_irqrestore(&il->lock, flags);

 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

 return 0;
}
2404
/*
 * il4965_txq_check_empty - complete a deferred ADDBA or DELBA once the
 * aggregation HW queue for <sta,tid> has drained.  Always returns 0.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
 struct il_queue *q = &il->txq[txq_id].q;
 u8 *addr = il->stations[sta_id].sta.sta.addr;
 struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

 lockdep_assert_held(&il->sta_lock);

 switch (il->stations[sta_id].tid[tid].agg.state) {
 case IL_EMPTYING_HW_QUEUE_DELBA:
  /* We are reclaiming the last packet of the
   * aggregation queue: tear the queue down and
   * notify mac80211 that the session is stopped. */
  if (txq_id == tid_data->agg.txq_id &&
      q->read_ptr == q->write_ptr) {
   u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
   int tx_fifo = il4965_get_fifo_from_tid(tid);
   D_HT("HW queue empty: continue DELBA flow\n");
   il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
   tid_data->agg.state = IL_AGG_OFF;
   ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
  }
  break;
 case IL_EMPTYING_HW_QUEUE_ADDBA:
  /* Last non-aggregated frame reclaimed: the BA session
   * can now start for real. */
  if (tid_data->tfds_in_queue == 0) {
   D_HT("HW queue empty: continue ADDBA flow\n");
   tid_data->agg.state = IL_AGG_ON;
   ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
  }
  break;
 }

 return 0;
}
2440
2441static void
2442il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
2443{
2444 struct ieee80211_sta *sta;
2445 struct il_station_priv *sta_priv;
2446
2447 rcu_read_lock();
2448 sta = ieee80211_find_sta(il->vif, addr1);
2449 if (sta) {
2450 sta_priv = (void *)sta->drv_priv;
2451
2452 if (sta_priv->client &&
2453 atomic_dec_return(&sta_priv->pending_frames) == 0)
2454 ieee80211_sta_block_awake(il->hw, sta, false);
2455 }
2456 rcu_read_unlock();
2457}
2458
2459static void
2460il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
2461{
2462 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2463
2464 if (!is_agg)
2465 il4965_non_agg_tx_status(il, hdr->addr1);
2466
2467 ieee80211_tx_status_irqsafe(il->hw, skb);
2468}
2469
/*
 * il4965_tx_queue_reclaim - reclaim completed TFDs up to (and
 * including) @idx, returning the skbs to mac80211.
 *
 * Returns the number of reclaimed QoS-data frames (used by the caller
 * to adjust tfds_in_queue accounting).
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
 struct il_tx_queue *txq = &il->txq[txq_id];
 struct il_queue *q = &txq->q;
 int nfreed = 0;
 struct ieee80211_hdr *hdr;
 struct sk_buff *skb;

 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
  IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
         "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
         q->write_ptr, q->read_ptr);
  return 0;
 }

 /* Advance read_ptr until it passes idx, freeing each TFD. */
 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
      q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

  skb = txq->skbs[txq->q.read_ptr];

  if (WARN_ON_ONCE(skb == NULL))
   continue;

  /* Only QoS data frames count toward the caller's
   * tfds_in_queue bookkeeping. */
  hdr = (struct ieee80211_hdr *) skb->data;
  if (ieee80211_is_data_qos(hdr->frame_control))
   nfreed++;

  il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

  txq->skbs[txq->q.read_ptr] = NULL;
  il->ops->txq_free_tfd(il, txq);
 }
 return nfreed;
}
2505
2506
2507
2508
2509
2510
2511
/*
 * il4965_tx_status_reply_compressed_ba - process a compressed
 * block-ack notification for one aggregation session.
 *
 * Intersects the BA bitmap from the peer with the bitmap of frames we
 * actually sent, counts successes, and fills the tx_info of the first
 * frame of the aggregate for mac80211.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
         struct il_compressed_ba_resp *ba_resp)
{
 int i, sh, ack;
 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
 int successes = 0;
 struct ieee80211_tx_info *info;
 u64 bitmap, sent_bitmap;

 /* An unexpected BA (no aggregate in flight) is an error. */
 if (unlikely(!agg->wait_for_ba)) {
  if (unlikely(ba_resp->bitmap))
   IL_ERR("Received BA when not expected\n");
  return -EINVAL;
 }

 /* Mark that the expected block-ack response arrived */
 agg->wait_for_ba = 0;
 D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

 /* Calculate shift to align block-ack bits with our Tx win bits;
  * the BA starts at the peer's expected sequence number, which may
  * differ from our first frame's index (mod 256). */
 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
 if (sh < 0)
  sh += 0x100;

 /* Our window can cover at most 64 frames of the shifted bitmap. */
 if (agg->frame_count > (64 - sh)) {
  D_TX_REPLY("more frames than bitmap size");
  return -1;
 }

 /* don't use 64-bit values for now */
 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

 /* check for success or failure according to the
  * transmitted bitmap and block-ack bitmap */
 sent_bitmap = bitmap & agg->bitmap;

 /* For each frame attempted in aggregation,
  * update driver's record of tx frame's status. */
 i = 0;
 while (sent_bitmap) {
  ack = sent_bitmap & 1ULL;
  successes += ack;
  D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
      i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
  sent_bitmap >>= 1;
  ++i;
 }

 D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

 /* Report the aggregate result on the first frame's tx_info. */
 info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
 memset(&info->status, 0, sizeof(info->status));
 info->flags |= IEEE80211_TX_STAT_ACK;
 info->flags |= IEEE80211_TX_STAT_AMPDU;
 info->status.ampdu_ack_len = successes;
 info->status.ampdu_len = agg->frame_count;
 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

 return 0;
}
2574
2575static inline bool
2576il4965_is_tx_success(u32 status)
2577{
2578 status &= TX_STATUS_MSK;
2579 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
2580}
2581
/*
 * il4965_find_station - look up a station table entry by MAC address.
 *
 * Returns the station index, or IL_INVALID_STATION when not found or
 * when the entry is not yet usable (not active in uCode, or an add is
 * still in progress).
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
 int i;
 int start = 0;
 int ret = IL_INVALID_STATION;
 unsigned long flags;

 /* In IBSS the first entries are reserved; start the search at
  * IL_STA_ID. */
 if (il->iw_mode == NL80211_IFTYPE_ADHOC)
  start = IL_STA_ID;

 if (is_broadcast_ether_addr(addr))
  return il->hw_params.bcast_id;

 spin_lock_irqsave(&il->sta_lock, flags);
 for (i = start; i < il->hw_params.max_stations; i++)
  if (il->stations[i].used &&
      ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
   ret = i;
   goto out;
  }

 D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
 /*
  * It may be possible that more commands interacting with stations
  * arrive before we completed processing the adding of
  * station
  */
 if (ret != IL_INVALID_STATION &&
     (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
      ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
       (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
  IL_ERR("Requested station info for sta %d before ready.\n",
         ret);
  ret = IL_INVALID_STATION;
 }
 spin_unlock_irqrestore(&il->sta_lock, flags);
 return ret;
}
2623
2624static int
2625il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2626{
2627 if (il->iw_mode == NL80211_IFTYPE_STATION)
2628 return IL_AP_ID;
2629 else {
2630 u8 *da = ieee80211_get_DA(hdr);
2631
2632 return il4965_find_station(il, da);
2633 }
2634}
2635
2636static inline u32
2637il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
2638{
2639 return le32_to_cpup(&tx_resp->u.status +
2640 tx_resp->frame_count) & IEEE80211_MAX_SN;
2641}
2642
2643static inline u32
2644il4965_tx_status_to_mac80211(u32 status)
2645{
2646 status &= TX_STATUS_MSK;
2647
2648 switch (status) {
2649 case TX_STATUS_SUCCESS:
2650 case TX_STATUS_DIRECT_DONE:
2651 return IEEE80211_TX_STAT_ACK;
2652 case TX_STATUS_FAIL_DEST_PS:
2653 return IEEE80211_TX_STAT_TX_FILTERED;
2654 default:
2655 return 0;
2656 }
2657}
2658
2659
2660
2661
/*
 * il4965_tx_status_reply_tx - handle Tx response for an aggregation queue.
 *
 * Records per-frame status from the uCode response into @agg.  For a
 * single-frame "aggregate" the mac80211 tx_info is completed right away;
 * for a real aggregate a bitmap of attempted frames (relative to
 * agg->start_idx) is built and final completion waits for the
 * compressed block-ack.  Returns 0 on success, -1 on inconsistent
 * queue state.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* num frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive,
		 * so report status to mac80211 immediately. */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* Construct bit-map of pending frames within Tx win */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* Frames the uCode did not actually attempt are
			 * excluded from the bitmap */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			/* Sanity check: queue idx must match the frame's
			 * sequence number modulo 256 */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   IEEE80211_SEQ_TO_SN(sc));

			/* Keep bit 0 anchored to the lowest index seen so
			 * far ("start"); indices wrap at 0xff, hence the
			 * three re-anchoring cases below */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
2770
2771
2772
2773
/*
 * il4965_hdl_tx - handle a Tx response notification from the uCode.
 *
 * Completes the skb for the reported queue entry: fills the mac80211
 * tx_info, reclaims finished TFDs and re-wakes the Tx queue when space
 * becomes available.  Aggregation ("sched_retry") queues defer final
 * completion to the block-ack path.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	int uninitialized_var(tid);	/* only assigned for QoS-data frames */
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	/*
	 * The uCode refuses to transmit on a passive channel until a valid
	 * frame has been received on it; stop the queues while that holds.
	 * NOTE(review): queues are presumably re-woken elsewhere once a
	 * frame arrives on the channel — confirm against the Rx path.
	 */
	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
	    il->iw_mode == NL80211_IFTYPE_STATION) {
		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Stopped queues - RX waiting on passive channel\n");
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* Aggregation queue: record per-frame status and reclaim
		 * everything up to the scheduler's SSN */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* single-frame aggregate that failed: flag missing BA */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* Plain queue: complete the frame to mac80211 directly */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2890
2891
2892
2893
2894void
2895il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2896 struct ieee80211_tx_info *info)
2897{
2898 struct ieee80211_tx_rate *r = &info->status.rates[0];
2899
2900 info->status.antenna =
2901 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2902 if (rate_n_flags & RATE_MCS_HT_MSK)
2903 r->flags |= IEEE80211_TX_RC_MCS;
2904 if (rate_n_flags & RATE_MCS_GF_MSK)
2905 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2906 if (rate_n_flags & RATE_MCS_HT40_MSK)
2907 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2908 if (rate_n_flags & RATE_MCS_DUP_MSK)
2909 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2910 if (rate_n_flags & RATE_MCS_SGI_MSK)
2911 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2912 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2913}
2914
2915
2916
2917
2918
2919
2920
/*
 * il4965_hdl_compressed_ba - handle a compressed block-ack notification.
 *
 * The uCode reports which frames of an aggregate the peer acknowledged.
 * Matches the report against the per-TID aggregation state, reclaims
 * TFDs up to the block-ack window start and wakes the Tx queue when
 * space frees up.
 */
static void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx win, corresponds to idx
	 * (in Tx queue's circular buffer) of first TFD/frame in win */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * A BA for a queue the driver no longer associates with
		 * this TID: only log at debug level (this can happen
		 * often and would otherwise flood the syslog).
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* Find idx just before block-ack win */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in win */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack win (the block-ack's initial SSN) */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* reclaim and account the freed TFDs for this station/TID */
		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2997
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il4965_get_tx_fail_reason - map a Tx status code to a printable name.
 * Debug-only helper (compiled under CONFIG_IWLEGACY_DEBUG).
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
/* expand each status constant into a case that returns its name */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif				/* CONFIG_IWLEGACY_DEBUG */
3037
3038static struct il_link_quality_cmd *
3039il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
3040{
3041 int i, r;
3042 struct il_link_quality_cmd *link_cmd;
3043 u32 rate_flags = 0;
3044 __le32 rate_n_flags;
3045
3046 link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
3047 if (!link_cmd) {
3048 IL_ERR("Unable to allocate memory for LQ cmd.\n");
3049 return NULL;
3050 }
3051
3052
3053 if (il->band == NL80211_BAND_5GHZ)
3054 r = RATE_6M_IDX;
3055 else
3056 r = RATE_1M_IDX;
3057
3058 if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
3059 rate_flags |= RATE_MCS_CCK_MSK;
3060
3061 rate_flags |=
3062 il4965_first_antenna(il->hw_params.
3063 valid_tx_ant) << RATE_MCS_ANT_POS;
3064 rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
3065 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
3066 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
3067
3068 link_cmd->general_params.single_stream_ant_msk =
3069 il4965_first_antenna(il->hw_params.valid_tx_ant);
3070
3071 link_cmd->general_params.dual_stream_ant_msk =
3072 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
3073 valid_tx_ant);
3074 if (!link_cmd->general_params.dual_stream_ant_msk) {
3075 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
3076 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
3077 link_cmd->general_params.dual_stream_ant_msk =
3078 il->hw_params.valid_tx_ant;
3079 }
3080
3081 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3082 link_cmd->agg_params.agg_time_limit =
3083 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
3084
3085 link_cmd->sta_id = sta_id;
3086
3087 return link_cmd;
3088}
3089
3090
3091
3092
3093
3094
3095int
3096il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
3097{
3098 int ret;
3099 u8 sta_id;
3100 struct il_link_quality_cmd *link_cmd;
3101 unsigned long flags;
3102
3103 if (sta_id_r)
3104 *sta_id_r = IL_INVALID_STATION;
3105
3106 ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
3107 if (ret) {
3108 IL_ERR("Unable to add station %pM\n", addr);
3109 return ret;
3110 }
3111
3112 if (sta_id_r)
3113 *sta_id_r = sta_id;
3114
3115 spin_lock_irqsave(&il->sta_lock, flags);
3116 il->stations[sta_id].used |= IL_STA_LOCAL;
3117 spin_unlock_irqrestore(&il->sta_lock, flags);
3118
3119
3120 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3121 if (!link_cmd) {
3122 IL_ERR("Unable to initialize rate scaling for station %pM.\n",
3123 addr);
3124 return -ENOMEM;
3125 }
3126
3127 ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
3128 if (ret)
3129 IL_ERR("Link quality command failed (%d)\n", ret);
3130
3131 spin_lock_irqsave(&il->sta_lock, flags);
3132 il->stations[sta_id].lq = link_cmd;
3133 spin_unlock_irqrestore(&il->sta_lock, flags);
3134
3135 return 0;
3136}
3137
3138static int
3139il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
3140{
3141 int i;
3142 u8 buff[sizeof(struct il_wep_cmd) +
3143 sizeof(struct il_wep_key) * WEP_KEYS_MAX];
3144 struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
3145 size_t cmd_size = sizeof(struct il_wep_cmd);
3146 struct il_host_cmd cmd = {
3147 .id = C_WEPKEY,
3148 .data = wep_cmd,
3149 .flags = CMD_SYNC,
3150 };
3151 bool not_empty = false;
3152
3153 might_sleep();
3154
3155 memset(wep_cmd, 0,
3156 cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
3157
3158 for (i = 0; i < WEP_KEYS_MAX; i++) {
3159 u8 key_size = il->_4965.wep_keys[i].key_size;
3160
3161 wep_cmd->key[i].key_idx = i;
3162 if (key_size) {
3163 wep_cmd->key[i].key_offset = i;
3164 not_empty = true;
3165 } else
3166 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
3167
3168 wep_cmd->key[i].key_size = key_size;
3169 memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
3170 }
3171
3172 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
3173 wep_cmd->num_keys = WEP_KEYS_MAX;
3174
3175 cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
3176 cmd.len = cmd_size;
3177
3178 if (not_empty || send_if_empty)
3179 return il_send_cmd(il, &cmd);
3180 else
3181 return 0;
3182}
3183
3184int
3185il4965_restore_default_wep_keys(struct il_priv *il)
3186{
3187 lockdep_assert_held(&il->mutex);
3188
3189 return il4965_static_wepkey_cmd(il, false);
3190}
3191
3192int
3193il4965_remove_default_wep_key(struct il_priv *il,
3194 struct ieee80211_key_conf *keyconf)
3195{
3196 int ret;
3197 int idx = keyconf->keyidx;
3198
3199 lockdep_assert_held(&il->mutex);
3200
3201 D_WEP("Removing default WEP key: idx=%d\n", idx);
3202
3203 memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
3204 if (il_is_rfkill(il)) {
3205 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
3206
3207 return 0;
3208 }
3209 ret = il4965_static_wepkey_cmd(il, 1);
3210 D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
3211
3212 return ret;
3213}
3214
3215int
3216il4965_set_default_wep_key(struct il_priv *il,
3217 struct ieee80211_key_conf *keyconf)
3218{
3219 int ret;
3220 int len = keyconf->keylen;
3221 int idx = keyconf->keyidx;
3222
3223 lockdep_assert_held(&il->mutex);
3224
3225 if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
3226 D_WEP("Bad WEP key length %d\n", keyconf->keylen);
3227 return -EINVAL;
3228 }
3229
3230 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
3231 keyconf->hw_key_idx = HW_KEY_DEFAULT;
3232 il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;
3233
3234 il->_4965.wep_keys[idx].key_size = len;
3235 memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);
3236
3237 ret = il4965_static_wepkey_cmd(il, false);
3238
3239 D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
3240 return ret;
3241}
3242
/*
 * il4965_set_wep_dynamic_key_info - program a pairwise WEP key.
 *
 * Fills the station's key info under sta_lock and issues a synchronous
 * C_ADD_STA with STA_MODIFY_KEY_MASK.  Returns the il_send_add_sta()
 * result.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* driver/uCode handle the IV; mac80211 must not generate it */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* WEP key material goes at offset 3 of the uCode key array —
	 * the first 3 bytes appear reserved (presumably for the IV);
	 * TODO confirm against the 4965 firmware interface definition */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* allocate a uCode key slot only if this station has none yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else: overriding an existing key, keep its offset */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot the command so the lock can be dropped before sending */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3296
/*
 * il4965_set_ccmp_dynamic_key_info - program a pairwise CCMP (AES) key.
 *
 * Fills the station's key info under sta_lock and issues a synchronous
 * C_ADD_STA with STA_MODIFY_KEY_MASK.  Returns the il_send_add_sta()
 * result.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 generates the IV for CCMP */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* allocate a uCode key slot only if this station has none yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else: overriding an existing key, keep its offset */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot the command so the lock can be dropped before sending */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3344
/*
 * il4965_set_tkip_dynamic_key_info - stage a TKIP key in driver tables.
 *
 * Unlike the WEP/CCMP variants this does not issue C_ADD_STA here: the
 * key reaches the uCode later, once phase-1 key data is available (see
 * il4965_update_tkip_key(), which sends the add-sta command).  Always
 * returns 0.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 generates IV and Michael MIC for TKIP */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* TKIP temporal key is 16 bytes; MIC keys are handled by mac80211 */
	il->stations[sta_id].keyinfo.keylen = 16;

	/* allocate a uCode key slot only if this station has none yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else: overriding an existing key, keep its offset */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* keep a driver-side copy of the temporal key */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3389
3390void
3391il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3392 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
3393{
3394 u8 sta_id;
3395 unsigned long flags;
3396 int i;
3397
3398 if (il_scan_cancel(il)) {
3399
3400
3401 return;
3402 }
3403
3404 sta_id = il_sta_id_or_broadcast(il, sta);
3405 if (sta_id == IL_INVALID_STATION)
3406 return;
3407
3408 spin_lock_irqsave(&il->sta_lock, flags);
3409
3410 il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
3411
3412 for (i = 0; i < 5; i++)
3413 il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
3414 cpu_to_le16(phase1key[i]);
3415
3416 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3417 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3418
3419 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3420
3421 spin_unlock_irqrestore(&il->sta_lock, flags);
3422}
3423
/*
 * il4965_remove_dynamic_key - remove a pairwise key from driver + uCode.
 *
 * Clears local key bookkeeping and, unless RFKILL is asserted, pushes an
 * invalidated key entry to the uCode via a synchronous C_ADD_STA.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* The key to remove has a different idx than the one the
		 * uCode currently holds, i.e. it was already replaced by
		 * a newer key — nothing to do, report success. */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* release the uCode key-table slot */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* snapshot the command so the lock can be dropped before sending */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3484
3485int
3486il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3487 u8 sta_id)
3488{
3489 int ret;
3490
3491 lockdep_assert_held(&il->mutex);
3492
3493 il->_4965.key_mapping_keys++;
3494 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3495
3496 switch (keyconf->cipher) {
3497 case WLAN_CIPHER_SUITE_CCMP:
3498 ret =
3499 il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
3500 break;
3501 case WLAN_CIPHER_SUITE_TKIP:
3502 ret =
3503 il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
3504 break;
3505 case WLAN_CIPHER_SUITE_WEP40:
3506 case WLAN_CIPHER_SUITE_WEP104:
3507 ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
3508 break;
3509 default:
3510 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3511 keyconf->cipher);
3512 ret = -EINVAL;
3513 }
3514
3515 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3516 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3517
3518 return ret;
3519}
3520
3521
3522
3523
3524
3525
3526
3527
3528int
3529il4965_alloc_bcast_station(struct il_priv *il)
3530{
3531 struct il_link_quality_cmd *link_cmd;
3532 unsigned long flags;
3533 u8 sta_id;
3534
3535 spin_lock_irqsave(&il->sta_lock, flags);
3536 sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
3537 if (sta_id == IL_INVALID_STATION) {
3538 IL_ERR("Unable to prepare broadcast station\n");
3539 spin_unlock_irqrestore(&il->sta_lock, flags);
3540
3541 return -EINVAL;
3542 }
3543
3544 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
3545 il->stations[sta_id].used |= IL_STA_BCAST;
3546 spin_unlock_irqrestore(&il->sta_lock, flags);
3547
3548 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3549 if (!link_cmd) {
3550 IL_ERR
3551 ("Unable to initialize rate scaling for bcast station.\n");
3552 return -ENOMEM;
3553 }
3554
3555 spin_lock_irqsave(&il->sta_lock, flags);
3556 il->stations[sta_id].lq = link_cmd;
3557 spin_unlock_irqrestore(&il->sta_lock, flags);
3558
3559 return 0;
3560}
3561
3562
3563
3564
3565
3566
3567
3568static int
3569il4965_update_bcast_station(struct il_priv *il)
3570{
3571 unsigned long flags;
3572 struct il_link_quality_cmd *link_cmd;
3573 u8 sta_id = il->hw_params.bcast_id;
3574
3575 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3576 if (!link_cmd) {
3577 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3578 return -ENOMEM;
3579 }
3580
3581 spin_lock_irqsave(&il->sta_lock, flags);
3582 if (il->stations[sta_id].lq)
3583 kfree(il->stations[sta_id].lq);
3584 else
3585 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3586 il->stations[sta_id].lq = link_cmd;
3587 spin_unlock_irqrestore(&il->sta_lock, flags);
3588
3589 return 0;
3590}
3591
/* Refresh rate scaling for all broadcast stations (4965 has one). */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
3597
3598
3599
3600
3601int
3602il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
3603{
3604 unsigned long flags;
3605 struct il_addsta_cmd sta_cmd;
3606
3607 lockdep_assert_held(&il->mutex);
3608
3609
3610 spin_lock_irqsave(&il->sta_lock, flags);
3611 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3612 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3613 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3614 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3615 sizeof(struct il_addsta_cmd));
3616 spin_unlock_irqrestore(&il->sta_lock, flags);
3617
3618 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3619}
3620
3621int
3622il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
3623 u16 ssn)
3624{
3625 unsigned long flags;
3626 int sta_id;
3627 struct il_addsta_cmd sta_cmd;
3628
3629 lockdep_assert_held(&il->mutex);
3630
3631 sta_id = il_sta_id(sta);
3632 if (sta_id == IL_INVALID_STATION)
3633 return -ENXIO;
3634
3635 spin_lock_irqsave(&il->sta_lock, flags);
3636 il->stations[sta_id].sta.station_flags_msk = 0;
3637 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3638 il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
3639 il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3640 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3641 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3642 sizeof(struct il_addsta_cmd));
3643 spin_unlock_irqrestore(&il->sta_lock, flags);
3644
3645 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3646}
3647
3648int
3649il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
3650{
3651 unsigned long flags;
3652 int sta_id;
3653 struct il_addsta_cmd sta_cmd;
3654
3655 lockdep_assert_held(&il->mutex);
3656
3657 sta_id = il_sta_id(sta);
3658 if (sta_id == IL_INVALID_STATION) {
3659 IL_ERR("Invalid station for AGG tid %d\n", tid);
3660 return -ENXIO;
3661 }
3662
3663 spin_lock_irqsave(&il->sta_lock, flags);
3664 il->stations[sta_id].sta.station_flags_msk = 0;
3665 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3666 il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
3667 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3668 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3669 sizeof(struct il_addsta_cmd));
3670 spin_unlock_irqrestore(&il->sta_lock, flags);
3671
3672 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3673}
3674
3675void
3676il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
3677{
3678 unsigned long flags;
3679
3680 spin_lock_irqsave(&il->sta_lock, flags);
3681 il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
3682 il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3683 il->stations[sta_id].sta.sta.modify_mask =
3684 STA_MODIFY_SLEEP_TX_COUNT_MSK;
3685 il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
3686 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3687 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3688 spin_unlock_irqrestore(&il->sta_lock, flags);
3689
3690}
3691
3692void
3693il4965_update_chain_flags(struct il_priv *il)
3694{
3695 if (il->ops->set_rxon_chain) {
3696 il->ops->set_rxon_chain(il);
3697 if (il->active.rx_chain != il->staging.rx_chain)
3698 il_commit_rxon(il);
3699 }
3700}
3701
3702static void
3703il4965_clear_free_frames(struct il_priv *il)
3704{
3705 struct list_head *element;
3706
3707 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3708
3709 while (!list_empty(&il->free_frames)) {
3710 element = il->free_frames.next;
3711 list_del(element);
3712 kfree(list_entry(element, struct il_frame, list));
3713 il->frames_count--;
3714 }
3715
3716 if (il->frames_count) {
3717 IL_WARN("%d frames still in use. Did we lose one?\n",
3718 il->frames_count);
3719 il->frames_count = 0;
3720 }
3721}
3722
3723static struct il_frame *
3724il4965_get_free_frame(struct il_priv *il)
3725{
3726 struct il_frame *frame;
3727 struct list_head *element;
3728 if (list_empty(&il->free_frames)) {
3729 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3730 if (!frame) {
3731 IL_ERR("Could not allocate frame!\n");
3732 return NULL;
3733 }
3734
3735 il->frames_count++;
3736 return frame;
3737 }
3738
3739 element = il->free_frames.next;
3740 list_del(element);
3741 return list_entry(element, struct il_frame, list);
3742}
3743
/*
 * il4965_free_frame - return a frame buffer to the free list
 *
 * The frame is zeroed before being recycled so the next user starts
 * from a clean command buffer.
 */
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
3750
3751static u32
3752il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3753 int left)
3754{
3755 lockdep_assert_held(&il->mutex);
3756
3757 if (!il->beacon_skb)
3758 return 0;
3759
3760 if (il->beacon_skb->len > left)
3761 return 0;
3762
3763 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3764
3765 return il->beacon_skb->len;
3766}
3767
3768
/*
 * il4965_set_beacon_tim - locate the TIM element and tell the uCode
 *
 * Walks the beacon's information elements starting at the fixed-length
 * header boundary and, if a TIM element is found, records its offset and
 * size in the TX beacon command so the device can update it per-DTIM.
 */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The IE list starts right after the fixed beacon fields;
	 * u.beacon.variable gives that boundary, so this subtraction is
	 * the byte offset of the first IE within the frame.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Scan TLV elements: each entry is id(1) + len(1) + payload(len).
	 * The (frame_size - 2) bound guarantees the id/len pair is
	 * readable before we dereference beacon[tim_idx + 1]. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* Found a TIM element inside the frame? Record offset and length. */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
3795
/*
 * il4965_hw_get_beacon_cmd - build a C_TX_BEACON command in @frame
 *
 * Fills the TX beacon command (frame payload, TX flags, TIM info and
 * rate) from the cached beacon skb.  Returns the total command size in
 * bytes, or 0 on error (beaconing disabled, no/oversized beacon).
 * Caller must hold il->mutex (needed by il4965_fill_beacon_frame()).
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Start from a clean command structure. */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Copy the beacon body after the command header; the space left
	 * is the frame union minus the command struct itself. */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Fixed TX parameters: broadcast station, infinite lifetime,
	 * sequence control handled by uCode, timestamp inserted. */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Point the device at the TIM element inside the copied frame. */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Lowest basic rate, alternating TX antenna; CCK rates need the
	 * CCK modulation flag set. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3850
3851int
3852il4965_send_beacon_cmd(struct il_priv *il)
3853{
3854 struct il_frame *frame;
3855 unsigned int frame_size;
3856 int rc;
3857
3858 frame = il4965_get_free_frame(il);
3859 if (!frame) {
3860 IL_ERR("Could not obtain free frame buffer for beacon "
3861 "command.\n");
3862 return -ENOMEM;
3863 }
3864
3865 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3866 if (!frame_size) {
3867 IL_ERR("Error configuring the beacon command\n");
3868 il4965_free_frame(il, frame);
3869 return -EINVAL;
3870 }
3871
3872 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3873
3874 il4965_free_frame(il, frame);
3875
3876 return rc;
3877}
3878
/*
 * il4965_tfd_tb_get_addr - read the DMA address of TFD buffer @idx
 *
 * The hardware packs a 36-bit address as a 32-bit 'lo' word plus the
 * low 4 bits of 'hi_n_len'.  The high nibble is only reassembled when
 * dma_addr_t is wider than 32 bits (the "<< 16 << 16" split avoids an
 * undefined 32-bit shift on 32-bit builds).
 */
static inline dma_addr_t
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
		    16;

	return addr;
}
3892
/*
 * il4965_tfd_tb_get_len - read the byte length of TFD buffer @idx
 *
 * The length occupies the upper 12 bits of the hi_n_len field (the low
 * 4 bits hold the address high nibble, see il4965_tfd_tb_get_addr()).
 */
static inline u16
il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
3900
/*
 * il4965_tfd_set_tb - install buffer @idx (addr/len) into a TFD
 *
 * Packs a 36-bit DMA address as lo(32) + high-nibble in hi_n_len, with
 * the 12-bit length in the upper bits of hi_n_len, then bumps num_tbs
 * so the hardware sees idx+1 valid buffers.  The ">> 16 >> 16" split
 * avoids an undefined 32-bit shift when dma_addr_t is 32 bits wide.
 */
static inline void
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
3915
/*
 * il4965_tfd_get_num_tbs - number of valid buffers in this TFD
 *
 * Only the low 5 bits of num_tbs carry the count; the rest are
 * reserved/control bits and must be masked off.
 */
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
3921
3922
3923
3924
3925
3926
3927
3928
3929
/*
 * il4965_hw_txq_free_tfd - unmap and free the TFD at the queue read ptr
 *
 * Undoes the DMA mappings recorded for one transmit frame descriptor
 * and frees the associated skb (if any).  Does NOT advance the read
 * pointer; the caller owns queue index management.
 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity check the buffer count reported by the descriptor. */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* The first TB is the command/header buffer: its mapping info is
	 * kept in txq->meta, and it was mapped bidirectional. */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Remaining TBs are data chunks whose addr/len live in the TFD
	 * itself; those were mapped to-device only. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* Release the skb tracked for this slot, if the queue has an
	 * skb array (the command queue does not). */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* Can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
3974
/*
 * il4965_hw_txq_attach_buf_to_tfd - append one DMA buffer to the TFD at
 * the queue write pointer
 *
 * @reset: if set, the TFD is zeroed first (i.e. this is the first
 *         buffer of a new frame).
 * @pad:   unused here (kept for the ops-table signature).
 *
 * Returns 0 on success, -EINVAL when the TFD already holds the maximum
 * number of buffers.
 */
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of IL_NUM_OF_TBS buffers. */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* Hardware addresses are limited to 36 bits and should honor the
	 * DMA alignment mask; an unaligned address is logged but still
	 * programmed (the BUG_ON catches the truly impossible case). */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
4007
4008
4009
4010
4011
4012
4013
4014
/*
 * il4965_hw_tx_queue_init - point the hardware at this queue's circular
 * TFD buffer
 *
 * Writes the queue's DMA base (256-byte aligned, hence >> 8) into the
 * FH byte-count/base register for this queue id.  Always returns 0.
 */
int
il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);

	return 0;
}
4025
4026
4027
4028
4029
4030
4031static void
4032il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
4033{
4034 struct il_rx_pkt *pkt = rxb_addr(rxb);
4035 struct il_alive_resp *palive;
4036 struct delayed_work *pwork;
4037
4038 palive = &pkt->u.alive_frame;
4039
4040 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
4041 palive->is_valid, palive->ver_type, palive->ver_subtype);
4042
4043 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
4044 D_INFO("Initialization Alive received.\n");
4045 memcpy(&il->card_alive_init, &pkt->u.alive_frame,
4046 sizeof(struct il_init_alive_resp));
4047 pwork = &il->init_alive_start;
4048 } else {
4049 D_INFO("Runtime Alive received.\n");
4050 memcpy(&il->card_alive, &pkt->u.alive_frame,
4051 sizeof(struct il_alive_resp));
4052 pwork = &il->alive_start;
4053 }
4054
4055
4056
4057 if (palive->is_valid == UCODE_VALID_OK)
4058 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
4059 else
4060 IL_WARN("uCode did not respond OK.\n");
4061}
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073static void
4074il4965_bg_stats_periodic(struct timer_list *t)
4075{
4076 struct il_priv *il = from_timer(il, t, stats_periodic);
4077
4078 if (test_bit(S_EXIT_PENDING, &il->status))
4079 return;
4080
4081
4082 if (!il_is_ready_rf(il))
4083 return;
4084
4085 il_send_stats_request(il, CMD_ASYNC, false);
4086}
4087
/*
 * il4965_hdl_beacon - handle a beacon-transmission notification
 *
 * Records whether we are currently the IBSS manager (the station
 * responsible for beaconing) from the notification; the rest of the
 * payload is only used for debug output.
 */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
4105
/*
 * il4965_perform_ct_kill_task - react to a critical-temperature kill
 *
 * Stops all mac80211 queues and tells the uCode to enter CT-kill exit
 * handling.  The read-back of CSR_UCODE_DRV_GP1 flushes the write, and
 * the grab/release of NIC access wakes the device long enough for the
 * register write to take effect.
 */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	_il_rd(il, CSR_UCODE_DRV_GP1);	/* flush the write */

	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
4125
4126
4127
/*
 * il4965_hdl_card_state - handle a card-state (rfkill/CT-kill) notify
 *
 * Updates S_RFKILL from the HW kill flag, blocks or unblocks host
 * commands depending on the disable flags, triggers CT-kill handling,
 * cancels scans, and propagates the rfkill state to wiphy (or wakes
 * command waiters if the state did not change).
 */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of status bits before we modify them below */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while any kill switch is active. */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* RXON itself is still allowed: unblock again so the
		 * driver can commit RXON while otherwise disabled. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* Only notify wiphy on an actual HW-rfkill transition; otherwise
	 * wake anyone waiting on a command that may now proceed. */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
/*
 * il4965_setup_handlers - populate the RX notification dispatch table
 *
 * Each entry maps a uCode notification/response id to its handler;
 * il4965_rx_handle() dispatches through this table.
 */
static void
il4965_setup_handlers(struct il_priv *il)
{
	/* generic/common notifications */
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/* statistics: same handler path for requested and unsolicited */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* status change */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;

	/* RX data path: PHY info precedes the MPDU notification */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	il->handlers[N_RX] = il4965_hdl_rx;

	/* TX path: block-ack and TX-status responses */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;

	il->handlers[C_TX] = il4965_hdl_tx;
}
4217
4218
4219
4220
4221
4222
4223
4224
/*
 * il4965_rx_handle - main RX processing loop
 *
 * Walks the RX ring from rxq->read up to the index the hardware last
 * closed (rb_stts->closed_rb_num), unmaps each buffer, dispatches the
 * packet to the handler table, optionally reclaims command buffers,
 * and recycles pages back onto rx_free/rx_used.  Replenishes the ring
 * eagerly when it is more than half empty.
 */
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;	/* set when we should replenish inside the loop */
	u32 count = 8;
	int total_empty;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process; only the low 12 bits are
	 * the ring index. */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with
		 * it, then a bug has been introduced in the queue
		 * refilling routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		/* total length includes the trailing status word */
		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);

		reclaim = il_need_reclaim(il, pkt);

		/* Based on type of command response or notification,
		 * handle those that need handling via function in
		 * handlers table.  See il4965_setup_handlers() */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/* A command response to a host command must also release
		 * the command queue entry that generated it. */
		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to
			 * caller, and fire off the (possibly) blocking
			 * il_send_cmd() as we reclaim the driver command
			 * queue */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Reuse the page if possible.  For notification packets
		 * and SKBs that fail to Rx correctly, add them back into
		 * the rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);

			/* a failed re-map means the page can't be handed
			 * back to hardware: free it and queue the slot
			 * for a fresh allocation */
			if (unlikely(pci_dma_mapping_error(il->pci_dev,
							   rxb->page_dma))) {
				__il_free_pages(il, rxb->page);
				rxb->page = NULL;
				list_add_tail(&rxb->list, &rxq->rx_used);
			} else {
				list_add_tail(&rxb->list, &rxq->rx_free);
				rxq->free_count++;
			}
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If there are a lot of unused frames, restock the Rx
		 * queue so ucode won't assert; do it every 8 packets. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
4352
4353
/*
 * il4965_synchronize_irq - wait for in-flight interrupt work to finish
 *
 * Waits for any running hard-IRQ handler on our line, then kills the
 * tasklet so no deferred interrupt processing remains afterwards.
 */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4361
/*
 * il4965_irq_tasklet - deferred interrupt processing
 *
 * Reads and acks CSR_INT and CSR_FH_INT_STATUS, folds FH RX/TX causes
 * into the inta bitmap, then handles each cause in priority order:
 * fatal HW error (aborts processing), rfkill toggles, CT-kill, uCode
 * SW error, wakeup (write-pointer updates), RX, and firmware-load TX
 * completion.  Re-enables interrupts (or only the rfkill interrupt)
 * on exit.
 */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in
	 * CSR_FH_INT_STATUS, so we must ack those first. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new causes occurring after this read will raise a fresh
	 * interrupt, so none are lost by acking here. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* fatal: skip all further processing; interrupts stay
		 * disabled until the restart path re-enables them */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed. */
		if (hw_rf_kill) {
			set_bit(S_RFKILL, &il->status);
		} else {
			clear_bit(S_RFKILL, &il->status);
			il_force_reset(il, true);
		}
		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;

		/* Wake up waiter in il4965_load_firmware() */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4546
4547
4548
4549
4550
4551
4552
4553#ifdef CONFIG_IWLEGACY_DEBUG
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566static ssize_t
4567il4965_show_debug_level(struct device *d, struct device_attribute *attr,
4568 char *buf)
4569{
4570 struct il_priv *il = dev_get_drvdata(d);
4571 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
4572}
4573
4574static ssize_t
4575il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4576 const char *buf, size_t count)
4577{
4578 struct il_priv *il = dev_get_drvdata(d);
4579 unsigned long val;
4580 int ret;
4581
4582 ret = kstrtoul(buf, 0, &val);
4583 if (ret)
4584 IL_ERR("%s is not in hex or decimal form.\n", buf);
4585 else
4586 il->debug_level = val;
4587
4588 return strnlen(buf, count);
4589}
4590
4591static DEVICE_ATTR(debug_level, 0644, il4965_show_debug_level,
4592 il4965_store_debug_level);
4593
4594#endif
4595
4596static ssize_t
4597il4965_show_temperature(struct device *d, struct device_attribute *attr,
4598 char *buf)
4599{
4600 struct il_priv *il = dev_get_drvdata(d);
4601
4602 if (!il_is_alive(il))
4603 return -EAGAIN;
4604
4605 return sprintf(buf, "%d\n", il->temperature);
4606}
4607
4608static DEVICE_ATTR(temperature, 0444, il4965_show_temperature, NULL);
4609
4610static ssize_t
4611il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4612{
4613 struct il_priv *il = dev_get_drvdata(d);
4614
4615 if (!il_is_ready_rf(il))
4616 return sprintf(buf, "off\n");
4617 else
4618 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4619}
4620
4621static ssize_t
4622il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4623 const char *buf, size_t count)
4624{
4625 struct il_priv *il = dev_get_drvdata(d);
4626 unsigned long val;
4627 int ret;
4628
4629 ret = kstrtoul(buf, 10, &val);
4630 if (ret)
4631 IL_INFO("%s is not in decimal form.\n", buf);
4632 else {
4633 ret = il_set_tx_power(il, val, false);
4634 if (ret)
4635 IL_ERR("failed setting tx power (0x%08x).\n", ret);
4636 else
4637 ret = count;
4638 }
4639 return ret;
4640}
4641
/* sysfs attribute: tx_power (rw) -> show/store handlers above */
static DEVICE_ATTR(tx_power, 0644, il4965_show_tx_power,
		   il4965_store_tx_power);

/* All device attributes exported under the PCI device's sysfs dir;
 * debug_level only exists on debug builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL	/* sentinel */
};

/* unnamed group: attributes appear directly on the device directory */
static const struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
4658
4659
4660
4661
4662
4663
4664
/*
 * il4965_dealloc_ucode_pci - free all DMA buffers holding uCode images
 *
 * Releases runtime code/data (and its backup), init code/data, and the
 * bootstrap image.  il_free_fw_desc() tolerates descriptors that were
 * never allocated.
 */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4675
/*
 * il4965_nic_start - release the device from reset so the uCode runs
 *
 * Clearing CSR_RESET lets the embedded processor start executing the
 * previously loaded bootstrap/uCode image.
 */
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
4682
4683static void il4965_ucode_callback(const struct firmware *ucode_raw,
4684 void *context);
4685static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4686
4687static int __must_check
4688il4965_request_firmware(struct il_priv *il, bool first)
4689{
4690 const char *name_pre = il->cfg->fw_name_pre;
4691 char tag[8];
4692
4693 if (first) {
4694 il->fw_idx = il->cfg->ucode_api_max;
4695 sprintf(tag, "%d", il->fw_idx);
4696 } else {
4697 il->fw_idx--;
4698 sprintf(tag, "%d", il->fw_idx);
4699 }
4700
4701 if (il->fw_idx < il->cfg->ucode_api_min) {
4702 IL_ERR("no suitable firmware found!\n");
4703 return -ENOENT;
4704 }
4705
4706 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4707
4708 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4709
4710 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4711 &il->pci_dev->dev, GFP_KERNEL, il,
4712 il4965_ucode_callback);
4713}
4714
/* Parsed sections of a firmware file: runtime inst/data, init
 * inst/data, and the bootstrap image, each with its byte size.
 * The pointers alias into the raw firmware blob (no ownership). */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4719
/*
 * il4965_load_firmware - parse a raw firmware blob into pieces
 *
 * Reads the v1/v2 style header (24 bytes), validates the total file
 * size against the sum of the declared section sizes, and sets up the
 * section pointers in @pieces.  Returns 0 on success or -EINVAL on a
 * malformed file.
 *
 * NOTE(review): the section sizes come from an untrusted file; the sum
 * in the size check is computed in size_t, which on 32-bit could in
 * principle wrap for absurd values — downstream max_*_size checks in
 * the caller bound the damage, but worth confirming.
 */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	/* caller has already verified size >= 4, so 'ver' is readable */
	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Sections follow the header back-to-back in this fixed order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4773
4774
4775
4776
4777
4778
4779
/*
 * il4965_ucode_callback - firmware-load completion (async callback)
 *
 * Invoked by request_firmware_nowait() with the raw firmware blob (or
 * NULL on failure).  Parses and validates the image, copies each
 * section into DMA-able memory, registers with mac80211 and sets up
 * debugfs/sysfs.  On any recoverable failure it retries with the next
 * lower firmware API version; on fatal failure it unbinds the driver.
 */
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		/* only complain on the first (highest-API) attempt */
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file:  header followed by uCode images */
	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/* api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only
	 * rely on the API version read from firmware header from here
	 * on forward */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/* Show what we loaded (sizes as parsed from the header). */
	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %zd\n", pieces.boot_size);

	/* Verify that each uCode image will fit in its SRAM area. */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data (both optional) */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that ucode is loaded, set driver capabilities. */
	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into DMA buffers (loading to device comes later). */

	/* Runtime instructions (first block of data in file) */
	D_INFO("Copying (but not loading) uCode instr len %zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	/* Runtime data (2nd block)
	 * NOTE:  Copy into backup buffer will be done in il_up()  */
	D_INFO("Copying (but not loading) uCode data len %zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions (3rd block, if present) */
	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data (4th block, if present) */
	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions (5th block) */
	D_INFO("Copying (but not loading) boot instr len %zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/* PHY calibration command indexes follow the standard table. */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/* All done: register with mac80211 and expose debug/sysfs. */
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	err = il_dbgfs_register(il, DRV_NAME);
	if (err)
		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
		       err);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* try next, smaller ucode API version */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
5022
/* Error descriptions indexed directly by the uCode error code; see
 * il4965_desc_lookup() for the dense-range lookup and the sparse
 * advanced_lookup[] fallback. */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
5053
5054static struct {
5055 char *name;
5056 u8 num;
5057} advanced_lookup[] = {
5058 {
5059 "NMI_INTERRUPT_WDG", 0x34}, {
5060 "SYSASSERT", 0x35}, {
5061 "UCODE_VERSION_MISMATCH", 0x37}, {
5062 "BAD_COMMAND", 0x38}, {
5063 "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
5064 "FATAL_ERROR", 0x3D}, {
5065 "NMI_TRM_HW_ERR", 0x46}, {
5066 "NMI_INTERRUPT_TRM", 0x4C}, {
5067 "NMI_INTERRUPT_BREAK_POINT", 0x54}, {
5068 "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
5069 "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
5070 "NMI_INTERRUPT_HOST", 0x66}, {
5071 "NMI_INTERRUPT_ACTION_PT", 0x7C}, {
5072 "NMI_INTERRUPT_UNKNOWN", 0x84}, {
5073 "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
5074"ADVANCED_SYSASSERT", 0},};
5075
5076static const char *
5077il4965_desc_lookup(u32 num)
5078{
5079 int i;
5080 int max = ARRAY_SIZE(desc_lookup_text);
5081
5082 if (num < max)
5083 return desc_lookup_text[num];
5084
5085 max = ARRAY_SIZE(advanced_lookup) - 1;
5086 for (i = 0; i < max; i++) {
5087 if (advanced_lookup[i].num == num)
5088 break;
5089 }
5090 return advanced_lookup[i].name;
5091}
5092
/* Layout of the on-device error-event table: a one-word count header
 * followed by 7-word entries. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
5095
/*
 * il4965_dump_nic_error_log - dump the uCode error-event table from SRAM
 *
 * Reads the table whose base pointer was reported in the "alive"
 * response of the currently loaded image (init vs. runtime) and logs
 * the error descriptor, program counter, link registers, data words,
 * source line, timestamp and last host command at their fixed word
 * offsets within the table.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* Choose the error table of the image that produced the error. */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word of the table is the number of logged entries. */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fixed word offsets within the error table. */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc Time "
	       "data1 data2 line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
5143
5144static void
5145il4965_rf_kill_ct_config(struct il_priv *il)
5146{
5147 struct il_ct_kill_config cmd;
5148 unsigned long flags;
5149 int ret = 0;
5150
5151 spin_lock_irqsave(&il->lock, flags);
5152 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
5153 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
5154 spin_unlock_irqrestore(&il->lock, flags);
5155
5156 cmd.critical_temperature_R =
5157 cpu_to_le32(il->hw_params.ct_kill_threshold);
5158
5159 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
5160 if (ret)
5161 IL_ERR("C_CT_KILL_CONFIG failed\n");
5162 else
5163 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
5164 "critical temperature is %d\n",
5165 il->hw_params.ct_kill_threshold);
5166}
5167
/* Default TX queue -> TX FIFO mapping used by il4965_alive_notify():
 * four AC queues, the command FIFO, and two unused slots.  Must have
 * exactly 7 entries (BUILD_BUG_ON in il4965_alive_notify()). */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
5177
/* Build a bitmask with bits lo..hi (inclusive) set. */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
5179
/*
 * il4965_alive_notify - set up the TX scheduler once uCode reports alive
 *
 * Clears the scheduler's SRAM context, points it at the byte-count
 * tables, enables the TX DMA channels, programs each queue's window
 * size and frame limit, and activates the default queue->FIFO mapping.
 * Takes il->lock itself.  Always returns 0 (kept int for the caller's
 * error-checking convention).
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear the scheduler's internal SRAM data areas. */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell the scheduler where the TX byte-count tables live (DRAM
	 * address in 1KB units, hence the >> 10). */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable all TX DMA channels with credit flow control. */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Enable scheduler auto-retry in the FH chicken bits. */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues. */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Per-queue initialization, including the command queue. */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* Reset TFD circular-buffer read/write indexes. */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max TX window size for scheduler-ACK mode. */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit (second context word of the queue). */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate scheduling on queues 0..6. */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* Make sure no queue is marked stopped. */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* Reset to 0 so queues are activated from a clean slate. */
	il->txq_ctx_active_msk = 0;

	/* Map each TX/cmd queue to its FIFO per the default table. */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
5279
5280
5281
5282
5283
5284
/*
 * il4965_alive_start - finish bring-up after the runtime uCode is alive
 *
 * Verifies the alive response and the loaded image, programs the TX
 * scheduler, restores RXON state (or re-initializes it when not
 * associated), sends BT config and the CT-kill threshold, and finally
 * marks the driver READY and wakes il4965_mac_start().  On any failure
 * it schedules the restart work instead.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* uCode itself reported a bad alive. */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Verify the loaded runtime image by reading it back from the
	 * device and comparing against what we uploaded. */
	if (il4965_verify_ucode(il)) {
		/* Image on the device does not match. */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After this point the device is usable. */
	set_bit(S_ALIVE, &il->status);

	/* Arm the stuck-queue watchdog. */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;

		/* Keep association in staging, drop it from active so
		 * the RXON commit below re-applies it. */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Not associated: initialize RXON from scratch. */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence. */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Push the (possibly modified) RXON state to the device. */
	il_commit_rxon(il);

	/* Program the critical-temperature kill threshold. */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5366
5367static void il4965_cancel_deferred_work(struct il_priv *il);
5368
/*
 * __il4965_down - take the NIC down (caller holds il->mutex)
 *
 * Cancels scanning, stops the watchdog, tears down stations and keys,
 * stops TX/RX DMA and the APM, and resets the driver status bits while
 * preserving RFKILL/GEO/EXIT_PENDING (and FW_ERROR when uCode had been
 * loaded).  The EXIT_PENDING bit is set for the duration so concurrent
 * work items bail out; it is cleared again unless it was already set
 * by the caller.
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop the watchdog before tearing anything down so it cannot
	 * fire mid-teardown. */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* Wipe WEP key state under the station lock. */
	spin_lock_irq(&il->sta_lock);

	/* Keys in hardware are gone once the device resets below;
	 * forget our software copies so they are not reused stale. */
	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock anyone waiting for a host command to complete. */
	wake_up_all(&il->wait_command_queue);

	/* If we were set to EXIT_PENDING by someone else, leave the bit
	 * for them; otherwise it was ours, clear it. */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* Stop the device and put it in low power state. */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* Make sure no interrupt handler runs past this point. */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If uCode was never loaded there is no DMA to stop: just reset
	 * the status, keeping only the bits that survive a down. */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* uCode was loaded: also preserve FW_ERROR across the reset. */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/* Stop DMA and the APM under the register lock so nothing else
	 * touches the device mid-shutdown. */
	spin_lock_irq(&il->reg_lock);

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);

	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* Release leftover beacon frame buffers. */
	il4965_clear_free_frames(il);
}
5471
/*
 * il4965_down - take the NIC down, acquiring il->mutex itself
 *
 * Deferred work is cancelled only AFTER the mutex is dropped: several
 * work handlers take il->mutex themselves, so cancelling them while
 * holding it would deadlock.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5481
5482
5483static void
5484il4965_set_hw_ready(struct il_priv *il)
5485{
5486 int ret;
5487
5488 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5489 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5490
5491
5492 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5493 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5494 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5495 100);
5496 if (ret >= 0)
5497 il->hw_ready = true;
5498
5499 D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
5500}
5501
5502static void
5503il4965_prepare_card_hw(struct il_priv *il)
5504{
5505 int ret;
5506
5507 il->hw_ready = false;
5508
5509 il4965_set_hw_ready(il);
5510 if (il->hw_ready)
5511 return;
5512
5513
5514 il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
5515
5516 ret =
5517 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5518 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
5519 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
5520
5521
5522 if (ret != -ETIMEDOUT)
5523 il4965_set_hw_ready(il);
5524}
5525
/* Max attempts at loading + starting the uCode before __il4965_up() gives up. */
#define MAX_HW_RESTARTS 5
5527
/*
 * __il4965_up - bring the NIC up (caller holds il->mutex)
 *
 * Checks preconditions (no pending exit, uCode images present), brings
 * the HW to ready state, honours the hardware RF-kill switch, then
 * initializes the NIC and tries up to MAX_HW_RESTARTS times to load
 * and start the bootstrap uCode.  Returns 0 on success (including the
 * RF-kill case, which is not an error), negative errno on failure.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		il_dealloc_bcast_stations(il);
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* Check the hardware RF-kill switch; bit set means radio enabled. */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_dealloc_bcast_stations(il);
		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* Runtime firmware services the default command queue. */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		il_dealloc_bcast_stations(il);
		return ret;
	}

	/* Make sure RF-kill and command-blocked are not asserted in SW. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* Clear and enable interrupts before starting the uCode. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* Really make sure RF-kill is not asserted in software. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Keep a pristine copy of the runtime data section: the live
	 * copy may be modified (e.g. by calibration) while running. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* Load bootstrap; it pulls in the rest of the uCode and
		 * signals completion via interrupt. */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* Kick the embedded processor. */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: tear back down cleanly. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5629
5630
5631
5632
5633
5634
5635
5636static void
5637il4965_bg_init_alive_start(struct work_struct *data)
5638{
5639 struct il_priv *il =
5640 container_of(data, struct il_priv, init_alive_start.work);
5641
5642 mutex_lock(&il->mutex);
5643 if (test_bit(S_EXIT_PENDING, &il->status))
5644 goto out;
5645
5646 il->ops->init_alive_start(il);
5647out:
5648 mutex_unlock(&il->mutex);
5649}
5650
5651static void
5652il4965_bg_alive_start(struct work_struct *data)
5653{
5654 struct il_priv *il =
5655 container_of(data, struct il_priv, alive_start.work);
5656
5657 mutex_lock(&il->mutex);
5658 if (test_bit(S_EXIT_PENDING, &il->status))
5659 goto out;
5660
5661 il4965_alive_start(il);
5662out:
5663 mutex_unlock(&il->mutex);
5664}
5665
5666static void
5667il4965_bg_run_time_calib_work(struct work_struct *work)
5668{
5669 struct il_priv *il = container_of(work, struct il_priv,
5670 run_time_calib_work);
5671
5672 mutex_lock(&il->mutex);
5673
5674 if (test_bit(S_EXIT_PENDING, &il->status) ||
5675 test_bit(S_SCANNING, &il->status)) {
5676 mutex_unlock(&il->mutex);
5677 return;
5678 }
5679
5680 if (il->start_calib) {
5681 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5682 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5683 }
5684
5685 mutex_unlock(&il->mutex);
5686}
5687
/*
 * il4965_bg_restart - restart the device after an error or on request
 *
 * Two paths: on a firmware error (S_FW_ERROR set) we only bring the
 * device down and let mac80211 drive the restart via
 * ieee80211_restart_hw(); otherwise we do a full down/up cycle
 * ourselves.  Note the lock choreography: deferred work is cancelled
 * outside il->mutex (handlers take the mutex themselves).
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		/* mac80211 will call our start() callback again. */
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* Re-check: a shutdown may have raced with the down. */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5718
5719static void
5720il4965_bg_rx_replenish(struct work_struct *data)
5721{
5722 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
5723
5724 if (test_bit(S_EXIT_PENDING, &il->status))
5725 return;
5726
5727 mutex_lock(&il->mutex);
5728 il4965_rx_replenish(il);
5729 mutex_unlock(&il->mutex);
5730}
5731
5732
5733
5734
5735
5736
5737
/* How long il4965_mac_start() waits for S_READY (ALIVE processing done). */
#define UCODE_READY_TIMEOUT (4 * HZ)
5739
5740
5741
5742
5743
/*
 * il4965_mac_setup_register - describe our capabilities to mac80211
 *
 * Fills in hw/wiphy flags, supported interface modes, scan limits and
 * band data, then registers the hw with mac80211.  @max_probe_length
 * (from the uCode capabilities) bounds the probe-request IEs we can
 * append during scans.  Returns 0 on success or the registration error.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Capabilities this hardware/driver combination supports. */
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	if (il->cfg->sku & IL_SKU_N)
		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
				       NL80211_FEATURE_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				       REGULATORY_DISABLE_BEACON_HINTS;

	/* Power-save is off by default; userspace opts in explicitly. */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	/* Probe frame budget minus the 24-byte header and SSID IE hdr. */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* One hardware queue per AC. */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	if (il->bands[NL80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		    &il->bands[NL80211_BAND_2GHZ];
	if (il->bands[NL80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		    &il->bands[NL80211_BAND_5GHZ];

	il_leds_init(il);

	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5809
/*
 * il4965_mac_start - mac80211 start callback
 *
 * Brings the NIC up and, unless RF-kill is asserted, waits up to
 * UCODE_READY_TIMEOUT for the ALIVE sequence to set S_READY before
 * enabling the LED.  il->is_open is set even in the RF-kill case so
 * a later RF-kill release can proceed.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* The rest of bring-up runs asynchronously off the alive IRQ. */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for the ALIVE notification path to mark us READY;
	 * il4965_alive_start() wakes wait_command_queue. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5851
/*
 * il4965_mac_stop - mac80211 stop callback
 *
 * Takes the NIC down, flushes all deferred work, then re-enables only
 * the RF-kill interrupt so userspace can be told when the switch state
 * changes while the interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* The down sequence disabled all interrupts; clear whatever is
	 * pending and re-arm just the RF-kill one. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5875
/*
 * il4965_mac_tx - mac80211 transmit callback
 *
 * Hands the frame to il4965_tx_skb(); on failure the skb is freed here
 * (mac80211 transfers ownership to the driver at this point).
 */
void
il4965_mac_tx(struct ieee80211_hw *hw,
	      struct ieee80211_tx_control *control,
	      struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MACDUMP("enter\n");

	/* NOTE(review): debug-only print dereferences the rate without a
	 * NULL check — assumes mac80211 always set a valid rate index. */
	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (il4965_tx_skb(il, control->sta, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
5893
/*
 * il4965_mac_update_tkip_key - mac80211 TKIP phase-1 key update callback
 *
 * Thin passthrough to il4965_update_tkip_key(), which pushes the new
 * phase-1 key material for @iv32 to the hardware key table.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);

	D_MAC80211("leave\n");
}
5907
/*
 * il4965_mac_set_key - mac80211 set_key callback (hardware crypto)
 *
 * Installs or removes a key in the uCode's key tables.  Group WEP keys
 * with no station use the "default WEP key" path; everything else is a
 * dynamic (per-station) key.  Returns -EOPNOTSUPP when HW crypto is
 * disabled or for ad-hoc group keys, -EINVAL for an unknown station or
 * command, else the result of the add/remove helper.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* Ad-hoc group keys are handled in software by mac80211 (we
	 * cannot program per-peer group keys into the uCode). */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - ad-hoc group key\n");
		return -EOPNOTSUPP;
	}

	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/* A stationless WEP key is a "default" key — shared across all
	 * stations — unless per-station (mapping) keys are already in
	 * use.  On removal, trust the hw_key_idx stamped at SET time. */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5983
/*
 * il4965_mac_ampdu_action - mac80211 A-MPDU (aggregation) callback
 *
 * Dispatches RX/TX aggregation start/stop to the 4965 helpers.  Stop
 * errors are ignored while a shutdown is pending (the session is going
 * away regardless).  Returns -EACCES when the SKU has no 11n support;
 * unhandled actions leave ret at -EINVAL.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_ampdu_params *params)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* Nothing to program; uCode handles the operational state. */
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
6033
/*
 * il4965_mac_sta_add - mac80211 station add callback
 *
 * Adds the peer to the uCode station table and initializes rate
 * scaling for it.  Returns 0 on success or the station-add error.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	/* Not inverted: when OUR interface is a station, the peer being
	 * added is the AP. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* sta_id remains IL_INVALID_STATION. */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Seed the rate-scaling state for this peer. */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
6069
/*
 * il4965_mac_channel_switch - mac80211 CSA (channel switch) callback
 *
 * Validates the target channel, updates the staged RXON/HT config for
 * the new channel, then asks the uCode to perform the switch.  If the
 * uCode command fails, the pending state is rolled back and mac80211
 * is told the switch did not happen.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	/* Refuse while shutting down, scanning, or mid-switch. */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;	/* already on the target channel */

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Derive 40MHz/extension-channel state from the new chandef. */
	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		il->ht.is_40mhz = false;
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	case NL80211_CHAN_HT40MINUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		il->ht.is_40mhz = true;
		break;
	case NL80211_CHAN_HT40PLUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		il->ht.is_40mhz = true;
		break;
	}

	/* Changing channel invalidates the staged RXON flags. */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);

	/* Mark the switch pending before issuing the uCode command; on
	 * failure undo it and report the switch as not done. */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
6156
/*
 * il4965_configure_filter - mac80211 RX filter configuration callback
 *
 * Translates mac80211 FIF_* filter flags into staged RXON filter bits.
 * The CHK macro accumulates bits to set (filter_or) or clear
 * (filter_nand) depending on whether each FIF_* flag is requested.
 * The new staging flags take effect at the next RXON commit; finally
 * *total_flags is clamped to the subset of flags we can honor.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
	/* Control frames to host implies promiscuous reception too. */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/* Not committing RXON here; the staged flags are applied on the
	 * next commit performed elsewhere. */
	mutex_unlock(&il->mutex);

	/* Report back only the filter flags this hardware supports. */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
6203
6204
6205
6206
6207
6208
6209
6210static void
6211il4965_bg_txpower_work(struct work_struct *work)
6212{
6213 struct il_priv *il = container_of(work, struct il_priv,
6214 txpower_work);
6215
6216 mutex_lock(&il->mutex);
6217
6218
6219
6220
6221
6222 if (test_bit(S_EXIT_PENDING, &il->status) ||
6223 test_bit(S_SCANNING, &il->status))
6224 goto out;
6225
6226
6227
6228
6229 il->ops->send_tx_power(il);
6230
6231
6232
6233 il->last_temperature = il->temperature;
6234out:
6235 mutex_unlock(&il->mutex);
6236}
6237
/*
 * il4965_setup_deferred_work - initialize workqueue, work items and timers
 *
 * Everything set up here must be torn down by
 * il4965_cancel_deferred_work() / device removal.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	/* NOTE(review): create_singlethread_workqueue() can return NULL
	 * and the result is not checked here — a later queue_work()
	 * would then dereference NULL.  Confirm callers cannot hit this
	 * or propagate the failure. */
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0);

	timer_setup(&il->watchdog, il_bg_watchdog, 0);

	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
6263
/*
 * il4965_cancel_deferred_work - cancel work items and timers
 *
 * Called without il->mutex held (the _sync variants would deadlock
 * against handlers that take the mutex if it were held).
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	/* NOTE(review): non-_sync cancel here, unlike init_alive_start
	 * above — a running alive_start handler may still be in flight
	 * after this returns.  Presumably intentional; confirm. */
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
6276
6277static void
6278il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
6279{
6280 int i;
6281
6282 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
6283 rates[i].bitrate = il_rates[i].ieee * 5;
6284 rates[i].hw_value = i;
6285 rates[i].hw_value_short = i;
6286 rates[i].flags = 0;
6287 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
6288
6289
6290
6291 rates[i].flags |=
6292 (il_rates[i].plcp ==
6293 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6294 }
6295 }
6296}
6297
6298
6299
6300
/*
 * il4965_set_wr_ptrs - set both write and read index for a TX queue
 *
 * HBUS_TARG_WRPTR encodes the queue id in bits 8+ and the index in the
 * low byte; the scheduler's read pointer is set to match.
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6307
/*
 * il4965_tx_queue_set_status - program a queue's scheduler status bits
 *
 * Binds @txq to @tx_fifo_id and configures scheduler-ACK ("BA") mode
 * when @scd_retry is set; the active bit reflects txq_ctx_active_msk.
 */
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
			   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Queue is active only if it was previously activated via
	 * il_txq_ctx_activate(). */
	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;

	/* One register write sets active state, FIFO binding and the
	 * window/ACK mode bits together. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
6330
/* mac80211 callback table for the 4965: 4965-specific handlers are the
 * il4965_* entries; the rest are shared iwlegacy (il_*) implementations. */
static const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
	.flush = il_mac_flush,
};
6353
/*
 * il4965_init_drv - initialize driver-private state (no hardware access)
 *
 * Sets up locks, lists and default configuration, then builds the
 * regulatory channel map, the geo/band structures and the legacy rate
 * table. Returns 0 on success or a negative error code; on failure any
 * partially-built channel map is freed before returning.
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	/* Channel/rate tables are allocated later by
	 * il_init_channel_map()/il_init_geos() below. */
	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = NL80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* Minimum interval between successive forced firmware reloads. */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use, if set by the device ops. */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
6403
/*
 * il4965_uninit_drv - release driver-private state allocated by
 * il4965_init_drv() (geos, channel map) plus the scan command buffer.
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6411
/*
 * il4965_hw_detect - cache hardware revision identifiers
 *
 * Reads the CSR hardware revision and workaround-revision registers and
 * the PCI config-space revision ID into the driver structure.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6420
/*
 * Default receiver-sensitivity calibration bounds for the 4965.
 * Consumed through il->hw_params.sens by the shared sensitivity
 * calibration code.
 */
static const struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	/* OFDM auto-correlation thresholds */
	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	/* CCK auto-correlation thresholds */
	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	/* energy thresholds */
	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
6447
/*
 * il4965_set_hw_params - fill il->hw_params with 4965 capabilities,
 * honoring module parameters (amsdu_size_8K, disable_11n, queues_num).
 *
 * NOTE(review): this writes through il->cfg (sku, num_of_queues); if
 * the cfg structure is shared between devices this mutates shared
 * state — confirm cfg is per-probe before relying on that.
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* Rx buffer order: 8K pages only when the amsdu_size_8K module
	 * parameter is set, otherwise 4K. */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	/* 11n can be disabled entirely via module parameter. */
	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Override the default queue count only when the requested value
	 * is within the hardware-supported range. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	/* Byte-count table in DRAM, one entry per Tx queue. */
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 supported on the 5 GHz band only. */
	il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
6495
/*
 * il4965_pci_probe - PCI probe entry point
 *
 * Allocates the mac80211 hw/driver structure, brings up the PCI device
 * (DMA masks, BAR mapping), detects the hardware, reads the EEPROM,
 * initializes driver state and interrupts, and finally kicks off the
 * asynchronous firmware request. Returns 0 on success or a negative
 * errno, unwinding everything acquired so far via the goto ladder.
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocating HW data
	 ************************/

	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/**************************
	 * 2. Initializing PCI bus
	 **************************/
	/* ASPM/clock power management is disabled for this hardware. */
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Prefer a 36-bit DMA mask; fall back to 32-bit if unsupported. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err =
			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/***********************
	 * 3. Read REV register
	 ***********************/
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* These spin locks will be used in apm_init and EEPROM access
	 * and potentially in other places, so initialize them early. */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * Stop the device in case it was previously running, before
	 * reading the hardware revision.
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* Clear the retry-timeout PCI config register to keep PCI Tx
	 * retries from interfering with C3 CPU state. */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* MAC address comes from the EEPROM; expose it to wiphy. */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Setup HW constants
	 ************************/
	il4965_set_hw_params(il);

	/*******************
	 * 6. Setup il
	 *******************/
	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;

	/********************
	 * 7. Setup services
	 ********************/
	/* Interrupts stay disabled until the firmware load completes. */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*
	 * If the INTX_DISABLE bit is set in PCI_COMMAND, re-enable
	 * legacy interrupts; some hardware needs INTx even with MSI
	 * enabled.
	 */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* Publish the initial RF-kill switch state from hardware. */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	init_completion(&il->_4965.firmware_loading_complete);

	/* Firmware loads asynchronously; completion happens in the
	 * request-firmware callback. */
	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
6706
/*
 * il4965_pci_remove - PCI remove entry point
 *
 * Tears down everything set up in probe, in reverse order: waits for
 * the async firmware load to finish, unregisters from mac80211 (or
 * downs the device if registration never happened), stops the APM,
 * disables interrupts, frees DMA/queue resources, releases the IRQ
 * and PCI resources, and finally frees the hw structure.
 */
static void
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* The firmware-load completion also guards the sysfs/debugfs
	 * registration done in its callback. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* ieee80211_unregister_hw() calls il_mac_stop(), which flushes
	 * the deferred-work queue; setting the exit-pending bit first
	 * keeps new work from being queued. */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Make sure the device is stopped: even if mac80211 already
	 * shut it down, stop the APM explicitly so the device is
	 * quiesced before we free resources it could still DMA into.
	 */
	il_apm_stop(il);

	/* Make sure the hardware can raise no further interrupts before
	 * tearing the IRQ down. */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/* flush any pending deferred work before destroying it */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw and il_down must already be done at
	 * this point, so nothing can queue onto the workqueue. */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	il4965_uninit_drv(il);

	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6785
6786
6787
6788
6789
/*
 * il4965_txq_set_sched - enable/disable Tx queues in the uCode scheduler
 * @mask: bitmask of queues the scheduler may service
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6795
6796
6797
6798
6799
6800
6801
6802
/* PCI device IDs handled by this driver; both map to the 4965 config. */
static const struct pci_device_id il4965_hw_card_ids[] = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* sentinel */
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6809
/* PCI driver glue: probe/remove plus optional power-management ops. */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = il4965_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};
6817
6818static int __init
6819il4965_init(void)
6820{
6821
6822 int ret;
6823 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6824 pr_info(DRV_COPYRIGHT "\n");
6825
6826 ret = il4965_rate_control_register();
6827 if (ret) {
6828 pr_err("Unable to register rate control algorithm: %d\n", ret);
6829 return ret;
6830 }
6831
6832 ret = pci_register_driver(&il4965_driver);
6833 if (ret) {
6834 pr_err("Unable to initialize PCI module\n");
6835 goto error_register;
6836 }
6837
6838 return ret;
6839
6840error_register:
6841 il4965_rate_control_unregister();
6842 return ret;
6843}
6844
/*
 * il4965_exit - module exit: unregister the PCI driver first so no new
 * devices bind, then drop the rate-control algorithm.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6851
/* Module entry points (order of these two macro calls is irrelevant). */
module_exit(il4965_exit);
module_init(il4965_init);

#ifdef CONFIG_IWLEGACY_DEBUG
module_param_named(debug, il_debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Module parameters backing il4965_mod_params (read at probe time). */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, 0444);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");