1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/pci.h>
21#include <linux/pci-aspm.h>
22#include <linux/slab.h>
23#include <linux/dma-mapping.h>
24#include <linux/delay.h>
25#include <linux/sched.h>
26#include <linux/skbuff.h>
27#include <linux/netdevice.h>
28#include <linux/firmware.h>
29#include <linux/etherdevice.h>
30#include <linux/if_arp.h>
31
32#include <net/mac80211.h>
33
34#include <asm/div64.h>
35
36#define DRV_NAME "iwl4965"
37
38#include "common.h"
39#include "4965.h"
40
41
42
43
44
45
46
47
48
49
50#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
51
52#ifdef CONFIG_IWLEGACY_DEBUG
53#define VD "d"
54#else
55#define VD
56#endif
57
58#define DRV_VERSION IWLWIFI_VERSION VD
59
60MODULE_DESCRIPTION(DRV_DESCRIPTION);
61MODULE_VERSION(DRV_VERSION);
62MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
63MODULE_LICENSE("GPL");
64MODULE_ALIAS("iwl4965");
65
66void
67il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
68{
69 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
70 IL_ERR("Tx flush command to flush out all frames\n");
71 if (!test_bit(S_EXIT_PENDING, &il->status))
72 queue_work(il->workqueue, &il->tx_flush);
73 }
74}
75
76
77
78
/* Module parameters shared across 4965 devices; only firmware
 * auto-restart is explicitly enabled here, every other field keeps its
 * zero default. */
struct il_mod_params il4965_mod_params = {
	.restart_fw = 1,
	/* the rest are 0 by default */
};
83
84void
85il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
86{
87 unsigned long flags;
88 int i;
89 spin_lock_irqsave(&rxq->lock, flags);
90 INIT_LIST_HEAD(&rxq->rx_free);
91 INIT_LIST_HEAD(&rxq->rx_used);
92
93 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
94
95
96 if (rxq->pool[i].page != NULL) {
97 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
98 PAGE_SIZE << il->hw_params.rx_page_order,
99 PCI_DMA_FROMDEVICE);
100 __il_free_pages(il, rxq->pool[i].page);
101 rxq->pool[i].page = NULL;
102 }
103 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
104 }
105
106 for (i = 0; i < RX_QUEUE_SIZE; i++)
107 rxq->queue[i] = NULL;
108
109
110
111 rxq->read = rxq->write = 0;
112 rxq->write_actual = 0;
113 rxq->free_count = 0;
114 spin_unlock_irqrestore(&rxq->lock, flags);
115}
116
117int
118il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
119{
120 u32 rb_size;
121 const u32 rfdnlog = RX_QUEUE_SIZE_LOG;
122 u32 rb_timeout = 0;
123
124 if (il->cfg->mod_params->amsdu_size_8K)
125 rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
126 else
127 rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
128
129
130 il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
131
132
133 il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
134
135
136 il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
137
138
139 il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
140
141
142
143
144
145
146
147 il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
148 FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
149 FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
150 FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
151 rb_size |
152 (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
153 (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
154
155
156 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
157
158 return 0;
159}
160
/*
 * il4965_set_pwr_vmain - force the device to draw power from VMAIN
 *
 * This driver always runs from the main power source; program the APMG
 * power-source-control bits accordingly (mask selects the power-source
 * field, value selects VMAIN).
 */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
178
179int
180il4965_hw_nic_init(struct il_priv *il)
181{
182 unsigned long flags;
183 struct il_rx_queue *rxq = &il->rxq;
184 int ret;
185
186 spin_lock_irqsave(&il->lock, flags);
187 il_apm_init(il);
188
189 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
190 spin_unlock_irqrestore(&il->lock, flags);
191
192 il4965_set_pwr_vmain(il);
193 il4965_nic_config(il);
194
195
196 if (!rxq->bd) {
197 ret = il_rx_queue_alloc(il);
198 if (ret) {
199 IL_ERR("Unable to initialize Rx queue\n");
200 return -ENOMEM;
201 }
202 } else
203 il4965_rx_queue_reset(il, rxq);
204
205 il4965_rx_replenish(il);
206
207 il4965_rx_init(il, rxq);
208
209 spin_lock_irqsave(&il->lock, flags);
210
211 rxq->need_update = 1;
212 il_rx_queue_update_write_ptr(il, rxq);
213
214 spin_unlock_irqrestore(&il->lock, flags);
215
216
217 if (!il->txq) {
218 ret = il4965_txq_ctx_alloc(il);
219 if (ret)
220 return ret;
221 } else
222 il4965_txq_ctx_reset(il);
223
224 set_bit(S_INIT, &il->status);
225
226 return 0;
227}
228
229
230
231
232static inline __le32
233il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
234{
235 return cpu_to_le32((u32) (dma_addr >> 8));
236}
237
238
239
240
241
242
243
244
245
246
247
248
/*
 * il4965_rx_queue_restock - refill the hardware Rx ring from rx_free
 *
 * Moves buffers from the rx_free list into the RBD circular buffer
 * until the ring is full or rx_free is exhausted.  Schedules the
 * replenish worker when the free pool drops to the low watermark, and
 * updates the device write pointer once at least 8 new slots have been
 * queued (the pointer is advanced in multiples of 8).
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The slot being overwritten must not still own a page. */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell
	 * it.  Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
290
291
292
293
294
295
296
297
298
/*
 * il4965_rx_allocate - back rx_used buffers with freshly mapped pages
 *
 * Repeatedly takes a buffer from rxq->rx_used, attaches a newly
 * allocated, DMA-mapped page to it, and moves it to rxq->rx_free.
 * Stops when rx_used is empty or allocation/mapping fails.
 *
 * @priority is the GFP mask: GFP_KERNEL from the replenish worker (may
 * sleep), GFP_ATOMIC from the Rx interrupt path.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	dma_addr_t page_dma;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Nothing left to fill? */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation-failure warnings while the free
		 * pool is still comfortably above the low watermark. */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- restock
			 * will schedule a replenish if more buffers are
			 * still needed. */
			return;
		}

		/* Get physical address of the receive buffer */
		page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
			__free_pages(page, il->hw_params.rx_page_order);
			break;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* rx_used may have been drained while we were unlocked;
		 * if so, undo the allocation and mapping. */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			pci_unmap_page(il->pci_dev, page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}

		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		BUG_ON(rxb->page);

		rxb->page = page;
		rxb->page_dma = page_dma;
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
380
/*
 * il4965_rx_replenish - allocate Rx buffers and restock the ring
 *
 * Process context only: allocates with GFP_KERNEL (may sleep), then
 * restocks the device ring under il->lock.
 */
void
il4965_rx_replenish(struct il_priv *il)
{
	unsigned long flags;

	il4965_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il4965_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
392
/*
 * il4965_rx_replenish_now - atomic-context variant of rx_replenish
 *
 * Allocates with GFP_ATOMIC and restocks without taking il->lock; used
 * from the Rx handling path.
 */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
400
401
402
403
404
405
406void
407il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
408{
409 int i;
410 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
411 if (rxq->pool[i].page != NULL) {
412 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
413 PAGE_SIZE << il->hw_params.rx_page_order,
414 PCI_DMA_FROMDEVICE);
415 __il_free_pages(il, rxq->pool[i].page);
416 rxq->pool[i].page = NULL;
417 }
418 }
419
420 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
421 rxq->bd_dma);
422 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
423 rxq->rb_stts, rxq->rb_stts_dma);
424 rxq->bd = NULL;
425 rxq->rb_stts = NULL;
426}
427
428int
429il4965_rxq_stop(struct il_priv *il)
430{
431 int ret;
432
433 _il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
434 ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
435 FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
436 FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
437 1000);
438 if (ret < 0)
439 IL_ERR("Can't stop Rx DMA.\n");
440
441 return 0;
442}
443
444int
445il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
446{
447 int idx = 0;
448 int band_offset = 0;
449
450
451 if (rate_n_flags & RATE_MCS_HT_MSK) {
452 idx = (rate_n_flags & 0xff);
453 return idx;
454
455 } else {
456 if (band == NL80211_BAND_5GHZ)
457 band_offset = IL_FIRST_OFDM_RATE;
458 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
459 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
460 return idx - band_offset;
461 }
462
463 return -1;
464}
465
/*
 * il4965_calc_rssi - compute the received signal strength in dBm
 *
 * Picks the strongest RSSI among the antennas flagged valid in the PHY
 * response, then subtracts the AGC gain and the constant 4965 RSSI
 * offset to arrive at a dBm figure.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* Data from PHY/DSP regarding signal strength, etc.;
	 * contents are always there, not configurable by host. */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* Find max rssi among up to 3 possible receivers; rssi_info
	 * stores one entry per receiver at even offsets (i << 1). */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant offset.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
500
/*
 * il4965_translate_rx_status - translate MPDU decrypt status bits
 *
 * The N_RX_MPDU path reports hardware decryption status in a different
 * bit layout than N_RX; translate @decrypt_in into the layout the rest
 * of the driver understands and return it.
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through - if TTAK OK, check ICV like WEP/default */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
560
561#define SMALL_PACKET_SIZE 256
562
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame to mac80211
 *
 * Small frames (<= SMALL_PACKET_SIZE bytes) are copied into a fresh
 * skb; larger frames attach the Rx page as a fragment, transferring
 * page ownership to the skb.  Frames are dropped while the interface
 * is closed or when hardware decryption reported a failure.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (len <= SMALL_PACKET_SIZE) {
		skb_put_data(skb, hdr, len);
	} else {
		/* Attach the page-backed payload as a fragment; the page
		 * now belongs to the skb, so drop our accounting ref. */
		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
				len, PAGE_SIZE << il->hw_params.rx_page_order);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
}
607
608
609
/*
 * il4965_hdl_rx - handle a received-frame notification
 *
 * Called for N_RX (legacy frames, PHY data inline) and for MPDU
 * notifications, which rely on the PHY data cached from the most
 * recent N_RX_PHY (see il4965_hdl_rx_phy).  Validates the frame,
 * fills in a struct ieee80211_rx_status and passes the frame up.
 */
static void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/*
	 * N_RX packs phy_res, the 802.11 header and a trailing status
	 * word into pkt->u.raw; the MPDU form packs an
	 * il_rx_mpdu_res_start header instead and reuses the cached
	 * PHY result.
	 */
	if (pkt->hdr.cmd == N_RX) {
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		/* Status word sits right after the frame payload. */
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		/* MPDU status uses a different bit layout; translate it. */
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d\n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	/* Drop frames with CRC errors or RXE FIFO overflows. */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
	    NL80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* NOTE(review): the device TSF is not propagated to mac80211
	 * here (no RX_FLAG_MACTIME_* flag is set); mactime is still
	 * recorded for the debug print below. */

	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/*
	 * The "antenna" field in the phy flags is really a bitmask of
	 * the Rx chains the frame was received on, not a single antenna
	 * number; it is passed through to radiotap as-is.
	 */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.encoding = RX_ENC_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.bw = RATE_INFO_BW_40;
	else
		rx_status.bw = RATE_INFO_BW_20;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
		/* Subframes of one A-MPDU share a single PHY response,
		 * so they share the same ampdu_ref value. */
		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status.ampdu_reference = il->_4965.ampdu_ref;
	}

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
741
742
743
744static void
745il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
746{
747 struct il_rx_pkt *pkt = rxb_addr(rxb);
748 il->_4965.last_phy_res_valid = true;
749 il->_4965.ampdu_ref++;
750 memcpy(&il->_4965.last_phy_res, pkt->u.raw,
751 sizeof(struct il_rx_phy_res));
752}
753
/*
 * il4965_get_channels_for_scan - fill the scan command's channel list
 *
 * Walks the cfg80211 scan request, keeping only channels on @band that
 * the channel table marks valid, and writes one struct il_scan_channel
 * per usable channel into @scan_ch (advanced as a cursor).
 *
 * Returns the number of channels added.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum nl80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must exceed active dwell. */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* Scan passively unless an active scan was requested and
		 * the channel permits initiating radiation. */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_NO_IR))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* Fixed per-band tx gain values (bit 5 plus a DSP gain
		 * selector in bits 4:3). */
		if (band == NL80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
832
833static void
834il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
835{
836 int i;
837 u8 ind = *ant;
838
839 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
840 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
841 if (valid & BIT(ind)) {
842 *ant = ind;
843 return;
844 }
845 }
846}
847
/*
 * il4965_request_scan - build and send the C_SCAN host command
 *
 * Assembles the scan command (quiet/dwell/suspend timing, direct-probe
 * SSIDs, TX rate and antenna, RX chain selection, probe request IEs
 * and the channel list) from il->scan_request and sends it
 * synchronously.  Caller must hold il->mutex.
 *
 * Returns 0 on success, -ENOMEM if the scan buffer cannot be
 * allocated, -EIO on an invalid band or empty channel list, or the
 * error from il_send_cmd_sync().
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum nl80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	/* Lazily allocate the (huge) scan command buffer. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* Encode suspend_time: beacon-interval quotient in the
		 * upper bits, remainder scaled by 1024 below. */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* Skip empty SSIDs (wildcard is always scanned) */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick the probe TX rate for the scan band; CCK 1M on 2.4 GHz
	 * unless the channel mode is pure-40MHz, OFDM 6M on 5 GHz. */
	switch (il->scan_band) {
	case NL80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case NL80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/* good_CRC_th: number of received frames required during dwell
	 * before probes are sent on a passive channel.  Use
	 * IL_GOOD_CRC_TH_NEVER (rather than disabling the threshold)
	 * for passive scans. */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate the TX antenna between scans. */
	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);

	/* In power-save, receive on a single (calibrated) chain. */
	if (test_bit(S_POWER_PMI, &il->status)) {

		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* Build the RXON rx_chain word for the scan command. */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	/* Channel list follows the probe request in scan->data. */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1036
1037int
1038il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1039 bool add)
1040{
1041 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1042
1043 if (add)
1044 return il4965_add_bssid_station(il, vif->bss_conf.bssid,
1045 &vif_priv->ibss_bssid_sta_id);
1046 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1047 vif->bss_conf.bssid);
1048}
1049
1050void
1051il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1052{
1053 lockdep_assert_held(&il->sta_lock);
1054
1055 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1056 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1057 else {
1058 D_TX("free more than tfds_in_queue (%u:%d)\n",
1059 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1060 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1061 }
1062}
1063
1064#define IL_TX_QUEUE_MSK 0xfffff
1065
1066static bool
1067il4965_is_single_rx_stream(struct il_priv *il)
1068{
1069 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1070 il->current_ht_config.single_chain_sufficient;
1071}
1072
1073#define IL_NUM_RX_CHAINS_MULTIPLE 3
1074#define IL_NUM_RX_CHAINS_SINGLE 2
1075#define IL_NUM_IDLE_CHAINS_DUAL 2
1076#define IL_NUM_IDLE_CHAINS_SINGLE 1
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088static int
1089il4965_get_active_rx_chain_count(struct il_priv *il)
1090{
1091
1092 if (il4965_is_single_rx_stream(il))
1093 return IL_NUM_RX_CHAINS_SINGLE;
1094 else
1095 return IL_NUM_RX_CHAINS_MULTIPLE;
1096}
1097
1098
1099
1100
1101
1102static int
1103il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1104{
1105
1106 switch (il->current_ht_config.smps) {
1107 case IEEE80211_SMPS_STATIC:
1108 case IEEE80211_SMPS_DYNAMIC:
1109 return IL_NUM_IDLE_CHAINS_SINGLE;
1110 case IEEE80211_SMPS_OFF:
1111 return active_cnt;
1112 default:
1113 WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
1114 return active_cnt;
1115 }
1116}
1117
1118
1119static u8
1120il4965_count_chain_bitmap(u32 chain_bitmap)
1121{
1122 u8 res;
1123 res = (chain_bitmap & BIT(0)) >> 0;
1124 res += (chain_bitmap & BIT(1)) >> 1;
1125 res += (chain_bitmap & BIT(2)) >> 2;
1126 res += (chain_bitmap & BIT(3)) >> 3;
1127 return res;
1128}
1129
1130
1131
1132
1133
1134
1135
/*
 * il4965_set_rxon_chain - program the RXON rx_chain field in staging
 *
 * Determines how many Rx chains to use while receiving and how many to
 * keep awake when idle, based on SMPS mode, chain-noise calibration
 * results and power-save state, then encodes the result (plus the
 * MIMO-force bit when appropriate) into il->staging.rx_chain.
 */
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are
	 * connected; afterwards, chain-noise calibration reports which
	 * chains actually are. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* Clamp the requested counts to the number of chains the
	 * calibration says are actually usable. */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	il->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only with multiple streams, enough active chains,
	 * and no power-save (CAM = continuously awake mode). */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1186
/*
 * il4965_get_fh_string - symbolic name for an FH register offset
 *
 * IL_CMD expands each register into a "case REG: return name" arm;
 * offsets not in the table map to "UNKNOWN".
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1204
/*
 * il4965_dump_fh - dump flow-handler (FH) register values
 *
 * With debugging compiled in and @display set, formats the dump into a
 * kmalloc'ed buffer stored in *buf (ownership passes to the caller)
 * and returns the number of bytes written, or -ENOMEM if allocation
 * fails.  Otherwise logs each register via IL_ERR and returns 0.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* Room for one 48-char line per register plus a header. */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      " %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1249
1250static void
1251il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
1252{
1253 struct il_rx_pkt *pkt = rxb_addr(rxb);
1254 struct il_missed_beacon_notif *missed_beacon;
1255
1256 missed_beacon = &pkt->u.missed_beacon;
1257 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
1258 il->missed_beacon_threshold) {
1259 D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
1260 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
1261 le32_to_cpu(missed_beacon->total_missed_becons),
1262 le32_to_cpu(missed_beacon->num_recvd_beacons),
1263 le32_to_cpu(missed_beacon->num_expected_beacons));
1264 if (!test_bit(S_SCANNING, &il->status))
1265 il4965_init_sensitivity(il);
1266 }
1267}
1268
1269
1270
1271
1272static void
1273il4965_rx_calc_noise(struct il_priv *il)
1274{
1275 struct stats_rx_non_phy *rx_info;
1276 int num_active_rx = 0;
1277 int total_silence = 0;
1278 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1279 int last_rx_noise;
1280
1281 rx_info = &(il->_4965.stats.rx.general);
1282 bcn_silence_a =
1283 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1284 bcn_silence_b =
1285 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1286 bcn_silence_c =
1287 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
1288
1289 if (bcn_silence_a) {
1290 total_silence += bcn_silence_a;
1291 num_active_rx++;
1292 }
1293 if (bcn_silence_b) {
1294 total_silence += bcn_silence_b;
1295 num_active_rx++;
1296 }
1297 if (bcn_silence_c) {
1298 total_silence += bcn_silence_c;
1299 num_active_rx++;
1300 }
1301
1302
1303 if (num_active_rx)
1304 last_rx_noise = (total_silence / num_active_rx) - 107;
1305 else
1306 last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1307
1308 D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
1309 bcn_silence_b, bcn_silence_c, last_rx_noise);
1310}
1311
1312#ifdef CONFIG_IWLEGACY_DEBUGFS
1313
1314
1315
1316
1317
/*
 * il4965_accumulative_stats - fold a stats notification into accumulators
 *
 * Walks the incoming notification (@stats) and the previously stored
 * copy in parallel, one 32-bit word at a time (skipping the leading
 * flag word), adding each positive delta into accum_stats and keeping
 * the largest per-field delta in max_delta.  Temperature and timestamp
 * are copied verbatim since they are not counters.
 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;

	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	/* All five cursors advance in lock-step over the structures. */
	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		/* Only count forward movement (counters that grew). */
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative stats for "no-counter" type stats */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
1352#endif
1353
/*
 * Handle a statistics notification from uCode: store the new block,
 * re-arm the periodic stats timer and, when not scanning, recompute the
 * noise estimate and schedule run-time calibration work.
 */
static void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	const int recalib_seconds = 60;
	bool change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* A temperature or HT40-mode change triggers a temperature
	 * calibration at the bottom of this function. */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	/* Must run before the memcpy below: it diffs old vs. new stats. */
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* Latch the new statistics block as the current one. */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/* Re-arm the periodic timer that watches for uCode statistics
	 * going silent (fires after recalib_seconds). */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(recalib_seconds * 1000));

	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}

	if (change)
		il4965_temperature_calib(il);
}
1395
1396static void
1397il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
1398{
1399 struct il_rx_pkt *pkt = rxb_addr(rxb);
1400
1401 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
1402#ifdef CONFIG_IWLEGACY_DEBUGFS
1403 memset(&il->_4965.accum_stats, 0,
1404 sizeof(struct il_notif_stats));
1405 memset(&il->_4965.delta_stats, 0,
1406 sizeof(struct il_notif_stats));
1407 memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
1408#endif
1409 D_RX("Statistics have been cleared\n");
1410 }
1411 il4965_hdl_stats(il, rxb);
1412}
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
/* 802.11 TID (0..7) -> mac80211 access category (standard UP-to-AC map). */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0: best effort */
	IEEE80211_AC_BK,	/* TID 1: background */
	IEEE80211_AC_BK,	/* TID 2: background */
	IEEE80211_AC_BE,	/* TID 3: best effort */
	IEEE80211_AC_VI,	/* TID 4: video */
	IEEE80211_AC_VI,	/* TID 5: video */
	IEEE80211_AC_VO,	/* TID 6: voice */
	IEEE80211_AC_VO		/* TID 7: voice */
};
1451
1452static inline int
1453il4965_get_ac_from_tid(u16 tid)
1454{
1455 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1456 return tid_to_ac[tid];
1457
1458
1459 return -EINVAL;
1460}
1461
1462static inline int
1463il4965_get_fifo_from_tid(u16 tid)
1464{
1465 static const u8 ac_to_fifo[] = {
1466 IL_TX_FIFO_VO,
1467 IL_TX_FIFO_VI,
1468 IL_TX_FIFO_BE,
1469 IL_TX_FIFO_BK,
1470 };
1471
1472 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1473 return ac_to_fifo[tid_to_ac[tid]];
1474
1475
1476 return -EINVAL;
1477}
1478
1479
1480
1481
1482static void
1483il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
1484 struct il_tx_cmd *tx_cmd,
1485 struct ieee80211_tx_info *info,
1486 struct ieee80211_hdr *hdr, u8 std_id)
1487{
1488 __le16 fc = hdr->frame_control;
1489 __le32 tx_flags = tx_cmd->tx_flags;
1490
1491 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1492 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1493 tx_flags |= TX_CMD_FLG_ACK_MSK;
1494 if (ieee80211_is_mgmt(fc))
1495 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1496 if (ieee80211_is_probe_resp(fc) &&
1497 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1498 tx_flags |= TX_CMD_FLG_TSF_MSK;
1499 } else {
1500 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1501 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1502 }
1503
1504 if (ieee80211_is_back_req(fc))
1505 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
1506
1507 tx_cmd->sta_id = std_id;
1508 if (ieee80211_has_morefrags(fc))
1509 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1510
1511 if (ieee80211_is_data_qos(fc)) {
1512 u8 *qc = ieee80211_get_qos_ctl(hdr);
1513 tx_cmd->tid_tspec = qc[0] & 0xf;
1514 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
1515 } else {
1516 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1517 }
1518
1519 il_tx_cmd_protection(il, info, fc, &tx_flags);
1520
1521 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
1522 if (ieee80211_is_mgmt(fc)) {
1523 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
1524 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
1525 else
1526 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
1527 } else {
1528 tx_cmd->timeout.pm_frame_timeout = 0;
1529 }
1530
1531 tx_cmd->driver_txop = 0;
1532 tx_cmd->tx_flags = tx_flags;
1533 tx_cmd->next_frame_len = 0;
1534}
1535
1536static void
1537il4965_tx_cmd_build_rate(struct il_priv *il,
1538 struct il_tx_cmd *tx_cmd,
1539 struct ieee80211_tx_info *info,
1540 struct ieee80211_sta *sta,
1541 __le16 fc)
1542{
1543 const u8 rts_retry_limit = 60;
1544 u32 rate_flags;
1545 int rate_idx;
1546 u8 data_retry_limit;
1547 u8 rate_plcp;
1548
1549
1550 if (ieee80211_is_probe_resp(fc))
1551 data_retry_limit = 3;
1552 else
1553 data_retry_limit = IL4965_DEFAULT_TX_RETRY;
1554 tx_cmd->data_retry_limit = data_retry_limit;
1555
1556 tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
1557
1558
1559
1560 if (ieee80211_is_data(fc)) {
1561 tx_cmd->initial_rate_idx = 0;
1562 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
1563 return;
1564 }
1565
1566
1567
1568
1569
1570
1571
1572 rate_idx = info->control.rates[0].idx;
1573 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
1574 || rate_idx > RATE_COUNT_LEGACY)
1575 rate_idx = rate_lowest_index(&il->bands[info->band], sta);
1576
1577 if (info->band == NL80211_BAND_5GHZ)
1578 rate_idx += IL_FIRST_OFDM_RATE;
1579
1580 rate_plcp = il_rates[rate_idx].plcp;
1581
1582 rate_flags = 0;
1583
1584
1585 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
1586 rate_flags |= RATE_MCS_CCK_MSK;
1587
1588
1589 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
1590 rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
1591
1592
1593 tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
1594}
1595
/*
 * Fill the hardware-crypto fields of the TX command from the key that
 * mac80211 attached to this frame (info->control.hw_key).
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* Derive the per-packet phase-2 TKIP key for this frame. */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through - WEP104 is WEP40 plus the 128-bit key flag */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);
		/* WEP key material starts at offset 3 of the key array. */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
1637
1638
1639
1640
1641int
1642il4965_tx_skb(struct il_priv *il,
1643 struct ieee80211_sta *sta,
1644 struct sk_buff *skb)
1645{
1646 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1647 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1648 struct il_station_priv *sta_priv = NULL;
1649 struct il_tx_queue *txq;
1650 struct il_queue *q;
1651 struct il_device_cmd *out_cmd;
1652 struct il_cmd_meta *out_meta;
1653 struct il_tx_cmd *tx_cmd;
1654 int txq_id;
1655 dma_addr_t phys_addr;
1656 dma_addr_t txcmd_phys;
1657 dma_addr_t scratch_phys;
1658 u16 len, firstlen, secondlen;
1659 u16 seq_number = 0;
1660 __le16 fc;
1661 u8 hdr_len;
1662 u8 sta_id;
1663 u8 wait_write_ptr = 0;
1664 u8 tid = 0;
1665 u8 *qc = NULL;
1666 unsigned long flags;
1667 bool is_agg = false;
1668
1669 spin_lock_irqsave(&il->lock, flags);
1670 if (il_is_rfkill(il)) {
1671 D_DROP("Dropping - RF KILL\n");
1672 goto drop_unlock;
1673 }
1674
1675 fc = hdr->frame_control;
1676
1677#ifdef CONFIG_IWLEGACY_DEBUG
1678 if (ieee80211_is_auth(fc))
1679 D_TX("Sending AUTH frame\n");
1680 else if (ieee80211_is_assoc_req(fc))
1681 D_TX("Sending ASSOC frame\n");
1682 else if (ieee80211_is_reassoc_req(fc))
1683 D_TX("Sending REASSOC frame\n");
1684#endif
1685
1686 hdr_len = ieee80211_hdrlen(fc);
1687
1688
1689 if (!ieee80211_is_data(fc))
1690 sta_id = il->hw_params.bcast_id;
1691 else {
1692
1693 sta_id = il_sta_id_or_broadcast(il, sta);
1694
1695 if (sta_id == IL_INVALID_STATION) {
1696 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
1697 goto drop_unlock;
1698 }
1699 }
1700
1701 D_TX("station Id %d\n", sta_id);
1702
1703 if (sta)
1704 sta_priv = (void *)sta->drv_priv;
1705
1706 if (sta_priv && sta_priv->asleep &&
1707 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1718 }
1719
1720
1721 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1722
1723
1724 txq_id = skb_get_queue_mapping(skb);
1725
1726
1727 spin_lock(&il->sta_lock);
1728
1729 if (ieee80211_is_data_qos(fc)) {
1730 qc = ieee80211_get_qos_ctl(hdr);
1731 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1732 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1733 spin_unlock(&il->sta_lock);
1734 goto drop_unlock;
1735 }
1736 seq_number = il->stations[sta_id].tid[tid].seq_number;
1737 seq_number &= IEEE80211_SCTL_SEQ;
1738 hdr->seq_ctrl =
1739 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
1740 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1741 seq_number += 0x10;
1742
1743 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1744 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1745 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1746 is_agg = true;
1747 }
1748 }
1749
1750 txq = &il->txq[txq_id];
1751 q = &txq->q;
1752
1753 if (unlikely(il_queue_space(q) < q->high_mark)) {
1754 spin_unlock(&il->sta_lock);
1755 goto drop_unlock;
1756 }
1757
1758 if (ieee80211_is_data_qos(fc)) {
1759 il->stations[sta_id].tid[tid].tfds_in_queue++;
1760 if (!ieee80211_has_morefrags(fc))
1761 il->stations[sta_id].tid[tid].seq_number = seq_number;
1762 }
1763
1764 spin_unlock(&il->sta_lock);
1765
1766 txq->skbs[q->write_ptr] = skb;
1767
1768
1769 out_cmd = txq->cmd[q->write_ptr];
1770 out_meta = &txq->meta[q->write_ptr];
1771 tx_cmd = &out_cmd->cmd.tx;
1772 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1773 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1774
1775
1776
1777
1778
1779
1780
1781 out_cmd->hdr.cmd = C_TX;
1782 out_cmd->hdr.sequence =
1783 cpu_to_le16((u16)
1784 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
1785
1786
1787 memcpy(tx_cmd->hdr, hdr, hdr_len);
1788
1789
1790 tx_cmd->len = cpu_to_le16((u16) skb->len);
1791
1792 if (info->control.hw_key)
1793 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1794
1795
1796 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1797
1798 il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
1810 firstlen = (len + 3) & ~3;
1811
1812
1813 if (firstlen != len)
1814 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1815
1816
1817
1818 txcmd_phys =
1819 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
1820 PCI_DMA_BIDIRECTIONAL);
1821 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
1822 goto drop_unlock;
1823
1824
1825
1826 secondlen = skb->len - hdr_len;
1827 if (secondlen > 0) {
1828 phys_addr =
1829 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
1830 PCI_DMA_TODEVICE);
1831 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
1832 goto drop_unlock;
1833 }
1834
1835
1836
1837 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
1838 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1839 dma_unmap_len_set(out_meta, len, firstlen);
1840 if (secondlen)
1841 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
1842 0, 0);
1843
1844 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1845 txq->need_update = 1;
1846 } else {
1847 wait_write_ptr = 1;
1848 txq->need_update = 0;
1849 }
1850
1851 scratch_phys =
1852 txcmd_phys + sizeof(struct il_cmd_header) +
1853 offsetof(struct il_tx_cmd, scratch);
1854
1855
1856 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
1857 PCI_DMA_BIDIRECTIONAL);
1858 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1859 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1860
1861 il_update_stats(il, true, fc, skb->len);
1862
1863 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
1864 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1865 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
1866 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
1867
1868
1869 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1870 il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
1871
1872 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
1873 PCI_DMA_BIDIRECTIONAL);
1874
1875
1876 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1877 il_txq_update_write_ptr(il, txq);
1878 spin_unlock_irqrestore(&il->lock, flags);
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894 if (sta_priv && sta_priv->client && !is_agg)
1895 atomic_inc(&sta_priv->pending_frames);
1896
1897 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1898 if (wait_write_ptr) {
1899 spin_lock_irqsave(&il->lock, flags);
1900 txq->need_update = 1;
1901 il_txq_update_write_ptr(il, txq);
1902 spin_unlock_irqrestore(&il->lock, flags);
1903 } else {
1904 il_stop_queue(il, txq);
1905 }
1906 }
1907
1908 return 0;
1909
1910drop_unlock:
1911 spin_unlock_irqrestore(&il->lock, flags);
1912 return -1;
1913}
1914
1915static inline int
1916il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1917{
1918 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1919 GFP_KERNEL);
1920 if (!ptr->addr)
1921 return -ENOMEM;
1922 ptr->size = size;
1923 return 0;
1924}
1925
1926static inline void
1927il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1928{
1929 if (unlikely(!ptr->addr))
1930 return;
1931
1932 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1933 memset(ptr, 0, sizeof(*ptr));
1934}
1935
1936
1937
1938
1939
1940
1941void
1942il4965_hw_txq_ctx_free(struct il_priv *il)
1943{
1944 int txq_id;
1945
1946
1947 if (il->txq) {
1948 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1949 if (txq_id == il->cmd_queue)
1950 il_cmd_queue_free(il);
1951 else
1952 il_tx_queue_free(il, txq_id);
1953 }
1954 il4965_free_dma_ptr(il, &il->kw);
1955
1956 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1957
1958
1959 il_free_txq_mem(il);
1960}
1961
1962
1963
1964
1965
1966
1967
1968
/*
 * Allocate and initialize all TX queue resources: scheduler byte-count
 * tables, the keep-warm buffer, the txq array and every queue's rings.
 * Returns 0 on success or a negative errno; on failure everything
 * partially allocated is released again.
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
	int ret, txq_id;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	il4965_hw_txq_ctx_free(il);

	ret =
	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
				 il->hw_params.scd_bc_tbls_size);
	if (ret) {
		IL_ERR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}

	/* Allocate the keep-warm buffer. */
	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
	if (ret) {
		IL_ERR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* Allocate the tx queue structures. */
	ret = il_alloc_txq_mem(il);
	if (ret)
		goto error;

	spin_lock_irqsave(&il->lock, flags);

	/* Turn off all Tx DMA scheduling while reconfiguring. */
	il4965_txq_set_sched(il, 0);

	/* Tell the NIC where to find the keep-warm buffer. */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Init all Tx queues, including the command queue. */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		ret = il_tx_queue_init(il, txq_id);
		if (ret) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	/* il4965_hw_txq_ctx_free() also frees kw and scd_bc_tbls; the
	 * explicit frees below are then harmless no-ops because
	 * il4965_free_dma_ptr() zeroes the descriptor after freeing. */
	il4965_hw_txq_ctx_free(il);
	il4965_free_dma_ptr(il, &il->kw);
error_kw:
	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
2026
2027void
2028il4965_txq_ctx_reset(struct il_priv *il)
2029{
2030 int txq_id;
2031 unsigned long flags;
2032
2033 spin_lock_irqsave(&il->lock, flags);
2034
2035
2036 il4965_txq_set_sched(il, 0);
2037
2038 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2039
2040 spin_unlock_irqrestore(&il->lock, flags);
2041
2042
2043 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2044 il_tx_queue_reset(il, txq_id);
2045}
2046
2047static void
2048il4965_txq_ctx_unmap(struct il_priv *il)
2049{
2050 int txq_id;
2051
2052 if (!il->txq)
2053 return;
2054
2055
2056 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2057 if (txq_id == il->cmd_queue)
2058 il_cmd_queue_unmap(il);
2059 else
2060 il_tx_queue_unmap(il, txq_id);
2061}
2062
2063
2064
2065
/*
 * Stop the TX scheduler and all TX DMA channels.  Uses the lock-free
 * _il_* register accessors; any required locking is the caller's
 * responsibility.
 */
void
il4965_txq_ctx_stop(struct il_priv *il)
{
	int ch, ret;

	/* Turn off all TX queue scheduling. */
	_il_wr_prph(il, IL49_SCD_TXFACT, 0);

	/* Stop each TX DMA channel and wait (up to 1000 us) for it to
	 * report idle; log but continue on timeout. */
	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
		_il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret =
		    _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 1000);
		if (ret < 0)
			IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
			       ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
	}
}
2086
2087
2088
2089
2090
2091
2092
2093static int
2094il4965_txq_ctx_activate_free(struct il_priv *il)
2095{
2096 int txq_id;
2097
2098 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2099 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2100 return txq_id;
2101 return -1;
2102}
2103
2104
2105
2106
/*
 * Detach a TX queue from the 4965 scheduler: clear its "active" status
 * bit while keeping the scheduler-enable bit set for the queue.
 */
static void
il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
{
	/* Stop the queue, but keep BIT(SCD_ACT_EN) set so it can be
	 * reconfigured and reactivated later. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
2116
2117
2118
2119
2120static int
2121il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2122{
2123 u32 tbl_dw_addr;
2124 u32 tbl_dw;
2125 u16 scd_q2ratid;
2126
2127 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2128
2129 tbl_dw_addr =
2130 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2131
2132 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2133
2134 if (txq_id & 0x1)
2135 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2136 else
2137 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2138
2139 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2140
2141 return 0;
2142}
2143
2144
2145
2146
2147
2148
2149
/*
 * Configure a (previously claimed) TX queue for aggregation on behalf
 * of <sta_id, tid>, starting at sequence number ssn_idx.  Returns 0 on
 * success, -EINVAL when txq_id is outside the AMPDU queue range, or the
 * error from enabling the TID in the device's station table.
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
		      int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify the device's station table to allow Tx on this TID. */
	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->lock, flags);

	/* Stop this Tx queue before configuring it. */
	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Map receiver-address/TID to this queue. */
	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

	/* Mark this queue as a chain-building (aggregation) queue. */
	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at the idx corresponding to the start
	 * sequence number (assumes ssn_idx is valid, != 0xFFF). */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	/* Set up the Tx win size and frame limit for this queue. */
	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Map the queue to the requested FIFO and mark it active. */
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
2215
/*
 * mac80211 TX-AGG start entry: claim an aggregation queue for
 * <sta, tid>, report the starting SSN back via *ssn, and either finish
 * the ADDBA handshake immediately (HW queue already empty) or defer it
 * until the pending frames drain (IL_EMPTYING_HW_QUEUE_ADDBA; completed
 * later from il4965_txq_check_empty()).
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	/* Also rejects out-of-range TIDs (returns negative). */
	tx_fifo = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Claim a free hardware AMPDU queue. */
	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	*ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		/* Queue already drained: complete ADDBA right away. */
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		/* ADDBA completes later, once the queue empties. */
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2278
2279
2280
2281
2282
/*
 * Tear down an aggregation TX queue: stop its scheduler, remove it from
 * the chain-building set, rewind its pointers to ssn_idx and hand it
 * back as a normal FIFO queue.  Caller must hold il->lock.
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	il4965_tx_queue_stop_scheduler(il, txq_id);

	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Rewind both pointers to the start sequence number
	 * (assumes ssn_idx is valid, != 0xFFF). */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);

	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	il_txq_ctx_deactivate(il, txq_id);
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

	return 0;
}
2311
/*
 * mac80211 TX-AGG stop entry.  If the HW queue still holds frames, only
 * mark the session as draining (IL_EMPTYING_HW_QUEUE_DELBA) - the real
 * teardown then happens from il4965_txq_check_empty().  Otherwise tear
 * the queue down immediately and notify mac80211.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* Also rejects out-of-range TIDs (returns negative). */
	tx_fifo_id = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	/* SN of the next frame = end of the transmitted window. */
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * Session was still waiting for the queue to drain
		 * after ADDBA, so the HW queue was never configured for
		 * aggregation - nothing to tear down, just turn it off.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* Queue not yet empty: defer teardown until it drains. */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* Lock hand-off: IRQs stay disabled from the irqsave above;
	 * sta_lock is dropped plain, il->lock taken plain, and the
	 * saved 'flags' are restored with the irqrestore below. */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * The only way il4965_txq_agg_disable() can fail is a queue
	 * number out of range, which happens if uCode was reloaded and
	 * the station info lost; in that case there is nothing to
	 * deactivate, so the return value is deliberately ignored to
	 * keep mac80211's cleanup mechanism working.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
2389
/*
 * Called after frames are reclaimed from a TX queue.  If an aggregation
 * session on <sta_id, tid> was waiting for the queue to drain, complete
 * the deferred DELBA or ADDBA flow now.  Caller holds il->sta_lock.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* Last packet of the aggregated HW queue reclaimed:
		 * tear down the queue and finish the DELBA. */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* Last pending frame reclaimed: finish the ADDBA. */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	}

	return 0;
}
2425
2426static void
2427il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
2428{
2429 struct ieee80211_sta *sta;
2430 struct il_station_priv *sta_priv;
2431
2432 rcu_read_lock();
2433 sta = ieee80211_find_sta(il->vif, addr1);
2434 if (sta) {
2435 sta_priv = (void *)sta->drv_priv;
2436
2437 if (sta_priv->client &&
2438 atomic_dec_return(&sta_priv->pending_frames) == 0)
2439 ieee80211_sta_block_awake(il->hw, sta, false);
2440 }
2441 rcu_read_unlock();
2442}
2443
2444static void
2445il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
2446{
2447 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2448
2449 if (!is_agg)
2450 il4965_non_agg_tx_status(il, hdr->addr1);
2451
2452 ieee80211_tx_status_irqsafe(il->hw, skb);
2453}
2454
/*
 * Reclaim entries [q->read_ptr .. idx] of a TX queue after uCode
 * reported them as transmitted, handing each skb back to mac80211.
 * Returns the number of QoS-data frames freed (the caller uses it to
 * adjust tfds_in_queue accounting).
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Advance read_ptr one TFD at a time until just past 'idx'. */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];

		if (WARN_ON_ONCE(skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		/* Only QoS-data frames count toward tfds_in_queue. */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* Entries on AMPDU queues are aggregated frames. */
		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}
	return nfreed;
}
2490
2491
2492
2493
2494
2495
2496
/*
 * Process a compressed block-ack response for one aggregation session:
 * align the BA bitmap with the frames actually sent, count ACKed
 * frames, and fill the tx_info of the first frame in the batch so
 * mac80211 can account the whole A-MPDU.
 *
 * Returns 0 on success, -EINVAL when no BA was expected, or -1 when the
 * bitmap cannot cover all sent frames.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
				     struct il_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IL_ERR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived. */
	agg->wait_for_ba = 0;
	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Shift needed to align the BA bitmap with our Tx window bits
	 * (both are indexed by sequence number modulo window). */
	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
	if (sh < 0)
		sh += 0x100;

	/* A 64-bit bitmap cannot describe more frames than this. */
	if (agg->frame_count > (64 - sh)) {
		D_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* Intersect the BA bitmap with the frames we transmitted. */
	sent_bitmap = bitmap & agg->bitmap;

	/* Walk the per-frame bits, counting ACKs. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	/* Report the aggregate result on the batch's first frame. */
	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

	return 0;
}
2559
2560static inline bool
2561il4965_is_tx_success(u32 status)
2562{
2563 status &= TX_STATUS_MSK;
2564 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
2565}
2566
/*
 * Look an address up in the driver's station table and return its
 * index, or IL_INVALID_STATION when absent or not yet usable by uCode.
 * Broadcast addresses map directly to the broadcast station id.
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IL_INVALID_STATION;
	unsigned long flags;

	/* In IBSS mode the first entries are reserved; peer lookups
	 * start at IL_STA_ID. */
	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
		start = IL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return il->hw_params.bcast_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = start; i < il->hw_params.max_stations; i++)
		if (il->stations[i].used &&
		    ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
			ret = i;
			goto out;
		}

	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
	/*
	 * Commands interacting with stations may arrive before the
	 * add-station processing completed; treat such entries (not
	 * uCode-active, or still in progress) as not found.
	 */
	if (ret != IL_INVALID_STATION &&
	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
		IL_ERR("Requested station info for sta %d before ready.\n",
		       ret);
		ret = IL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2608
2609static int
2610il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2611{
2612 if (il->iw_mode == NL80211_IFTYPE_STATION)
2613 return IL_AP_ID;
2614 else {
2615 u8 *da = ieee80211_get_DA(hdr);
2616
2617 return il4965_find_station(il, da);
2618 }
2619}
2620
/*
 * Extract the scheduler's starting sequence number (SSN) from a TX
 * response.  The SSN word follows the per-frame status entries, i.e. it
 * sits frame_count 32-bit words past u.status; mask to the valid
 * 802.11 sequence-number range.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}
2627
2628static inline u32
2629il4965_tx_status_to_mac80211(u32 status)
2630{
2631 status &= TX_STATUS_MSK;
2632
2633 switch (status) {
2634 case TX_STATUS_SUCCESS:
2635 case TX_STATUS_DIRECT_DONE:
2636 return IEEE80211_TX_STAT_ACK;
2637 case TX_STATUS_FAIL_DEST_PS:
2638 return IEEE80211_TX_STAT_TX_FILTERED;
2639 default:
2640 return 0;
2641 }
2642}
2643
2644
2645
2646
/*
 * il4965_tx_status_reply_tx - record an aggregation TX response
 *
 * For a single-frame "aggregate" the status is final and is reported to
 * mac80211 immediately.  For a real aggregate, a 64-bit bitmap of the
 * frames covered by this response is built (relative to agg->start_idx)
 * and saved so that a later compressed block-ack can be matched against
 * it.  Returns 0 on success, -1 on an inconsistent queue entry.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* num frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame: no block-ack will follow, so hand the
		 * final status to mac80211 right away. */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames attempted; expect a block-ack later. */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* Construct a bitmap of the frames pending in the TX window. */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* Postponed/aborted frames never went out; they
			 * are not part of the window. */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			sc = le16_to_cpu(hdr->seq_ctrl);
			/* Queue index must equal the low byte of the 802.11
			 * sequence number, else driver state is corrupt. */
			if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   IEEE80211_SEQ_TO_SN(sc));

			/* Shift the window start so every frame fits in the
			 * 64-bit bitmap, handling wrap-around of the
			 * 256-entry circular queue. */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
2755
2756
2757
2758
/*
 * il4965_hdl_tx - handler for the uCode TX-command response
 *
 * Reports completion of one TX command: forwards the status to mac80211,
 * reclaims finished TFDs from the queue, and in the aggregation
 * (sched_retry) case updates the per-TID aggregation state so a later
 * compressed block-ack can be matched up.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	/* tid is only assigned for QoS-data frames; all uses below are
	 * guarded by qc != NULL (or by sched_retry, which implies QoS). */
	int uninitialized_var(tid);
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	/* Sanity-check the response index against the live queue state. */
	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	/*
	 * TX on a passive channel failed because nothing has been
	 * received on that channel yet; stop the queues until RX there
	 * unblocks us (queues are restarted elsewhere when a frame is
	 * received -- see il_stop_queues_by_reason() users).
	 */
	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
	    il->iw_mode == NL80211_IFTYPE_STATION) {
		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Stopped queues - RX waiting on passive channel\n");
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* Aggregation queue: fold this response into the per-TID
		 * aggregation state and reclaim up to the scheduler SSN. */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* A failed single frame on an agg queue: flag that no
		 * block-ack was received for this AMPDU. */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* Non-aggregation queue: report the status directly. */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2875
2876
2877
2878
2879void
2880il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2881 struct ieee80211_tx_info *info)
2882{
2883 struct ieee80211_tx_rate *r = &info->status.rates[0];
2884
2885 info->status.antenna =
2886 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2887 if (rate_n_flags & RATE_MCS_HT_MSK)
2888 r->flags |= IEEE80211_TX_RC_MCS;
2889 if (rate_n_flags & RATE_MCS_GF_MSK)
2890 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2891 if (rate_n_flags & RATE_MCS_HT40_MSK)
2892 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2893 if (rate_n_flags & RATE_MCS_DUP_MSK)
2894 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2895 if (rate_n_flags & RATE_MCS_SGI_MSK)
2896 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2897 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2898}
2899
2900
2901
2902
2903
2904
2905
/*
 * il4965_hdl_compressed_ba - handler for a compressed block-ack
 *
 * Matches the BA bitmap from the uCode against the aggregation state
 * recorded by the TX response handler, then reclaims acknowledged
 * frames in front of the block-ack window.
 */
static void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to the TX queue the BA refers to */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is the start of the block-ack window, i.e. the index (in
	 * the TX queue's circular buffer) of the first frame in the win */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * The BA refers to a queue that no longer matches the
		 * aggregation state (presumably the session was torn
		 * down in the meantime); nothing to reclaim.
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* Index just before the start of the block-ack window. */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* Update the driver's ACK/no-ACK record for each frame in the win. */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/*
	 * Release all TFDs before the SSN, i.e. everything in front of
	 * the block-ack window, then wake the software queue if room
	 * opened up (unless the agg session is being torn down).
	 */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* reclaim frames and credit them back to the TID */
		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2982
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il4965_get_tx_fail_reason - map a TX status code to a printable name
 * (debug builds only).  Codes not covered by the table yield "UNKNOWN".
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
/* Expand each TX_STATUS_FAIL_x / TX_STATUS_POSTPONE_x constant into a
 * case returning its stringified suffix. */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif
3022
3023static struct il_link_quality_cmd *
3024il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
3025{
3026 int i, r;
3027 struct il_link_quality_cmd *link_cmd;
3028 u32 rate_flags = 0;
3029 __le32 rate_n_flags;
3030
3031 link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
3032 if (!link_cmd) {
3033 IL_ERR("Unable to allocate memory for LQ cmd.\n");
3034 return NULL;
3035 }
3036
3037
3038 if (il->band == NL80211_BAND_5GHZ)
3039 r = RATE_6M_IDX;
3040 else
3041 r = RATE_1M_IDX;
3042
3043 if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
3044 rate_flags |= RATE_MCS_CCK_MSK;
3045
3046 rate_flags |=
3047 il4965_first_antenna(il->hw_params.
3048 valid_tx_ant) << RATE_MCS_ANT_POS;
3049 rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
3050 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
3051 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
3052
3053 link_cmd->general_params.single_stream_ant_msk =
3054 il4965_first_antenna(il->hw_params.valid_tx_ant);
3055
3056 link_cmd->general_params.dual_stream_ant_msk =
3057 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
3058 valid_tx_ant);
3059 if (!link_cmd->general_params.dual_stream_ant_msk) {
3060 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
3061 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
3062 link_cmd->general_params.dual_stream_ant_msk =
3063 il->hw_params.valid_tx_ant;
3064 }
3065
3066 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3067 link_cmd->agg_params.agg_time_limit =
3068 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
3069
3070 link_cmd->sta_id = sta_id;
3071
3072 return link_cmd;
3073}
3074
3075
3076
3077
3078
3079
3080int
3081il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
3082{
3083 int ret;
3084 u8 sta_id;
3085 struct il_link_quality_cmd *link_cmd;
3086 unsigned long flags;
3087
3088 if (sta_id_r)
3089 *sta_id_r = IL_INVALID_STATION;
3090
3091 ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
3092 if (ret) {
3093 IL_ERR("Unable to add station %pM\n", addr);
3094 return ret;
3095 }
3096
3097 if (sta_id_r)
3098 *sta_id_r = sta_id;
3099
3100 spin_lock_irqsave(&il->sta_lock, flags);
3101 il->stations[sta_id].used |= IL_STA_LOCAL;
3102 spin_unlock_irqrestore(&il->sta_lock, flags);
3103
3104
3105 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3106 if (!link_cmd) {
3107 IL_ERR("Unable to initialize rate scaling for station %pM.\n",
3108 addr);
3109 return -ENOMEM;
3110 }
3111
3112 ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
3113 if (ret)
3114 IL_ERR("Link quality command failed (%d)\n", ret);
3115
3116 spin_lock_irqsave(&il->sta_lock, flags);
3117 il->stations[sta_id].lq = link_cmd;
3118 spin_unlock_irqrestore(&il->sta_lock, flags);
3119
3120 return 0;
3121}
3122
/*
 * Build and synchronously send the C_WEPKEY command carrying the full
 * static WEP key table.
 *
 * An all-empty table is only transmitted when @send_if_empty is set
 * (used to actively remove keys from the uCode).  Returns 0 when
 * nothing had to be sent, otherwise the il_send_cmd() result.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
	int i;
	/* Command header plus one key slot per possible static key. */
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = C_WEPKEY,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};
	bool not_empty = false;

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		u8 key_size = il->_4965.wep_keys[i].key_size;

		wep_cmd->key[i].key_idx = i;
		if (key_size) {
			/* Slot in use: hardware key offset == key index. */
			wep_cmd->key[i].key_offset = i;
			not_empty = true;
		} else
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;

		wep_cmd->key[i].key_size = key_size;
		/* NOTE(review): key material is written at offset 3 of the
		 * key field -- appears to be the uCode-expected WEP layout;
		 * confirm against the struct il_wep_key definition. */
		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
3168
/*
 * Re-upload the static WEP key table (kept in il->_4965.wep_keys) to
 * the uCode; nothing is sent when the table is empty.
 */
int
il4965_restore_default_wep_keys(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, false);
}
3176
3177int
3178il4965_remove_default_wep_key(struct il_priv *il,
3179 struct ieee80211_key_conf *keyconf)
3180{
3181 int ret;
3182 int idx = keyconf->keyidx;
3183
3184 lockdep_assert_held(&il->mutex);
3185
3186 D_WEP("Removing default WEP key: idx=%d\n", idx);
3187
3188 memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
3189 if (il_is_rfkill(il)) {
3190 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
3191
3192 return 0;
3193 }
3194 ret = il4965_static_wepkey_cmd(il, 1);
3195 D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
3196
3197 return ret;
3198}
3199
3200int
3201il4965_set_default_wep_key(struct il_priv *il,
3202 struct ieee80211_key_conf *keyconf)
3203{
3204 int ret;
3205 int len = keyconf->keylen;
3206 int idx = keyconf->keyidx;
3207
3208 lockdep_assert_held(&il->mutex);
3209
3210 if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
3211 D_WEP("Bad WEP key length %d\n", keyconf->keylen);
3212 return -EINVAL;
3213 }
3214
3215 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
3216 keyconf->hw_key_idx = HW_KEY_DEFAULT;
3217 il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;
3218
3219 il->_4965.wep_keys[idx].key_size = len;
3220 memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);
3221
3222 ret = il4965_static_wepkey_cmd(il, false);
3223
3224 D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
3225 return ret;
3226}
3227
/*
 * Program a per-station ("dynamic") WEP key into the device's station
 * table and sync it with a synchronous C_ADD_STA command.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Device handles WEP IVs; mac80211 must not generate them. */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* NOTE(review): key material copied at offset 3 of the uCode key
	 * field -- same WEP layout as il4965_static_wepkey_cmd(); confirm
	 * against the struct definition. */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* Allocate a uCode key-table slot only if this entry has none
	 * yet; otherwise an existing key is being overridden in place. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command under the lock; send it after unlocking. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3281
/*
 * Program a per-station CCMP key into the device's station table and
 * sync it with a synchronous C_ADD_STA command.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 generates the IV/PN for CCMP frames. */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Allocate a uCode key-table slot only if this entry has none
	 * yet; otherwise an existing key is being overridden in place. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command under the lock; send it after unlocking. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3329
/*
 * Set up a TKIP pairwise key in the local station table.
 *
 * Note: no C_ADD_STA command is sent here; the station update (with the
 * phase-1 TTAK material) is sent later from il4965_update_tkip_key().
 * Always returns 0.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 generates the IV and the Michael MIC for TKIP. */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* TKIP temporal key is always 16 bytes (rest is MIC keys). */
	il->stations[sta_id].keyinfo.keylen = 16;

	/* Allocate a uCode key-table slot only if this entry has none
	 * yet; otherwise an existing key is being overridden in place. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* Keep only the 16-byte temporal key locally and in the command. */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3374
/*
 * Push updated TKIP phase-1 (TTAK) key material for @sta to the uCode
 * with an asynchronous C_ADD_STA command.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* Could not cancel the scan: skip the update (presumably
		 * SW decryption covers until the next phase-1 update --
		 * TODO confirm). */
		return;
	}

	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* phase1key holds five 16-bit TTAK words. */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
3408
/*
 * Remove a pairwise key from the device's station table.
 *
 * Silently succeeds (returns 0) when the key was already replaced by a
 * newer one, when it is already marked invalid, or when RFKILL prevents
 * sending the update.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/*
		 * The installed key has a different index than the one
		 * being removed: it was already overwritten by a newer
		 * key, so there is nothing left to remove here.
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Release the uCode key-table slot tracked in ucode_key_table. */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Snapshot the command under the lock; send it after unlocking. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3469
3470int
3471il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3472 u8 sta_id)
3473{
3474 int ret;
3475
3476 lockdep_assert_held(&il->mutex);
3477
3478 il->_4965.key_mapping_keys++;
3479 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3480
3481 switch (keyconf->cipher) {
3482 case WLAN_CIPHER_SUITE_CCMP:
3483 ret =
3484 il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
3485 break;
3486 case WLAN_CIPHER_SUITE_TKIP:
3487 ret =
3488 il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
3489 break;
3490 case WLAN_CIPHER_SUITE_WEP40:
3491 case WLAN_CIPHER_SUITE_WEP104:
3492 ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
3493 break;
3494 default:
3495 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3496 keyconf->cipher);
3497 ret = -EINVAL;
3498 }
3499
3500 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3501 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3502
3503 return ret;
3504}
3505
3506
3507
3508
3509
3510
3511
3512
3513int
3514il4965_alloc_bcast_station(struct il_priv *il)
3515{
3516 struct il_link_quality_cmd *link_cmd;
3517 unsigned long flags;
3518 u8 sta_id;
3519
3520 spin_lock_irqsave(&il->sta_lock, flags);
3521 sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
3522 if (sta_id == IL_INVALID_STATION) {
3523 IL_ERR("Unable to prepare broadcast station\n");
3524 spin_unlock_irqrestore(&il->sta_lock, flags);
3525
3526 return -EINVAL;
3527 }
3528
3529 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
3530 il->stations[sta_id].used |= IL_STA_BCAST;
3531 spin_unlock_irqrestore(&il->sta_lock, flags);
3532
3533 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3534 if (!link_cmd) {
3535 IL_ERR
3536 ("Unable to initialize rate scaling for bcast station.\n");
3537 return -ENOMEM;
3538 }
3539
3540 spin_lock_irqsave(&il->sta_lock, flags);
3541 il->stations[sta_id].lq = link_cmd;
3542 spin_unlock_irqrestore(&il->sta_lock, flags);
3543
3544 return 0;
3545}
3546
3547
3548
3549
3550
3551
3552
3553static int
3554il4965_update_bcast_station(struct il_priv *il)
3555{
3556 unsigned long flags;
3557 struct il_link_quality_cmd *link_cmd;
3558 u8 sta_id = il->hw_params.bcast_id;
3559
3560 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3561 if (!link_cmd) {
3562 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3563 return -ENOMEM;
3564 }
3565
3566 spin_lock_irqsave(&il->sta_lock, flags);
3567 if (il->stations[sta_id].lq)
3568 kfree(il->stations[sta_id].lq);
3569 else
3570 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3571 il->stations[sta_id].lq = link_cmd;
3572 spin_unlock_irqrestore(&il->sta_lock, flags);
3573
3574 return 0;
3575}
3576
/*
 * Refresh rate scaling for all broadcast stations.  The 4965 driver has
 * a single context, so this delegates to the one bcast station.
 */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
3582
3583
3584
3585
3586int
3587il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
3588{
3589 unsigned long flags;
3590 struct il_addsta_cmd sta_cmd;
3591
3592 lockdep_assert_held(&il->mutex);
3593
3594
3595 spin_lock_irqsave(&il->sta_lock, flags);
3596 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3597 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3598 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3599 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3600 sizeof(struct il_addsta_cmd));
3601 spin_unlock_irqrestore(&il->sta_lock, flags);
3602
3603 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3604}
3605
3606int
3607il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
3608 u16 ssn)
3609{
3610 unsigned long flags;
3611 int sta_id;
3612 struct il_addsta_cmd sta_cmd;
3613
3614 lockdep_assert_held(&il->mutex);
3615
3616 sta_id = il_sta_id(sta);
3617 if (sta_id == IL_INVALID_STATION)
3618 return -ENXIO;
3619
3620 spin_lock_irqsave(&il->sta_lock, flags);
3621 il->stations[sta_id].sta.station_flags_msk = 0;
3622 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3623 il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
3624 il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3625 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3626 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3627 sizeof(struct il_addsta_cmd));
3628 spin_unlock_irqrestore(&il->sta_lock, flags);
3629
3630 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3631}
3632
3633int
3634il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
3635{
3636 unsigned long flags;
3637 int sta_id;
3638 struct il_addsta_cmd sta_cmd;
3639
3640 lockdep_assert_held(&il->mutex);
3641
3642 sta_id = il_sta_id(sta);
3643 if (sta_id == IL_INVALID_STATION) {
3644 IL_ERR("Invalid station for AGG tid %d\n", tid);
3645 return -ENXIO;
3646 }
3647
3648 spin_lock_irqsave(&il->sta_lock, flags);
3649 il->stations[sta_id].sta.station_flags_msk = 0;
3650 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3651 il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
3652 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3653 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3654 sizeof(struct il_addsta_cmd));
3655 spin_unlock_irqrestore(&il->sta_lock, flags);
3656
3657 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3658}
3659
3660void
3661il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
3662{
3663 unsigned long flags;
3664
3665 spin_lock_irqsave(&il->sta_lock, flags);
3666 il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
3667 il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3668 il->stations[sta_id].sta.sta.modify_mask =
3669 STA_MODIFY_SLEEP_TX_COUNT_MSK;
3670 il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
3671 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3672 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3673 spin_unlock_irqrestore(&il->sta_lock, flags);
3674
3675}
3676
3677void
3678il4965_update_chain_flags(struct il_priv *il)
3679{
3680 if (il->ops->set_rxon_chain) {
3681 il->ops->set_rxon_chain(il);
3682 if (il->active.rx_chain != il->staging.rx_chain)
3683 il_commit_rxon(il);
3684 }
3685}
3686
3687static void
3688il4965_clear_free_frames(struct il_priv *il)
3689{
3690 struct list_head *element;
3691
3692 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3693
3694 while (!list_empty(&il->free_frames)) {
3695 element = il->free_frames.next;
3696 list_del(element);
3697 kfree(list_entry(element, struct il_frame, list));
3698 il->frames_count--;
3699 }
3700
3701 if (il->frames_count) {
3702 IL_WARN("%d frames still in use. Did we lose one?\n",
3703 il->frames_count);
3704 il->frames_count = 0;
3705 }
3706}
3707
3708static struct il_frame *
3709il4965_get_free_frame(struct il_priv *il)
3710{
3711 struct il_frame *frame;
3712 struct list_head *element;
3713 if (list_empty(&il->free_frames)) {
3714 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3715 if (!frame) {
3716 IL_ERR("Could not allocate frame!\n");
3717 return NULL;
3718 }
3719
3720 il->frames_count++;
3721 return frame;
3722 }
3723
3724 element = il->free_frames.next;
3725 list_del(element);
3726 return list_entry(element, struct il_frame, list);
3727}
3728
3729static void
3730il4965_free_frame(struct il_priv *il, struct il_frame *frame)
3731{
3732 memset(frame, 0, sizeof(*frame));
3733 list_add(&frame->list, &il->free_frames);
3734}
3735
3736static u32
3737il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3738 int left)
3739{
3740 lockdep_assert_held(&il->mutex);
3741
3742 if (!il->beacon_skb)
3743 return 0;
3744
3745 if (il->beacon_skb->len > left)
3746 return 0;
3747
3748 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3749
3750 return il->beacon_skb->len;
3751}
3752
3753
3754static void
3755il4965_set_beacon_tim(struct il_priv *il,
3756 struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
3757 u32 frame_size)
3758{
3759 u16 tim_idx;
3760 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
3761
3762
3763
3764
3765
3766 tim_idx = mgmt->u.beacon.variable - beacon;
3767
3768
3769 while ((tim_idx < (frame_size - 2)) &&
3770 (beacon[tim_idx] != WLAN_EID_TIM))
3771 tim_idx += beacon[tim_idx + 1] + 2;
3772
3773
3774 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
3775 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
3776 tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
3777 } else
3778 IL_WARN("Unable to find TIM Element in beacon\n");
3779}
3780
/**
 * il4965_hw_get_beacon_cmd - build a C_TX_BEACON command in @frame
 *
 * Returns the total command size (header + beacon payload), or 0 on
 * any failure (beaconing disabled, no cached beacon, oversized frame).
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * We have to set up the TX command, the TX beacon command, and
	 * the beacon contents itself.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents (copied from the cached beacon skb) */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields (locate the TIM element) */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags: lowest basic rate, alternating
	 * TX antenna, CCK flag for CCK rates */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3835
3836int
3837il4965_send_beacon_cmd(struct il_priv *il)
3838{
3839 struct il_frame *frame;
3840 unsigned int frame_size;
3841 int rc;
3842
3843 frame = il4965_get_free_frame(il);
3844 if (!frame) {
3845 IL_ERR("Could not obtain free frame buffer for beacon "
3846 "command.\n");
3847 return -ENOMEM;
3848 }
3849
3850 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3851 if (!frame_size) {
3852 IL_ERR("Error configuring the beacon command\n");
3853 il4965_free_frame(il, frame);
3854 return -EINVAL;
3855 }
3856
3857 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3858
3859 il4965_free_frame(il, frame);
3860
3861 return rc;
3862}
3863
/* Extract the DMA address from TFD transfer buffer descriptor @idx. */
static inline dma_addr_t
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	/* Low 32 address bits are stored little-endian in tb->lo. */
	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		/* Address bits 32-35 live in the low nibble of hi_n_len.
		 * The double 16-bit shift avoids an undefined >= 32-bit
		 * shift when dma_addr_t is only 32 bits wide. */
		addr |=
		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
		    16;

	return addr;
}
3877
3878static inline u16
3879il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3880{
3881 struct il_tfd_tb *tb = &tfd->tbs[idx];
3882
3883 return le16_to_cpu(tb->hi_n_len) >> 4;
3884}
3885
/* Fill TFD transfer buffer descriptor @idx with @addr/@len and bump
 * the descriptor's TB count to idx + 1. */
static inline void
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];
	/* Length goes in the upper 12 bits of hi_n_len. */
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		/* Pack address bits 32-35 into the low nibble; the double
		 * shift sidesteps an undefined >= 32-bit shift on 32-bit
		 * dma_addr_t builds. */
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
3900
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	/* Only the low 5 bits of num_tbs encode the TB count. */
	return tfd->num_tbs & 0x1f;
}
3906
3907
3908
3909
3910
3911
3912
3913
3914
/**
 * il4965_hw_txq_free_tfd - free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Unmaps the DMA buffers attached to the current TFD and frees the
 * associated skb, if any.  Does NOT advance any TFD circular buffer
 * read/write indexes, and does NOT free the TFD itself (it lives
 * inside the circular buffer).
 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* First TB holds the Tx command: unmap via the metadata saved
	 * at map time (bidirectional - responses are written back) */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap remaining payload chunks (addresses read from the TFD) */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* dev_kfree_skb_any: may be called from irq context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
3959
3960int
3961il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
3962 dma_addr_t addr, u16 len, u8 reset, u8 pad)
3963{
3964 struct il_queue *q;
3965 struct il_tfd *tfd, *tfd_tmp;
3966 u32 num_tbs;
3967
3968 q = &txq->q;
3969 tfd_tmp = (struct il_tfd *)txq->tfds;
3970 tfd = &tfd_tmp[q->write_ptr];
3971
3972 if (reset)
3973 memset(tfd, 0, sizeof(*tfd));
3974
3975 num_tbs = il4965_tfd_get_num_tbs(tfd);
3976
3977
3978 if (num_tbs >= IL_NUM_OF_TBS) {
3979 IL_ERR("Error can not send more than %d chunks\n",
3980 IL_NUM_OF_TBS);
3981 return -EINVAL;
3982 }
3983
3984 BUG_ON(addr & ~DMA_BIT_MASK(36));
3985 if (unlikely(addr & ~IL_TX_DMA_MASK))
3986 IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
3987
3988 il4965_tfd_set_tb(tfd, num_tbs, addr, len);
3989
3990 return 0;
3991}
3992
3993
3994
3995
3996
3997
3998
3999
4000int
4001il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
4002{
4003 int txq_id = txq->q.id;
4004
4005
4006 il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
4007
4008 return 0;
4009}
4010
4011
4012
4013
4014
4015
4016static void
4017il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
4018{
4019 struct il_rx_pkt *pkt = rxb_addr(rxb);
4020 struct il_alive_resp *palive;
4021 struct delayed_work *pwork;
4022
4023 palive = &pkt->u.alive_frame;
4024
4025 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
4026 palive->is_valid, palive->ver_type, palive->ver_subtype);
4027
4028 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
4029 D_INFO("Initialization Alive received.\n");
4030 memcpy(&il->card_alive_init, &pkt->u.alive_frame,
4031 sizeof(struct il_init_alive_resp));
4032 pwork = &il->init_alive_start;
4033 } else {
4034 D_INFO("Runtime Alive received.\n");
4035 memcpy(&il->card_alive, &pkt->u.alive_frame,
4036 sizeof(struct il_alive_resp));
4037 pwork = &il->alive_start;
4038 }
4039
4040
4041
4042 if (palive->is_valid == UCODE_VALID_OK)
4043 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
4044 else
4045 IL_WARN("uCode did not respond OK.\n");
4046}
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058static void
4059il4965_bg_stats_periodic(struct timer_list *t)
4060{
4061 struct il_priv *il = from_timer(il, t, stats_periodic);
4062
4063 if (test_bit(S_EXIT_PENDING, &il->status))
4064 return;
4065
4066
4067 if (!il_is_ready_rf(il))
4068 return;
4069
4070 il_send_stats_request(il, CMD_ASYNC, false);
4071}
4072
/* Handle the uCode beacon notification: cache the IBSS manager status
 * it reports (and dump the beacon TX status when debugging). */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
4090
/* Critical-temperature (CT) kill: stop all traffic and signal the
 * uCode so it can handle the CT-kill exit later. */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* read back — presumably to flush the posted write to the
	 * device; TODO confirm against the CSR documentation */
	_il_rd(il, CSR_UCODE_DRV_GP1);

	/* NOTE(review): grab/release of NIC access with no work in
	 * between looks like it only toggles MAC_ACCESS_REQ — confirm */
	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
4110
4111
4112
/* Handle notification from uCode that the card's power state is
 * changing due to software, hardware, or critical-temperature RFKILL */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot the old status bits so we can detect a change below */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ... but unblock again when RXON is not disabled */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	/* Track the hardware rfkill switch state in il->status */
	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* Tell the rfkill core only on an actual state change;
	 * otherwise wake anyone waiting on the command queue */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
/* Populate the dispatch table used by il4965_rx_handle() to route
 * uCode notifications and command responses to their handlers. */
static void
il4965_setup_handlers(struct il_priv *il)
{
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/*
	 * The same handler serves both the reply to a discrete stats
	 * request from the host and the periodic stats notifications
	 * (after received beacons) from the uCode.
	 */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* status change handler */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
	/* Rx handlers */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	il->handlers[N_RX] = il4965_hdl_rx;
	/* block ack */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
	/* Tx response */
	il->handlers[C_TX] = il4965_hdl_tx;
}
4202
4203
4204
4205
4206
4207
4208
4209
/**
 * il4965_rx_handle - main entry for processing received buffers
 *
 * Walks the RX ring from the driver's read index up to the index the
 * uCode has filled, dispatches each packet through the handlers table,
 * and recycles or replenishes the buffers.
 */
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode) */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* calculate how many frames need to be restocked after handling */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* Replenish eagerly inside the loop when more than half empty */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);	/* account for status word */

		/* Does this packet complete a pending host command? */
		reclaim = il_need_reclaim(il, pkt);

		/* Based on type of command response or notification,
		 * handle those that need handling via functions in the
		 * handlers table.  See il4965_setup_handlers() */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/* NOTE: after this point always check rxb->page against
		 * NULL before touching it or its virtual memory (pkt) --
		 * a handler may have taken or freed the page. */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to the
			 * caller, and fire off the (possibly) blocking
			 * il_send_cmd() as we reclaim the driver command
			 * queue */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Reuse the page if possible; re-map it for DMA and put
		 * it back on rx_free, otherwise park the rxb on rx_used
		 * for later replenishment. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);

			if (unlikely(pci_dma_mapping_error(il->pci_dev,
							   rxb->page_dma))) {
				__il_free_pages(il, rxb->page);
				rxb->page = NULL;
				list_add_tail(&rxb->list, &rxq->rx_used);
			} else {
				list_add_tail(&rxb->list, &rxq->rx_free);
				rxq->free_count++;
			}
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If there are a lot of unused frames, restock the Rx
		 * queue every 8 packets so the uCode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Publish the new read index and top up the queue. */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
4337
4338
/* Make sure no IRQ handler or tasklet is still running for us. */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait for any in-flight hard irq handler to finish */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4346
/* Tasklet: service all interrupt causes latched in CSR_INT and
 * CSR_FH_INT_STATUS, then re-enable interrupts. */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: some bits in CSR_INT are "OR" of bits in
	 * CSR_FH_INT_STATUS and clear only when that register clears. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts after this will be latched again. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads/clears are not
	 * atomic, fold the FH bits into inta so nothing discovered
	 * above is lost. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Hardware (e.g. bus) error: restart the device. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it; it's redundant
		 * with the WAKEUP interrupt. */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via the Rx path does the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for the debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* The driver only loads the uCode once the interface is
		 * brought up, and allows loading even while the radio is
		 * killed -- so just update the killswitch state here; the
		 * rfkill handler takes care of restarting if needed. */
		if (hw_rf_kill) {
			set_bit(S_RFKILL, &il->status);
		} else {
			clear_bit(S_RFKILL, &il->status);
			il_force_reset(il, true);
		}
		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell the device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from the uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;

		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only re-enable if they were disabled by this irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4531
4532
4533
4534
4535
4536
4537
4538#ifdef CONFIG_IWLEGACY_DEBUG
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551static ssize_t
4552il4965_show_debug_level(struct device *d, struct device_attribute *attr,
4553 char *buf)
4554{
4555 struct il_priv *il = dev_get_drvdata(d);
4556 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
4557}
4558
4559static ssize_t
4560il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4561 const char *buf, size_t count)
4562{
4563 struct il_priv *il = dev_get_drvdata(d);
4564 unsigned long val;
4565 int ret;
4566
4567 ret = kstrtoul(buf, 0, &val);
4568 if (ret)
4569 IL_ERR("%s is not in hex or decimal form.\n", buf);
4570 else
4571 il->debug_level = val;
4572
4573 return strnlen(buf, count);
4574}
4575
/* sysfs "debug_level" attribute: 0644 = owner rw, others read-only. */
static DEVICE_ATTR(debug_level, 0644, il4965_show_debug_level,
		   il4965_store_debug_level);
4578
4579#endif
4580
/* sysfs: report the current chip temperature. */
static ssize_t
il4965_show_temperature(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);

	/* Temperature is only meaningful while the uCode is alive. */
	if (!il_is_alive(il))
		return -EAGAIN;

	return sprintf(buf, "%d\n", il->temperature);
}
4592
/* sysfs "temperature" attribute: read-only. */
static DEVICE_ATTR(temperature, 0444, il4965_show_temperature, NULL);
4594
4595static ssize_t
4596il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4597{
4598 struct il_priv *il = dev_get_drvdata(d);
4599
4600 if (!il_is_ready_rf(il))
4601 return sprintf(buf, "off\n");
4602 else
4603 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4604}
4605
4606static ssize_t
4607il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4608 const char *buf, size_t count)
4609{
4610 struct il_priv *il = dev_get_drvdata(d);
4611 unsigned long val;
4612 int ret;
4613
4614 ret = kstrtoul(buf, 10, &val);
4615 if (ret)
4616 IL_INFO("%s is not in decimal form.\n", buf);
4617 else {
4618 ret = il_set_tx_power(il, val, false);
4619 if (ret)
4620 IL_ERR("failed setting tx power (0x%08x).\n", ret);
4621 else
4622 ret = count;
4623 }
4624 return ret;
4625}
4626
/* sysfs "tx_power" attribute: 0644 = owner rw, others read-only. */
static DEVICE_ATTR(tx_power, 0644, il4965_show_tx_power,
		   il4965_store_tx_power);
4629
/* Attributes exposed under the PCI device's sysfs directory. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL			/* sentinel */
};
4638
/* Unnamed group: attributes appear directly in the device directory. */
static const struct attribute_group il_attribute_group = {
	.name = NULL,
	.attrs = il_sysfs_entries,
};
4643
4644
4645
4646
4647
4648
4649
/**
 * il4965_dealloc_ucode_pci - free all DMA buffers holding uCode images
 */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4660
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
4667
4668static void il4965_ucode_callback(const struct firmware *ucode_raw,
4669 void *context);
4670static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4671
4672static int __must_check
4673il4965_request_firmware(struct il_priv *il, bool first)
4674{
4675 const char *name_pre = il->cfg->fw_name_pre;
4676 char tag[8];
4677
4678 if (first) {
4679 il->fw_idx = il->cfg->ucode_api_max;
4680 sprintf(tag, "%d", il->fw_idx);
4681 } else {
4682 il->fw_idx--;
4683 sprintf(tag, "%d", il->fw_idx);
4684 }
4685
4686 if (il->fw_idx < il->cfg->ucode_api_min) {
4687 IL_ERR("no suitable firmware found!\n");
4688 return -ENOENT;
4689 }
4690
4691 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4692
4693 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4694
4695 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4696 &il->pci_dev->dev, GFP_KERNEL, il,
4697 il4965_ucode_callback);
4698}
4699
/* Pointers into the raw firmware image for each uCode section, plus
 * their sizes, as parsed from the file header. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4704
/* Parse the uCode file header and fill @pieces with pointers to and
 * sizes of the individual image sections.  Returns 0 or -EINVAL. */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	/* Only the v1 header layout (24 bytes) is supported; unknown
	 * API versions deliberately fall through to the same parsing. */
	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Sections follow the header back-to-back in this order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4758
4759
4760
4761
4762
4763
4764
/**
 * il4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers for the
 * card to fetch via bus-master DMA, then finishes device setup
 * (mac80211 registration, debugfs, sysfs).  On a parse/validation
 * failure it retries with the next lower firmware API version.
 */
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file: header followed by uCode images */
	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename, but we don't check for that and only rely
	 * on the API version read from the firmware header from here on.
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %zd\n", pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/*
	 * Allocate ucode buffers for card's bus-master loading of ucode.
	 *
	 * Runtime instructions and two copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs
	 */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that uCode buffers are allocated, set up driver
	 * capabilities */
	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads */

	/* Runtime instructions (first block of data in file) */
	D_INFO("Copying (but not loading) uCode instr len %zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE: the copy into the backup buffer happens again later,
	 * presumably in the up/restore path — confirm in il_up()
	 */
	D_INFO("Copying (but not loading) uCode data len %zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	D_INFO("Copying (but not loading) boot instr len %zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * Figure out the offsets of the chain-noise reset and gain
	 * commands based on the size of the standard phy calibration
	 * commands table.
	 */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/*
	 * This is still part of probe() in a sense:
	 * set up and register with mac80211, debugfs and sysfs.
	 */
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	il_dbgfs_register(il, DRV_NAME);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow the OS to release its copy */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* try next, smaller api version */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
5004
/* Plain-text descriptions of uCode error codes, indexed directly by
 * the numeric code (see il4965_desc_lookup()). */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
5035
5036static struct {
5037 char *name;
5038 u8 num;
5039} advanced_lookup[] = {
5040 {
5041 "NMI_INTERRUPT_WDG", 0x34}, {
5042 "SYSASSERT", 0x35}, {
5043 "UCODE_VERSION_MISMATCH", 0x37}, {
5044 "BAD_COMMAND", 0x38}, {
5045 "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
5046 "FATAL_ERROR", 0x3D}, {
5047 "NMI_TRM_HW_ERR", 0x46}, {
5048 "NMI_INTERRUPT_TRM", 0x4C}, {
5049 "NMI_INTERRUPT_BREAK_POINT", 0x54}, {
5050 "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
5051 "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
5052 "NMI_INTERRUPT_HOST", 0x66}, {
5053 "NMI_INTERRUPT_ACTION_PT", 0x7C}, {
5054 "NMI_INTERRUPT_UNKNOWN", 0x84}, {
5055 "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
5056"ADVANCED_SYSASSERT", 0},};
5057
5058static const char *
5059il4965_desc_lookup(u32 num)
5060{
5061 int i;
5062 int max = ARRAY_SIZE(desc_lookup_text);
5063
5064 if (num < max)
5065 return desc_lookup_text[num];
5066
5067 max = ARRAY_SIZE(advanced_lookup) - 1;
5068 for (i = 0; i < max; i++) {
5069 if (advanced_lookup[i].num == num)
5070 break;
5071 }
5072 return advanced_lookup[i].name;
5073}
5074
/* On-device error log layout: a one-word count header followed by
 * seven-word error records. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
5077
/*
 * il4965_dump_nic_error_log - dump the uCode error-event table to the log
 *
 * Picks the error-event table pointer from whichever ALIVE response
 * matches the currently loaded image (init vs. runtime), validates it,
 * then reads fixed word offsets from device target memory and prints
 * them.  Safe to call from error paths; returns silently on a bad
 * table pointer.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* The init and runtime images report separate table addresses. */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* Word 0 of the table is the number of logged entries. */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fixed word offsets below follow the 4965 uCode's record layout;
	 * words 10 and 12..21 are intentionally skipped. */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;	/* remember last error for debugfs */
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc Time "
	       "data1 data2 line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
5125
5126static void
5127il4965_rf_kill_ct_config(struct il_priv *il)
5128{
5129 struct il_ct_kill_config cmd;
5130 unsigned long flags;
5131 int ret = 0;
5132
5133 spin_lock_irqsave(&il->lock, flags);
5134 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
5135 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
5136 spin_unlock_irqrestore(&il->lock, flags);
5137
5138 cmd.critical_temperature_R =
5139 cpu_to_le32(il->hw_params.ct_kill_threshold);
5140
5141 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
5142 if (ret)
5143 IL_ERR("C_CT_KILL_CONFIG failed\n");
5144 else
5145 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
5146 "critical temperature is %d\n",
5147 il->hw_params.ct_kill_threshold);
5148}
5149
/* Default mapping from Tx queue number to Tx FIFO: the four AC queues
 * (VO/VI/BE/BK), the command FIFO, and two unused slots.  Consumed by
 * il4965_alive_notify(), which BUILD_BUG_ONs the size at 7. */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
5159
/* Bit mask with bits lo..hi (inclusive) set; hi must be < 31. */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
5161
/*
 * il4965_alive_notify - program the Tx scheduler after "runtime alive"
 *
 * With il->lock held: zeroes the scheduler's context area in device
 * SRAM, points it at the DMA'd byte-count tables, enables the Tx DMA
 * channels, writes the per-queue window-size/frame-limit context, and
 * binds each queue to its FIFO per default_queue_to_tx_fifo[].
 * Always returns 0.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear the scheduler data structures: context data, Tx status
	 * bitmap, and the translate table, up to the last queue's slot. */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell the scheduler where the byte-count tables live.  The DMA
	 * address is shifted right by 10 — presumably the register takes
	 * it in 1 KB units; confirm against the 4965 register docs. */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channels, same configuration for each. */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Set FH "chicken bits": enable scheduler auto-retry. */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chaining for all queues. */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Per-queue init, including the command queue. */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* Reset the queue's read pointer and write pointer to 0
		 * (queue number is encoded in bits 8+ of HBUS_TARG_WRPTR). */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Context word 1: maximum Tx window size. */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Context word 2: frame limit. */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* Unmask scheduler interrupts for every configured queue. */
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate scheduling on queues 0..6. */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* Make sure all software queue-stop state is cleared. */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* Mark all queues inactive, then activate them one by one below. */
	il->txq_ctx_active_msk = 0;

	/* The FIFO map must cover exactly 7 queues. */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
5261
5262
5263
5264
5265
5266
/*
 * il4965_alive_start - complete bring-up once the runtime uCode is ALIVE
 *
 * Runs from the alive_start worker (il->mutex held by the caller).
 * Verifies the loaded image, programs the Tx scheduler, arms the
 * watchdog, restores or re-initializes the RXON configuration, commits
 * it, and marks the driver S_READY.  Any failure queues the restart
 * worker instead.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* The hardware did not come up cleanly; tear it all the
		 * way down via the restart worker and try again. */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Paranoia: re-verify the runtime instruction image even though
	 * we would not have received a runtime ALIVE otherwise. */
	if (il4965_verify_ucode(il)) {
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* From here on we may send host commands to the uCode. */
	set_bit(S_ALIVE, &il->status);

	/* Enable the watchdog that monitors the Tx queues. */
	il_setup_watchdog(il);

	/* If RF-kill is asserted, stop short of enabling traffic;
	 * note S_READY is intentionally not set in that case. */
	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* Keep ASSOC in staging but drop it from the active copy,
		 * forcing the upcoming il_commit_rxon() to re-associate. */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Not associated: start from a fresh RXON configuration. */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure Bluetooth coexistence. */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter with the (possibly restored) RXON. */
	il_commit_rxon(il);

	/* NIC is operational: set the critical-temperature kill point. */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	/* Release anyone sleeping in il4965_mac_start() on S_READY. */
	wake_up(&il->wait_command_queue);

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5348
5349static void il4965_cancel_deferred_work(struct il_priv *il);
5350
/*
 * __il4965_down - bring the NIC fully down (caller must hold il->mutex)
 *
 * Cancels scanning, stops the watchdog, tears down station state and
 * WEP keys, masks interrupts, resets the on-board processor, stops the
 * Tx/Rx machinery and APM, and finally clears il->status except for a
 * few preserved bits (RF-kill, geo-configured, fw-error, exit-pending).
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* Remember whether EXIT_PENDING was already set (module exit);
	 * setting it here also prevents the watchdog from rearming. */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* Wipe the software copies of the WEP keys under sta_lock; the
	 * uCode copies disappear with the device reset below. */
	spin_lock_irq(&il->sta_lock);

	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any callers waiting on host-command completion. */
	wake_up_all(&il->wait_command_queue);

	/* Only keep EXIT_PENDING set if it was set before we got here,
	 * i.e. the module really is going away. */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* Stop and reset the on-board processor. */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* Tell the device to stop sending interrupts, then wait for any
	 * in-flight IRQ/tasklet work to finish. */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If init never ran, there is no DMA/APM state to tear down:
	 * rebuild il->status from the preserved bits and bail out. */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear everything but the preserved bits and keep
	 * taking the NIC down. */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/* Stop Tx/Rx DMA and power down while holding reg_lock so no
	 * other register access can interleave with the shutdown. */
	spin_lock_irq(&il->reg_lock);

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);

	/* Power-down the device's bus-master DMA clocks. */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Release our request to keep the MAC awake. */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device and put it in low-power state. */
	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	/* dev_kfree_skb() tolerates NULL, so no guard is needed. */
	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	il4965_clear_free_frames(il);
}
5453
/*
 * il4965_down - take il->mutex, bring the NIC down, cancel deferred work
 *
 * The deferred work is cancelled after dropping the mutex because the
 * work handlers themselves take il->mutex.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5463
5464
/*
 * il4965_set_hw_ready - assert NIC_READY and poll for the card to ack
 *
 * Sets il->hw_ready to true on success; leaves it untouched on poll
 * timeout (callers initialize it to false beforehand).  The timeout
 * argument of 100 is in whatever unit _il_poll_bit uses — presumably
 * microseconds; confirm against its definition.
 */
static void
il4965_set_hw_ready(struct il_priv *il)
{
	int ret;

	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   100);
	if (ret >= 0)
		il->hw_ready = true;

	D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
}
5483
/*
 * il4965_prepare_card_hw - wake the card and wait until it reports ready
 *
 * Fast path: the card may already answer the ready handshake.  If not,
 * set the PREPARE bit, poll for PREPARE_DONE (note the inverted bit
 * mask passed to _il_poll_bit), and retry the ready handshake unless
 * the poll timed out.  Result is left in il->hw_ready.
 */
static void
il4965_prepare_card_hw(struct il_priv *il)
{
	int ret;

	il->hw_ready = false;

	il4965_set_hw_ready(il);
	if (il->hw_ready)
		return;

	/* If HW is not ready, prepare the conditions to check again. */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);

	ret =
	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		il4965_set_hw_ready(il);
}
5507
/* Number of uCode load attempts made by __il4965_up() before giving up. */
#define MAX_HW_RESTARTS 5
5509
/*
 * __il4965_up - bring the NIC up (caller must hold il->mutex)
 *
 * Checks preconditions (no pending exit, firmware images present),
 * allocates the broadcast station, wakes the hardware, honours the HW
 * RF-kill switch, initializes the NIC, enables interrupts, and then
 * tries up to MAX_HW_RESTARTS times to load and start the uCode.
 *
 * Returns 0 on success or when RF-kill is asserted (the up sequence is
 * deferred until the switch is released), negative errno otherwise.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		il_dealloc_bcast_stations(il);
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* If the platform's RF-kill switch is asserted, stop here and
	 * wait for the rfkill interrupt instead of bringing up traffic. */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_dealloc_bcast_stations(il);
		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack/clear any pending interrupt causes. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* Must be called before il4965_hw_nic_init(). */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		il_dealloc_bcast_stations(il);
		return ret;
	}

	/* Make sure the rfkill handshake bits are cleared. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* Clear (again), then enable host interrupts. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* Really make sure the rfkill handshake bit is cleared — the
	 * repeated write appears intentional (belt-and-braces); do not
	 * "deduplicate" without checking hardware errata. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy the original ucode data image from disk into a backup
	 * cache, in case the runtime data in SRAM gets corrupted. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* Load the bootstrap uCode; on success the hardware will
		 * fetch the runtime image and eventually signal ALIVE. */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* Start the card running the loaded program. */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: tear everything back down.  EXIT_PENDING
	 * is set temporarily so __il4965_down() skips rescheduling. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5611
5612
5613
5614
5615
5616
5617
5618static void
5619il4965_bg_init_alive_start(struct work_struct *data)
5620{
5621 struct il_priv *il =
5622 container_of(data, struct il_priv, init_alive_start.work);
5623
5624 mutex_lock(&il->mutex);
5625 if (test_bit(S_EXIT_PENDING, &il->status))
5626 goto out;
5627
5628 il->ops->init_alive_start(il);
5629out:
5630 mutex_unlock(&il->mutex);
5631}
5632
5633static void
5634il4965_bg_alive_start(struct work_struct *data)
5635{
5636 struct il_priv *il =
5637 container_of(data, struct il_priv, alive_start.work);
5638
5639 mutex_lock(&il->mutex);
5640 if (test_bit(S_EXIT_PENDING, &il->status))
5641 goto out;
5642
5643 il4965_alive_start(il);
5644out:
5645 mutex_unlock(&il->mutex);
5646}
5647
5648static void
5649il4965_bg_run_time_calib_work(struct work_struct *work)
5650{
5651 struct il_priv *il = container_of(work, struct il_priv,
5652 run_time_calib_work);
5653
5654 mutex_lock(&il->mutex);
5655
5656 if (test_bit(S_EXIT_PENDING, &il->status) ||
5657 test_bit(S_SCANNING, &il->status)) {
5658 mutex_unlock(&il->mutex);
5659 return;
5660 }
5661
5662 if (il->start_calib) {
5663 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5664 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5665 }
5666
5667 mutex_unlock(&il->mutex);
5668}
5669
/*
 * il4965_bg_restart - restart worker, queued on firmware error or by
 * il4965_alive_start() failure paths
 *
 * Two paths: after a firmware error (S_FW_ERROR set) we only bring the
 * device down and let mac80211 drive the restart via
 * ieee80211_restart_hw(); otherwise we do a full down/up cycle here.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		/* Cancel work outside the mutex (handlers take it). */
		il4965_cancel_deferred_work(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		/* Re-check for shutdown before coming back up. */
		mutex_lock(&il->mutex);
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5700
5701static void
5702il4965_bg_rx_replenish(struct work_struct *data)
5703{
5704 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
5705
5706 if (test_bit(S_EXIT_PENDING, &il->status))
5707 return;
5708
5709 mutex_lock(&il->mutex);
5710 il4965_rx_replenish(il);
5711 mutex_unlock(&il->mutex);
5712}
5713
5714
5715
5716
5717
5718
5719
/* How long il4965_mac_start() waits for the ALIVE handshake (jiffies). */
#define UCODE_READY_TIMEOUT (4 * HZ)
5721
5722
5723
5724
5725
/*
 * il4965_mac_setup_register - describe our capabilities to mac80211
 * @max_probe_length: largest probe request the uCode accepts; bounds
 *	the scan IE space advertised to mac80211.
 *
 * Fills in ieee80211_hw and wiphy capability flags, queue counts, and
 * band pointers, then registers with mac80211.  Returns 0 on success
 * or the ieee80211_register_hw() error.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics. */
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	if (il->cfg->sku & IL_SKU_N)
		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
				       NL80211_FEATURE_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				       REGULATORY_DISABLE_BEACON_HINTS;

	/* For now, disable PS by default because it affects RX
	 * performance significantly. */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	/* Reserve room for the 24-byte 802.11 header plus 2 more bytes
	 * (presumably a zero-length SSID element) in probe requests. */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* One hardware queue per AC. */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only advertise bands that actually have channels. */
	if (il->bands[NL80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		    &il->bands[NL80211_BAND_2GHZ];
	if (il->bands[NL80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		    &il->bands[NL80211_BAND_5GHZ];

	il_leds_init(il);

	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5791
/*
 * il4965_mac_start - mac80211 .start callback
 *
 * Brings the NIC up and, unless RF-kill is asserted, waits up to
 * UCODE_READY_TIMEOUT for il4965_alive_start() to set S_READY (it
 * wakes us via il->wait_command_queue).  Returns 0, a bring-up error,
 * or -ETIMEDOUT if ALIVE processing never completed.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* We need this as the mac80211 callbacks are not mutex-locked. */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* RF-kill: __il4965_up() returned 0 without starting the uCode;
	 * mark the interface open and wait for the switch to release. */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for ALIVE processing to complete (S_READY set by
	 * il4965_alive_start(), which wakes wait_command_queue). */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5833
/*
 * il4965_mac_stop - mac80211 .stop callback
 *
 * Brings the NIC down, flushes deferred work, and re-enables the
 * RF-kill interrupt so switch changes are still seen while the
 * interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* Enable rfkill interrupt (in order to keep track of the rfkill
	 * status) even though the interface is down. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5857
/*
 * il4965_mac_tx - mac80211 .tx callback
 *
 * Hands the frame to il4965_tx_skb(); on failure the skb is dropped
 * here (mac80211 transfers ownership to the driver, so we must free it).
 */
void
il4965_mac_tx(struct ieee80211_hw *hw,
	      struct ieee80211_tx_control *control,
	      struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MACDUMP("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (il4965_tx_skb(il, control->sta, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
5875
/*
 * il4965_mac_update_tkip_key - mac80211 .update_tkip_key callback
 *
 * Forwards the new TKIP phase-1 key material to the device-specific
 * update routine; vif is unused here.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);

	D_MAC80211("leave\n");
}
5889
/*
 * il4965_mac_set_key - mac80211 .set_key callback
 *
 * Installs or removes a hardware crypto key.  Static (default) WEP
 * group keys are handled via the default-WEP key tables; everything
 * else goes through the dynamic key path keyed by station id.
 *
 * Returns 0 on success, -EOPNOTSUPP when hw crypto is disabled or for
 * IBSS group keys, -EINVAL for an unknown station or command.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* In IBSS RSN, group keys are negotiated per peer and cannot be
	 * programmed into the device; leave them to software crypto. */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - ad-hoc group key\n");
		return -EOPNOTSUPP;
	}

	/* No sta means a broadcast/group key. */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/* A WEP key without a station is a static (default) group key:
	 * on SET it is one only while no pairwise (mapping) keys exist;
	 * on DISABLE we recognize it by the hw_key_idx it was given. */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5965
/*
 * il4965_mac_ampdu_action - mac80211 .ampdu_action callback
 *
 * Dispatches A-MPDU session start/stop for both Rx and Tx directions.
 * Returns -EACCES on non-HT hardware; stop-path errors are masked to 0
 * when the driver is already shutting down.  Unhandled actions (e.g.
 * TX_OPERATIONAL is a plain ack here) fall back to the initial -EINVAL.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_ampdu_params *params)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	/* Aggregation requires HT support in this SKU. */
	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		/* Don't report stop failures during teardown. */
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
6015
/*
 * il4965_mac_sta_add - mac80211 .sta_add callback
 *
 * Adds the peer to the device's station table and initializes rate
 * scaling for it.  Returns 0 or the station-table error.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	/* "is_ap" describes the peer: when our vif operates as a
	 * station, the peer being added is our AP.  NOTE(review): the
	 * naming is from the peer's perspective — confirm against
	 * il_add_station_common()'s contract. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling. */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
6051
/*
 * il4965_mac_channel_switch - mac80211 .channel_switch callback
 *
 * Validates the requested channel, stages the new RXON channel/HT
 * configuration, and hands the switch to the device-specific hook.
 * S_CHANNEL_SWITCH_PENDING guards against overlapping switches; it is
 * rolled back (and mac80211 notified via ieee80211_chswitch_done) if
 * the device-level command fails.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	/* Bail while shutting down, scanning, or mid-switch. */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	/* Channel switch only makes sense while associated. */
	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* Already on the requested channel: nothing to do. */
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels. */
	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		il->ht.is_40mhz = false;
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	case NL80211_CHAN_HT40MINUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		il->ht.is_40mhz = true;
		break;
	case NL80211_CHAN_HT40PLUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		il->ht.is_40mhz = true;
		break;
	}

	/* Changing channel invalidates any staged flags. */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);

	/* At this point, staging_rxon has the configuration for the
	 * channel switch; hand it to the device-specific hook and undo
	 * the pending state if it refuses. */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
6138
/*
 * il4965_configure_filter - mac80211 .configure_filter callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter bits and
 * stages them; the new flags take effect at the next RXON commit
 * rather than being committed here.  On exit *total_flags is clamped
 * to the subset of FIF_* flags this driver actually honours.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Accumulate each RXON bit into either the set-mask (flag requested)
 * or the clear-mask (flag absent). */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors. */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/* The staged filter flags are deliberately not committed here;
	 * they are picked up by the next RXON commit. */

	mutex_unlock(&il->mutex);

	/* Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config(), so
	 * report only the flags we actually implement. */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
6185
6186
6187
6188
6189
6190
6191
6192static void
6193il4965_bg_txpower_work(struct work_struct *work)
6194{
6195 struct il_priv *il = container_of(work, struct il_priv,
6196 txpower_work);
6197
6198 mutex_lock(&il->mutex);
6199
6200
6201
6202
6203
6204 if (test_bit(S_EXIT_PENDING, &il->status) ||
6205 test_bit(S_SCANNING, &il->status))
6206 goto out;
6207
6208
6209
6210
6211 il->ops->send_tx_power(il);
6212
6213
6214
6215 il->last_temperature = il->temperature;
6216out:
6217 mutex_unlock(&il->mutex);
6218}
6219
/*
 * il4965_setup_deferred_work - initialize workqueue, workers, timers
 * and the IRQ tasklet
 *
 * NOTE(review): the create_singlethread_workqueue() return value is
 * not checked; a NULL il->workqueue would oops on the first
 * queue_work().  Fixing it would require changing this function's
 * void interface — flagging rather than changing behavior here.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0);

	timer_setup(&il->watchdog, il_bg_watchdog, 0);

	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
6245
/*
 * il4965_cancel_deferred_work - cancel workers and timers started by
 * il4965_setup_deferred_work()
 *
 * alive_start is cancelled without _sync — presumably because callers
 * may hold locks its handler takes; confirm before changing.
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
6258
6259static void
6260il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
6261{
6262 int i;
6263
6264 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
6265 rates[i].bitrate = il_rates[i].ieee * 5;
6266 rates[i].hw_value = i;
6267 rates[i].hw_value_short = i;
6268 rates[i].flags = 0;
6269 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
6270
6271
6272
6273 rates[i].flags |=
6274 (il_rates[i].plcp ==
6275 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6276 }
6277 }
6278}
6279
6280
6281
6282
/*
 * il4965_set_wr_ptrs - set both write and read pointers of a Tx queue
 *
 * HBUS_TARG_WRPTR encodes the queue number in bits 8+ and the index in
 * the low byte; the scheduler read pointer is set to match.
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6289
/*
 * il4965_tx_queue_set_status - bind a Tx queue to a FIFO and set its
 * active/retry mode in the scheduler status register
 * @scd_retry: nonzero selects scheduler-retry ("BA"/aggregation) mode,
 *	which sets both the WSL and SCD_ACK bits.
 */
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
			   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Queue counts as active only if its bit is set in the mask. */
	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
6312
/* mac80211 callback table for the 4965: device-specific handlers here,
 * shared iwlegacy handlers (il_mac_*) from common code. */
static const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
	.flush = il_mac_flush,
};
6335
6336static int
6337il4965_init_drv(struct il_priv *il)
6338{
6339 int ret;
6340
6341 spin_lock_init(&il->sta_lock);
6342 spin_lock_init(&il->hcmd_lock);
6343
6344 INIT_LIST_HEAD(&il->free_frames);
6345
6346 mutex_init(&il->mutex);
6347
6348 il->ieee_channels = NULL;
6349 il->ieee_rates = NULL;
6350 il->band = NL80211_BAND_2GHZ;
6351
6352 il->iw_mode = NL80211_IFTYPE_STATION;
6353 il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
6354 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
6355
6356
6357 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
6358
6359
6360 if (il->ops->set_rxon_chain)
6361 il->ops->set_rxon_chain(il);
6362
6363 il_init_scan_params(il);
6364
6365 ret = il_init_channel_map(il);
6366 if (ret) {
6367 IL_ERR("initializing regulatory failed: %d\n", ret);
6368 goto err;
6369 }
6370
6371 ret = il_init_geos(il);
6372 if (ret) {
6373 IL_ERR("initializing geos failed: %d\n", ret);
6374 goto err_free_channel_map;
6375 }
6376 il4965_init_hw_rates(il, il->ieee_rates);
6377
6378 return 0;
6379
6380err_free_channel_map:
6381 il_free_channel_map(il);
6382err:
6383 return ret;
6384}
6385
/*
 * Release what il4965_init_drv() allocated (geo structures, channel map)
 * plus the scan command buffer.  kfree(NULL) is a no-op, so this is safe
 * even if the scan command was never allocated.
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6393
/*
 * Cache hardware revision information: the two CSR revision registers
 * and the PCI config-space revision ID.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6402
/*
 * Default thresholds for the runtime sensitivity-calibration algorithm
 * (energy and auto-correlation limits for CCK and OFDM detection).
 * Values are device-specific register units — see the il_sensitivity_ranges
 * definition for field semantics.
 */
static const struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,

	/* OFDM auto-correlation limits (with and without MRC, x1 variants) */
	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	/* CCK auto-correlation limits */
	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
6429
/*
 * Fill il->hw_params with 4965-specific sizes and limits, applying the
 * module parameters (amsdu_size_8K, disable_11n, queues_num) on top of
 * the static configuration.  Note this writes through il->cfg (sku,
 * num_of_queues), so the cfg is mutated by module parameters.
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* Rx buffer page order: 8K pages if 8K A-MSDU was requested. */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	/* 11n_disable module parameter strips the HT capability bit. */
	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Honour the queues_num module parameter when it is in range. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	/* Scheduler byte-count tables: one per Tx queue. */
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 is only flagged for the 5 GHz band here. */
	il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	/* Antenna/chain configuration comes from the static cfg. */
	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
6477
/*
 * PCI probe: bring the device up far enough to read the EEPROM and kick
 * off the asynchronous firmware request.  The sequence is strictly
 * ordered (PCI enable -> BAR map -> HW detect -> EEPROM -> driver init ->
 * IRQ -> firmware); on any failure the goto chain unwinds exactly the
 * steps already completed, in reverse order.
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/*
	 * 1. Allocate the mac80211 hw struct with our private data
	 * (struct il_priv) embedded in it.
	 */
	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/*
	 * 2. PCI setup.  Disable ASPM L0s/L1 and clock PM on the link
	 * before enabling the device.
	 */
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Prefer a 36-bit DMA mask; fall back to 32-bit if unsupported. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err =
			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		/* Both mask widths failed: no usable DMA. */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/*
	 * 3. Map BAR 0 (device CSR registers).
	 */
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/*
	 * 4. Detect hardware revision and check HW readiness.
	 */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * Soft-reset the device before touching it further, in case a
	 * previous instance left ucode running.
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* Clear the PCI retry timeout in config space (driver W/A —
	 * presumably to keep PCI retries from interfering with low-power
	 * CPU states; TODO confirm against original driver comment). */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		err = -EIO;
		goto out_iounmap;
	}

	/*
	 * 5. Read the EEPROM: it holds the MAC address and must pass the
	 * version check before we trust its contents.
	 */
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* Extract the MAC address and publish it to the wiphy. */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/*
	 * 6. Device-specific HW parameters (queue counts, buffer sizes,
	 * antenna configuration).
	 */
	il4965_set_hw_params(il);

	/*
	 * 7. Driver-private state (locks, lists, channel map, geos).
	 */
	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;

	/*
	 * 8. Interrupts: make sure they are off, then enable MSI and
	 * request the (shared) IRQ line.
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*
	 * W/A: some hardware comes up with PCI_COMMAND_INTX_DISABLE set;
	 * clear it so legacy interrupt delivery works (NOTE(review):
	 * rationale inferred from the bit name — confirm against original
	 * driver comment).
	 */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* Mirror the hardware RF-kill switch state into il->status. */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	init_completion(&il->_4965.firmware_loading_complete);

	/* Asynchronous firmware request; the completion above is waited
	 * on in il4965_pci_remove() before teardown. */
	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
6688
/*
 * PCI remove: tear down everything il4965_pci_probe() (and the firmware
 * callback) set up, in reverse order.  Must first wait out any in-flight
 * asynchronous firmware load.
 */
static void
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Don't race with the async firmware request from probe. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/*
	 * Flag exit so pending work items and handlers bail out instead
	 * of rescheduling themselves.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	/* If mac80211 was registered, unregistering it brings the device
	 * down via the mac callbacks; otherwise do it directly. */
	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Make sure the device's APM is stopped even if the down path
	 * above was skipped or incomplete.
	 */
	il_apm_stop(il);

	/*
	 * Interrupts must be off and any running ISR finished before we
	 * free resources the ISR could touch.
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/* Drain the workqueue before destroying it; NULL the pointer so
	 * nothing queues to it afterwards. */
	flush_workqueue(il->workqueue);

	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	/* Undo the PCI/IRQ setup from probe, in reverse order. */
	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	il4965_uninit_drv(il);

	/* dev_kfree_skb(NULL) is a no-op, so this is safe if no beacon
	 * skb was ever allocated. */
	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6767
6768
6769
6770
6771
/*
 * Write 'mask' to the scheduler's TXFACT register, selecting which Tx
 * scheduler channels are enabled (0 disables them all).
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6777
6778
6779
6780
6781
6782
6783
6784
/* PCI IDs handled by this driver; both map to the same 4965 config. */
static const struct pci_device_id il4965_hw_card_ids[] = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* sentinel */
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6791
/* PCI driver glue: probe/remove plus optional PM ops. */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = il4965_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};
6799
6800static int __init
6801il4965_init(void)
6802{
6803
6804 int ret;
6805 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6806 pr_info(DRV_COPYRIGHT "\n");
6807
6808 ret = il4965_rate_control_register();
6809 if (ret) {
6810 pr_err("Unable to register rate control algorithm: %d\n", ret);
6811 return ret;
6812 }
6813
6814 ret = pci_register_driver(&il4965_driver);
6815 if (ret) {
6816 pr_err("Unable to initialize PCI module\n");
6817 goto error_register;
6818 }
6819
6820 return ret;
6821
6822error_register:
6823 il4965_rate_control_unregister();
6824 return ret;
6825}
6826
/*
 * Module exit: unregister the PCI driver first (removing all devices),
 * then the rate-control algorithm they were using.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6833
module_exit(il4965_exit);
module_init(il4965_init);

/* Module parameters (all mirrored into il4965_mod_params, except the
 * debug mask which is global to iwlegacy). */
#ifdef CONFIG_IWLEGACY_DEBUG
module_param_named(debug, il_debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, 0444);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");