1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/pci.h>
21#include <linux/slab.h>
22#include <linux/dma-mapping.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/skbuff.h>
26#include <linux/netdevice.h>
27#include <linux/firmware.h>
28#include <linux/etherdevice.h>
29#include <linux/if_arp.h>
30
31#include <net/mac80211.h>
32
33#include <asm/div64.h>
34
35#define DRV_NAME "iwl4965"
36
37#include "common.h"
38#include "4965.h"
39
40
41
42
43
44
45
46
47
48
49#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
50
51#ifdef CONFIG_IWLEGACY_DEBUG
52#define VD "d"
53#else
54#define VD
55#endif
56
57#define DRV_VERSION IWLWIFI_VERSION VD
58
59MODULE_DESCRIPTION(DRV_DESCRIPTION);
60MODULE_VERSION(DRV_VERSION);
61MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
62MODULE_LICENSE("GPL");
63MODULE_ALIAS("iwl4965");
64
65void
66il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
67{
68 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
69 IL_ERR("Tx flush command to flush out all frames\n");
70 if (!test_bit(S_EXIT_PENDING, &il->status))
71 queue_work(il->workqueue, &il->tx_flush);
72 }
73}
74
75
76
77
/* Default module parameters; only firmware restart on error is enabled,
 * every other field keeps its zero default. */
struct il_mod_params il4965_mod_params = {
	.restart_fw = 1,
	/* remaining fields keep their zero defaults */
};
82
/*
 * il4965_rx_queue_reset - return every Rx buffer to the "used" pool
 *
 * Frees any page still attached to a pool entry (unmapping its DMA
 * address first), moves all pool entries onto rx_used, clears the
 * in-flight queue[] slots and resets the read/write indexes, all under
 * rxq->lock.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* A buffer may still be mapped and backed by a page from a
		 * previous run, so release that storage first */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* No buffer is currently handed to the device */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
115
/*
 * il4965_rx_init - program the flow handler's Rx DMA channel 0
 *
 * Stops the channel, points the device at the RBD circular buffer and
 * the Rx status area, then re-enables DMA with the configured receive
 * buffer size.  Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* log2 of RBD count */
	u32 rb_timeout = 0;	/* no interrupt coalescing timeout at FH level */

	/* Receive buffer size per RBD: 8K if A-MSDU support requested */
	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM
	 * (address is stored >> 8, matching il4965_dma_addr2rbd_ptr) */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA:
	 *  - interrupt the host on frame reception
	 *  - single-frame mode
	 *  - chosen RB size, RBD-count log2 and timeout */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to the default value */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
159
/*
 * il4965_set_pwr_vmain - select VMAIN as the device power source
 *
 * Sets the VMAIN power-source bits in the APMG power-control register
 * while clearing the rest of the power-source field (mask is the
 * complement of APMG_PS_CTRL_MSK_PWR_SRC).
 */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
177
/*
 * il4965_hw_nic_init - one-time / restart hardware bring-up
 *
 * Initializes APM and interrupt coalescing, selects the power source,
 * applies NIC configuration, then allocates (first call) or resets
 * (subsequent calls) the Rx queue and Tx queue contexts.  On success
 * sets S_INIT and returns 0; returns -ENOMEM or the Tx allocation
 * error code on failure.
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	spin_lock_irqsave(&il->lock, flags);
	il_apm_init(il);

	/* Use the calibration-phase interrupt coalescing timeout */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);
	il4965_nic_config(il);

	/* Allocate the RX queue on first init, otherwise just reset it */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	/* Fill the free-buffer pool (may sleep) */
	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	/* Hand the restocked buffers to the device */
	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate the Tx queue context on first init, else just reset it */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
227
228
229
230
/*
 * il4965_dma_addr2rbd_ptr - convert a DMA address into a device RBD pointer
 *
 * The receive buffer descriptor stores the buffer's DMA address shifted
 * right by 8 bits (the low 8 bits are dropped, so buffers must be
 * 256-byte aligned), in little-endian byte order.
 */
static inline __le32
il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
{
	return cpu_to_le32((u32) (dma_addr >> 8));
}
236
237
238
239
240
241
242
243
244
245
246
247
/*
 * il4965_rx_queue_restock - refill the device Rx queue from rx_free
 *
 * While the queue has space and pre-allocated buffers are available,
 * moves buffers from rx_free into the RBD circular buffer, advancing
 * the 'write' idx.  If the free pool has dropped to the low watermark,
 * schedules the replenish worker.  The device write pointer is only
 * updated in multiples of 8 slots.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The slot being overwritten must not still own a page */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point device at this buffer via next RBD in circular buf */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated pool is dropping low, schedule more
	 * allocation in the work thread */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * The device write pointer is advanced in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
289
290
291
292
293
294
295
296
297
/*
 * il4965_rx_allocate - back every "used" Rx buffer with a fresh page
 *
 * @priority: base gfp flags — GFP_KERNEL from process context,
 *            GFP_ATOMIC from interrupt context (see the callers).
 *
 * Loops until rx_used is empty, allocating and DMA-mapping one page per
 * buffer and moving the buffer onto rx_free.  Pages are allocated and
 * mapped with rxq->lock dropped; rx_used is re-checked under the lock
 * before committing, since this can run concurrently from several
 * contexts.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	dma_addr_t page_dma;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* With plenty of buffers left an allocation failure is
		 * harmless, so suppress the page-allocator warning */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		/* High-order allocations want a compound page */
		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			/* Running low on buffers is worth shouting about */
			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);

			/* No rescheduling here: restock will queue the
			 * replenish worker if it still needs buffers */
			return;
		}

		/* Get the DMA (bus) address of the receive buffer */
		page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
			__free_pages(page, il->hw_params.rx_page_order);
			break;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* Re-check: rx_used may have been drained while unlocked */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			pci_unmap_page(il->pci_dev, page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}

		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* A "used" buffer must not still own a page */
		BUG_ON(rxb->page);

		rxb->page = page;
		rxb->page_dma = page_dma;
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
379
380void
381il4965_rx_replenish(struct il_priv *il)
382{
383 unsigned long flags;
384
385 il4965_rx_allocate(il, GFP_KERNEL);
386
387 spin_lock_irqsave(&il->lock, flags);
388 il4965_rx_queue_restock(il);
389 spin_unlock_irqrestore(&il->lock, flags);
390}
391
/*
 * il4965_rx_replenish_now - atomic-context variant of il4965_rx_replenish
 *
 * Allocates with GFP_ATOMIC and restocks without taking il->lock.
 */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
399
400
401
402
403
404
/*
 * il4965_rx_queue_free - release all Rx queue storage
 *
 * Unmaps and frees every page still held in the buffer pool, then frees
 * the RBD circular buffer (4 bytes per RBD entry) and the driver's Rx
 * status area back to the coherent DMA allocator, NULLing the pointers.
 */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
426
/*
 * il4965_rxq_stop - stop the Rx DMA channel
 *
 * Writes 0 to the channel-0 config register and polls (timeout 1000)
 * for the channel-idle status bit.  A poll timeout is only logged;
 * the function always returns 0.
 */
int
il4965_rxq_stop(struct il_priv *il)
{
	int ret;

	/* Stop Rx DMA, then wait for the channel to go idle */
	_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   1000);
	if (ret < 0)
		IL_ERR("Can't stop Rx DMA.\n");

	return 0;
}
442
443int
444il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
445{
446 int idx = 0;
447 int band_offset = 0;
448
449
450 if (rate_n_flags & RATE_MCS_HT_MSK) {
451 idx = (rate_n_flags & 0xff);
452 return idx;
453
454 } else {
455 if (band == NL80211_BAND_5GHZ)
456 band_offset = IL_FIRST_OFDM_RATE;
457 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
458 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
459 return idx - band_offset;
460 }
461
462 return -1;
463}
464
/*
 * il4965_calc_rssi - compute the received signal strength in dBm
 *
 * Combines the strongest per-antenna DSP RSSI measurement with the AGC
 * gain value from the non-configuration PHY data and subtracts the
 * 4965-specific offset.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* Data from PHY/DSP regarding signal strength, etc.;
	 * contents are always there, not configurable by host. */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* Find max rssi among up to 3 possible receivers; rssi_info holds
	 * one value every other byte (hence the i << 1 stride).  Only
	 * antennas flagged valid in phy_flags are considered. */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant offset;
	 * higher AGC (higher radio gain) means a weaker signal */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
499
/*
 * il4965_translate_rx_status - normalize MPDU decrypt status bits
 *
 * Converts the decrypt status reported for MPDU frames into the
 * RX_RES_STATUS_* format used for ordinary frames, so callers can
 * interpret decryption results uniformly.
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK: ICV check is shared with WEP */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
559
560#define SMALL_PACKET_SIZE 256
561
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame to mac80211
 *
 * Frames up to SMALL_PACKET_SIZE bytes are copied into the skb head;
 * larger frames are attached as a page fragment, transferring ownership
 * of rxb->page to the skb.  Frames are dropped while the interface is
 * closed or when hardware decryption reports a failure.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process packets while the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* A frame arriving proves the channel is usable: un-stop queues
	 * that were stopped for the passive-channel reason */
	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (len <= SMALL_PACKET_SIZE) {
		skb_put_data(skb, hdr, len);
	} else {
		/* Attach the Rx page as a fragment; the skb now owns it */
		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
				len, PAGE_SIZE << il->hw_params.rx_page_order);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
}
606
607
608
/*
 * il4965_hdl_rx - handle a received frame notification
 *
 * Two notification layouts are handled: a full N_RX response carrying
 * its own PHY data, or an MPDU response that relies on the PHY data
 * cached by il4965_hdl_rx_phy().  After sanity checks (DSP size, CRC,
 * FIFO overflow) the mac80211 rx_status is filled in and the frame is
 * forwarded via il4965_pass_packet_to_mac80211().
 */
static void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/* N_RX: PHY result, 802.11 header and frame status are all
	 * contained in this notification.  Otherwise only the MPDU start
	 * record is present and the cached PHY result is used. */
	if (pkt->hdr.cmd == N_RX) {
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		/* status word follows the variable-length frame data */
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		/* normalize the MPDU decrypt status to the N_RX format */
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d\n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	/* Drop frames with CRC errors or RXE FIFO overflows */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
	    NL80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* Remember the uCode's beacon timestamp for later timing use */
	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among connected antennas */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/* Extract the antenna field from phy_flags for mac80211 */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* Set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;

	/* Set up the HT phy flags: encoding, bandwidth, guard interval */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.encoding = RX_ENC_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.bw = RATE_INFO_BW_40;
	else
		rx_status.bw = RATE_INFO_BW_20;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
		/* Subframes of one A-MPDU share a single cached PHY
		 * response, so ampdu_ref (bumped once per PHY response)
		 * groups them for mac80211. */
		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status.ampdu_reference = il->_4965.ampdu_ref;
	}

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
740
741
742
743static void
744il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
745{
746 struct il_rx_pkt *pkt = rxb_addr(rxb);
747 il->_4965.last_phy_res_valid = true;
748 il->_4965.ampdu_ref++;
749 memcpy(&il->_4965.last_phy_res, pkt->u.raw,
750 sizeof(struct il_rx_phy_res));
751}
752
/*
 * il4965_get_channels_for_scan - fill scan-channel entries for one band
 *
 * Walks the channels of the current scan request, skipping channels
 * from other bands and invalid channels, and writes one
 * struct il_scan_channel per usable channel into @scan_ch.
 * Returns the number of entries written.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum nl80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* passive dwell must be longer than active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* Force passive scanning where regulatory rules forbid
		 * initiating radiation (NO_IR) or the channel is passive */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_NO_IR))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set DSP attenuation to its default value */
		scan_ch->dsp_atten = 110;

		/* Per-band tx gain settings (bit 5 set plus band-specific
		 * gain index) — values taken as-is from the original code */
		if (band == NL80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
831
832static void
833il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
834{
835 int i;
836 u8 ind = *ant;
837
838 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
839 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
840 if (valid & BIT(ind)) {
841 *ant = ind;
842 return;
843 }
844 }
845}
846
/*
 * il4965_request_scan - build and send a C_SCAN host command
 *
 * Fills the (lazily allocated, reused) scan command buffer with dwell
 * times, direct-probe SSIDs, per-band Tx rate, antenna/rx-chain setup,
 * the probe request and the channel list, then sends it synchronously.
 * Must be called with il->mutex held.  Returns 0 on success, -ENOMEM
 * on allocation failure, -EIO on a bad band or empty channel list, or
 * the error from il_send_cmd_sync().
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum nl80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	/* The scan command buffer is allocated once and reused; it must
	 * hold the fixed command plus IL_MAX_SCAN_SIZE of payload */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* suspend_time encoding: beacon-interval quotient in the
		 * top bits (<< 22), remainder scaled to usec below */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* broadcast (zero-length) SSIDs are skipped; the
			 * wildcard probe is sent anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Select the probe-request rate per band: 2.4 GHz uses 1M CCK
	 * unless the channel mode is pure-40MHz, 5 GHz uses 6M OFDM */
	switch (il->scan_band) {
	case NL80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case NL80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/* For active scans require a good CRC threshold; passive scans
	 * never require it */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	/* Config may override which Rx antennas to use while scanning */
	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate through the valid Tx antennas between scans */
	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);

	/* In power save mode reduce to a single Rx chain */
	if (test_bit(S_POWER_PMI, &il->status)) {

		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* Build the rx_chain field: valid chains, forced selection and
	 * the driver-force flag */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Write the probe request (with requested IEs) into the payload */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	/* Channel entries follow the probe request in the payload */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Total command length: header + probe request + channel list */
	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1035
1036int
1037il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1038 bool add)
1039{
1040 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1041
1042 if (add)
1043 return il4965_add_bssid_station(il, vif->bss_conf.bssid,
1044 &vif_priv->ibss_bssid_sta_id);
1045 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1046 vif->bss_conf.bssid);
1047}
1048
1049void
1050il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1051{
1052 lockdep_assert_held(&il->sta_lock);
1053
1054 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1055 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1056 else {
1057 D_TX("free more than tfds_in_queue (%u:%d)\n",
1058 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1059 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1060 }
1061}
1062
1063#define IL_TX_QUEUE_MSK 0xfffff
1064
1065static bool
1066il4965_is_single_rx_stream(struct il_priv *il)
1067{
1068 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1069 il->current_ht_config.single_chain_sufficient;
1070}
1071
1072#define IL_NUM_RX_CHAINS_MULTIPLE 3
1073#define IL_NUM_RX_CHAINS_SINGLE 2
1074#define IL_NUM_IDLE_CHAINS_DUAL 2
1075#define IL_NUM_IDLE_CHAINS_SINGLE 1
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087static int
1088il4965_get_active_rx_chain_count(struct il_priv *il)
1089{
1090
1091 if (il4965_is_single_rx_stream(il))
1092 return IL_NUM_RX_CHAINS_SINGLE;
1093 else
1094 return IL_NUM_RX_CHAINS_MULTIPLE;
1095}
1096
1097
1098
1099
1100
/*
 * il4965_get_idle_rx_chain_count - Rx chains to keep while idle
 *
 * Derived from the SMPS (spatial multiplexing power save) setting:
 * static and dynamic SMPS idle with a single chain; SMPS off keeps
 * all active chains.  An unexpected mode warns and falls back to
 * @active_cnt.
 */
static int
il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
{
	switch (il->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		/* both SMPS modes idle on a single chain */
		return IL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
		return active_cnt;
	}
}
1116
1117
1118static u8
1119il4965_count_chain_bitmap(u32 chain_bitmap)
1120{
1121 u8 res;
1122 res = (chain_bitmap & BIT(0)) >> 0;
1123 res += (chain_bitmap & BIT(1)) >> 1;
1124 res += (chain_bitmap & BIT(2)) >> 2;
1125 res += (chain_bitmap & BIT(3)) >> 3;
1126 return res;
1127}
1128
1129
1130
1131
1132
1133
1134
/*
 * il4965_set_rxon_chain - compute the staging RXON rx_chain field
 *
 * Determines the valid/active/idle Rx chain counts from chain-noise
 * calibration, SMPS configuration and power state, and decides whether
 * to force MIMO decomposition.  Only il->staging is modified.
 */
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	/* "cam" = not in power-save (S_POWER_PMI clear) */
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before calibration has run, assume all valid antennas are
	 * connected; afterwards use the chain-noise result. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* Correct the chain counts so they never exceed the number of
	 * chains actually connected per the calibration bitmap */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	il->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only with multiple streams, enough chains, and no
	 * power save active */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1185
/*
 * il4965_get_fh_string - map an FH register address to its name
 *
 * Used for debug dumps; relies on the IL_CMD() stringification macro.
 * Unknown addresses yield "UNKNOWN".
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
	IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
	IL_CMD(FH49_RSCSR_CHNL0_WPTR);
	IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
	IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
	IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
	IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IL_CMD(FH49_TSSR_TX_STATUS_REG);
	IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1203
/*
 * il4965_dump_fh - dump the flow-handler register set
 *
 * With CONFIG_IWLEGACY_DEBUG and @display true, formats the dump into a
 * kmalloc'ed buffer returned via @buf (the caller must kfree it) and
 * returns the number of bytes written, or -ENOMEM.  Otherwise the
 * values are emitted through IL_ERR and 0 is returned.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* ~48 chars per register line plus the header */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      " %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1248
/*
 * il4965_hdl_missed_beacon - handle a missed-beacon notification
 *
 * When the consecutive missed-beacon count exceeds the driver's
 * threshold, restart sensitivity calibration (skipped while a scan is
 * in progress).
 */
static void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    il->missed_beacon_threshold) {
		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(S_SCANNING, &il->status))
			il4965_init_sensitivity(il);
	}
}
1267
1268
1269
1270
/*
 * il4965_rx_calc_noise - estimate background noise from beacon silence
 *
 * Averages the per-chain "beacon silence" RSSI values from the last
 * stats notification (chains reporting zero are ignored) and converts
 * to dBm with a fixed offset.  The result is currently only emitted
 * via the D_CALIB debug message.
 */
static void
il4965_rx_calc_noise(struct il_priv *il)
{
	struct stats_rx_non_phy *rx_info;
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
	int last_rx_noise;

	rx_info = &(il->_4965.stats.rx.general);
	bcn_silence_a =
	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	bcn_silence_b =
	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	bcn_silence_c =
	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	/* Only chains reporting non-zero silence contribute */
	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas, converted to dBm by a fixed
	 * offset; otherwise mark the measurement unavailable */
	if (num_active_rx)
		last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;

	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
		bcn_silence_b, bcn_silence_c, last_rx_noise);
}
1310
1311#ifdef CONFIG_IWLEGACY_DEBUGFS
1312
1313
1314
1315
1316
/*
 * il4965_accumulative_stats - accumulate per-field stats deltas (debugfs)
 *
 * Treats the stats notification as an array of __le32 words.  For every
 * word that increased relative to the previous snapshot, the increase
 * is added to the running accum_stats and max_delta is updated if this
 * is the largest increase seen.  The first word is skipped (the loop
 * starts at sizeof(__le32)); fields that did not increase leave their
 * previous delta value untouched.  All four arrays are walked in
 * lock-step, one word per iteration.
 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;

	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* Temperature and timestamp are not counters: copy them verbatim */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
1351#endif
1352
1353static void
1354il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
1355{
1356 const int recalib_seconds = 60;
1357 bool change;
1358 struct il_rx_pkt *pkt = rxb_addr(rxb);
1359
1360 D_RX("Statistics notification received (%d vs %d).\n",
1361 (int)sizeof(struct il_notif_stats),
1362 le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
1363
1364 change =
1365 ((il->_4965.stats.general.common.temperature !=
1366 pkt->u.stats.general.common.temperature) ||
1367 ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
1368 (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
1369#ifdef CONFIG_IWLEGACY_DEBUGFS
1370 il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
1371#endif
1372
1373
1374 memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
1375
1376 set_bit(S_STATS, &il->status);
1377
1378
1379
1380
1381
1382 mod_timer(&il->stats_periodic,
1383 jiffies + msecs_to_jiffies(recalib_seconds * 1000));
1384
1385 if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
1386 (pkt->hdr.cmd == N_STATS)) {
1387 il4965_rx_calc_noise(il);
1388 queue_work(il->workqueue, &il->run_time_calib_work);
1389 }
1390
1391 if (change)
1392 il4965_temperature_calib(il);
1393}
1394
1395static void
1396il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
1397{
1398 struct il_rx_pkt *pkt = rxb_addr(rxb);
1399
1400 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
1401#ifdef CONFIG_IWLEGACY_DEBUGFS
1402 memset(&il->_4965.accum_stats, 0,
1403 sizeof(struct il_notif_stats));
1404 memset(&il->_4965.delta_stats, 0,
1405 sizeof(struct il_notif_stats));
1406 memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
1407#endif
1408 D_RX("Statistics have been cleared\n");
1409 }
1410 il4965_hdl_stats(il, rxb);
1411}
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
/* Map IEEE 802.11 TID (0-7) to mac80211 access category. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
1450
1451static inline int
1452il4965_get_ac_from_tid(u16 tid)
1453{
1454 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1455 return tid_to_ac[tid];
1456
1457
1458 return -EINVAL;
1459}
1460
1461static inline int
1462il4965_get_fifo_from_tid(u16 tid)
1463{
1464 static const u8 ac_to_fifo[] = {
1465 IL_TX_FIFO_VO,
1466 IL_TX_FIFO_VI,
1467 IL_TX_FIFO_BE,
1468 IL_TX_FIFO_BK,
1469 };
1470
1471 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1472 return ac_to_fifo[tid_to_ac[tid]];
1473
1474
1475 return -EINVAL;
1476}
1477
1478
1479
1480
/*
 * il4965_tx_cmd_build_basic - fill the fixed (non-rate) part of a TX command
 *
 * Sets ACK/sequence-control/TSF flags from the frame type and mac80211
 * TX info, records the QoS TID, applies RTS/CTS protection flags and
 * programs the power-management frame timeout.  @std_id is the station
 * table index the frame is sent to.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* TSF flag only on the first fragment of a probe response
		 * (low 4 bits of seq_ctrl are the fragment number). */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		/* No-ACK frames: clear ACK, let uCode manage seq ctl. */
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* Block-ack requests expect an immediate BA response. */
	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		/* Driver assigns the sequence number for QoS data
		 * (see il4965_tx_skb), so clear SEQ_CTL here. */
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* Add RTS/CTS protection flags as configured. */
	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* pm_frame_timeout units: presumably TU -- TODO confirm
		 * against the TX command definition in the firmware API. */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
1534
1535static void
1536il4965_tx_cmd_build_rate(struct il_priv *il,
1537 struct il_tx_cmd *tx_cmd,
1538 struct ieee80211_tx_info *info,
1539 struct ieee80211_sta *sta,
1540 __le16 fc)
1541{
1542 const u8 rts_retry_limit = 60;
1543 u32 rate_flags;
1544 int rate_idx;
1545 u8 data_retry_limit;
1546 u8 rate_plcp;
1547
1548
1549 if (ieee80211_is_probe_resp(fc))
1550 data_retry_limit = 3;
1551 else
1552 data_retry_limit = IL4965_DEFAULT_TX_RETRY;
1553 tx_cmd->data_retry_limit = data_retry_limit;
1554
1555 tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
1556
1557
1558
1559 if (ieee80211_is_data(fc)) {
1560 tx_cmd->initial_rate_idx = 0;
1561 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
1562 return;
1563 }
1564
1565
1566
1567
1568
1569
1570
1571 rate_idx = info->control.rates[0].idx;
1572 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
1573 || rate_idx > RATE_COUNT_LEGACY)
1574 rate_idx = rate_lowest_index(&il->bands[info->band], sta);
1575
1576 if (info->band == NL80211_BAND_5GHZ)
1577 rate_idx += IL_FIRST_OFDM_RATE;
1578
1579 rate_plcp = il_rates[rate_idx].plcp;
1580
1581 rate_flags = 0;
1582
1583
1584 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
1585 rate_flags |= RATE_MCS_CCK_MSK;
1586
1587
1588 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
1589 rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
1590
1591
1592 tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
1593}
1594
/*
 * il4965_tx_cmd_build_hwcrypto - program hardware-crypto material
 *
 * Copies the cipher selector and key for this frame into the TX command
 * so the device can encrypt in hardware.  Unknown ciphers are logged
 * and leave sec_ctl untouched.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* Derive the per-packet phase-2 TKIP key for this frame. */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through - WEP104 is WEP40 plus the 128-bit key flag */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);

		/* WEP key material starts at offset 3 in the key field. */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
1636
1637
1638
1639
/*
 * il4965_tx_skb - queue one frame for transmission
 *
 * Builds the TX command in the command slot paralleling the chosen TFD
 * ring slot, DMA-maps the command+MAC-header region and (if present)
 * the frame payload, attaches both to the TFD and advances the queue
 * write pointer.
 *
 * Returns 0 on success, -1 if the frame was dropped (RF-kill, invalid
 * station, full queue, invalid TID, or DMA mapping failure).
 */
int
il4965_tx_skb(struct il_priv *il,
	      struct ieee80211_sta *sta,
	      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct il_station_priv *sta_priv = NULL;
	struct il_tx_queue *txq;
	struct il_queue *q;
	struct il_device_cmd *out_cmd;
	struct il_cmd_meta *out_meta;
	struct il_tx_cmd *tx_cmd;
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

	spin_lock_irqsave(&il->lock, flags);
	if (il_is_rfkill(il)) {
		D_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		D_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		D_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		D_TX("Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* Non-data frames use the broadcast station entry. */
	if (!ieee80211_is_data(fc))
		sta_id = il->hw_params.bcast_id;
	else {
		/* Find idx into station table for destination station */
		sta_id = il_sta_id_or_broadcast(il, sta);

		if (sta_id == IL_INVALID_STATION) {
			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
			goto drop_unlock;
		}
	}

	D_TX("station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device, but
		 * it is relied upon being processed before the next frame
		 * for this station -- the frame that consumes the one-shot
		 * sleep TX count granted here.
		 */
		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
	}

	/* Frames scheduled after DTIM are not supported on this path. */
	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	/* The mac80211 queue mapping doubles as the default TX queue. */
	txq_id = skb_get_queue_mapping(skb);

	/* irqs already disabled/saved above when taking il->lock */
	spin_lock(&il->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&il->sta_lock);
			goto drop_unlock;
		}
		/* Stamp the driver-maintained sequence number into the
		 * header, preserving the fragment bits. */
		seq_number = il->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl =
		    hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;

		/* Redirect to the aggregation queue when a session is on
		 * for this <sta, tid>. */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &il->txq[txq_id];
	q = &txq->q;

	if (unlikely(il_queue_space(q) < q->high_mark)) {
		spin_unlock(&il->sta_lock);
		goto drop_unlock;
	}

	/* Commit the sequence number only once the frame is accepted;
	 * fragments other than the last keep the same number. */
	if (ieee80211_is_data_qos(fc)) {
		il->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			il->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&il->sta_lock);

	txq->skbs[q->write_ptr] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers. */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));

	/*
	 * Tx command header: store queue and TFD idx in the sequence
	 * field; the TX response echoes it back so the completed frame
	 * can be located for post-TX processing.
	 */
	out_cmd->hdr.cmd = C_TX;
	out_cmd->hdr.sequence =
	    cpu_to_le16((u16)
			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer. */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted. */
	tx_cmd->len = cpu_to_le16((u16) skb->len);

	if (info->control.hw_key)
		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);

	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);

	il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);

	/*
	 * First TFD buffer: TX command + MAC header, rounded up to a
	 * dword boundary (the device reads dwords); any padding bytes
	 * are signalled to the device via MH_PAD below.
	 */
	len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header. */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys =
	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
			   PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
		goto drop_unlock;

	/* Second TFD buffer: the remainder of the skb, if any
	 * (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr =
		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
				   PCI_DMA_TODEVICE);
		/* NOTE(review): on failure here the txcmd_phys mapping
		 * above is not unmapped and txq->skbs[] keeps the skb --
		 * looks like a leak on this error path; confirm against
		 * upstream before changing. */
		if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
			goto drop_unlock;
	}

	/* Attach both buffers to the TFD and remember the first mapping
	 * for later unmap on completion. */
	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	if (secondlen)
		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
					       0, 0);

	/* Fragmented frames defer the write-pointer update until the
	 * last fragment is queued. */
	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	scratch_phys =
	    txcmd_phys + sizeof(struct il_cmd_header) +
	    offsetof(struct il_tx_cmd, scratch);

	/* Sync for CPU before patching the scratch pointers into the
	 * already-mapped command. */
	pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);

	il_update_stats(il, true, fc, skb->len);

	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);

	/* Aggregation queues track per-TFD byte counts for the scheduler. */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
				       PCI_DMA_BIDIRECTIONAL);

	/* Tell device the write idx *just past* this latest filled TFD. */
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
	il_txq_update_write_ptr(il, txq);
	spin_unlock_irqrestore(&il->lock, flags);

	/*
	 * Count client frames so the station can be kept blocked while
	 * asleep; aggregated frames are tracked by the BA machinery
	 * instead (see il4965_non_agg_tx_status for the decrement).
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	/* Stop (or flush) the queue when it gets close to full. */
	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&il->lock, flags);
			txq->need_update = 1;
			il_txq_update_write_ptr(il, txq);
			spin_unlock_irqrestore(&il->lock, flags);
		} else {
			il_stop_queue(il, txq);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&il->lock, flags);
	return -1;
}
1913
1914static inline int
1915il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1916{
1917 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1918 GFP_KERNEL);
1919 if (!ptr->addr)
1920 return -ENOMEM;
1921 ptr->size = size;
1922 return 0;
1923}
1924
1925static inline void
1926il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1927{
1928 if (unlikely(!ptr->addr))
1929 return;
1930
1931 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1932 memset(ptr, 0, sizeof(*ptr));
1933}
1934
1935
1936
1937
1938
1939
1940void
1941il4965_hw_txq_ctx_free(struct il_priv *il)
1942{
1943 int txq_id;
1944
1945
1946 if (il->txq) {
1947 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1948 if (txq_id == il->cmd_queue)
1949 il_cmd_queue_free(il);
1950 else
1951 il_tx_queue_free(il, txq_id);
1952 }
1953 il4965_free_dma_ptr(il, &il->kw);
1954
1955 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1956
1957
1958 il_free_txq_mem(il);
1959}
1960
1961
1962
1963
1964
1965
1966
1967
/*
 * il4965_txq_ctx_alloc - allocate and initialize all TX DMA structures
 *
 * Allocates the scheduler byte-count tables, the keep-warm buffer and
 * the TX queue array, then initializes every queue (including the
 * command queue).  Returns 0 on success, negative errno on failure.
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
	int ret, txq_id;
	unsigned long flags;

	/* Free any leftover tx/cmd queues and keep-warm buffer first. */
	il4965_hw_txq_ctx_free(il);

	ret =
	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
				 il->hw_params.scd_bc_tbls_size);
	if (ret) {
		IL_ERR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}

	/* Keep-warm buffer, used by the device to keep PCI alive. */
	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
	if (ret) {
		IL_ERR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* Allocate the TX queue array. */
	ret = il_alloc_txq_mem(il);
	if (ret)
		goto error;

	spin_lock_irqsave(&il->lock, flags);

	/* Turn off all TX DMA fifos while (re)configuring. */
	il4965_txq_set_sched(il, 0);

	/* Tell NIC where to find the keep-warm buffer. */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Init all TX queues, including the command queue. */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		ret = il_tx_queue_init(il, txq_id);
		if (ret) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	/* il4965_hw_txq_ctx_free() already releases kw and scd_bc_tbls;
	 * the explicit frees below are harmless no-ops afterwards since
	 * il4965_free_dma_ptr() zeroes the descriptor on free. */
	il4965_hw_txq_ctx_free(il);
	il4965_free_dma_ptr(il, &il->kw);
error_kw:
	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
2025
2026void
2027il4965_txq_ctx_reset(struct il_priv *il)
2028{
2029 int txq_id;
2030 unsigned long flags;
2031
2032 spin_lock_irqsave(&il->lock, flags);
2033
2034
2035 il4965_txq_set_sched(il, 0);
2036
2037 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2038
2039 spin_unlock_irqrestore(&il->lock, flags);
2040
2041
2042 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2043 il_tx_queue_reset(il, txq_id);
2044}
2045
2046static void
2047il4965_txq_ctx_unmap(struct il_priv *il)
2048{
2049 int txq_id;
2050
2051 if (!il->txq)
2052 return;
2053
2054
2055 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2056 if (txq_id == il->cmd_queue)
2057 il_cmd_queue_unmap(il);
2058 else
2059 il_tx_queue_unmap(il, txq_id);
2060}
2061
2062
2063
2064
/*
 * il4965_txq_ctx_stop - stop the TX scheduler and all TX DMA channels
 *
 * Each channel is disabled and then polled until it reports idle;
 * a timeout is logged but not treated as fatal.
 */
void
il4965_txq_ctx_stop(struct il_priv *il)
{
	int ch, ret;

	/* Turn off the TX scheduler. */
	_il_wr_prph(il, IL49_SCD_TXFACT, 0);

	/* Stop each TX DMA channel and wait for it to go idle. */
	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
		_il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		/* 1000 is the poll timeout handed to _il_poll_bit();
		 * presumably microseconds -- TODO confirm. */
		ret =
		    _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 1000);
		if (ret < 0)
			IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
			       ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
	}
}
2085
2086
2087
2088
2089
2090
2091
2092static int
2093il4965_txq_ctx_activate_free(struct il_priv *il)
2094{
2095 int txq_id;
2096
2097 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2098 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2099 return txq_id;
2100 return -1;
2101}
2102
2103
2104
2105
/*
 * il4965_tx_queue_stop_scheduler - deactivate one queue in the scheduler
 *
 * Writes ACTIVE=0 with the SCD_ACT_EN write-enable bit set, so only the
 * active bit changes; the rest of the queue configuration is preserved.
 */
static void
il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
{
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
2115
2116
2117
2118
2119static int
2120il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2121{
2122 u32 tbl_dw_addr;
2123 u32 tbl_dw;
2124 u16 scd_q2ratid;
2125
2126 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2127
2128 tbl_dw_addr =
2129 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2130
2131 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2132
2133 if (txq_id & 0x1)
2134 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2135 else
2136 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2137
2138 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2139
2140 return 0;
2141}
2142
2143
2144
2145
2146
2147
2148
/*
 * il4965_txq_agg_enable - configure a TX queue for aggregation
 *
 * Validates the queue number, enables the TID in the device's station
 * table, maps RA/TID onto the queue in the scheduler translate table,
 * winds the read/write pointers to @ssn_idx, programs the aggregation
 * window/frame limit and activates the queue on @tx_fifo.
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
		      int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify the device's station table to TX this TID. */
	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->lock, flags);

	/* Stop this TX queue before reconfiguring it. */
	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Map receiver-address / traffic-ID to this queue. */
	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

	/* Set this queue as a chain-building queue. */
	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at the idx corresponding to the start sequence
	 * number (assumes ssn_idx is valid, i.e. != 0xFFF). */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	/* Program the TX window size and frame limit for this queue. */
	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Map the queue to the FIFO and mark it active (aggregation on). */
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
2214
/*
 * il4965_tx_agg_start - start a TX aggregation session for <sta, tid>
 *
 * Claims a free aggregation queue, records the starting sequence number
 * in @ssn and enables the queue.  The ADDBA callback to mac80211 fires
 * immediately when the HW queue is already empty; otherwise the state
 * goes to IL_EMPTYING_HW_QUEUE_ADDBA and the callback is issued from
 * il4965_txq_check_empty() once the queue drains.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	/* Returns -EINVAL for out-of-range TIDs. */
	tx_fifo = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	*ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2277
2278
2279
2280
2281
/*
 * il4965_txq_agg_disable - tear down an aggregation TX queue
 *
 * Reverse of il4965_txq_agg_enable(): stops the scheduler on the queue,
 * removes it from the chain-building set, rewinds the pointers to
 * @ssn_idx and deactivates it on @tx_fifo.
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	il4965_tx_queue_stop_scheduler(il, txq_id);

	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Rewind to the next expected sequence number (assumes ssn_idx
	 * is valid, i.e. != 0xFFF). */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);

	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	il_txq_ctx_deactivate(il, txq_id);
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

	return 0;
}
2310
/*
 * il4965_tx_agg_stop - stop a TX aggregation session for <sta, tid>
 *
 * If the HW queue still holds frames, only marks the session as
 * IL_EMPTYING_HW_QUEUE_DELBA; the actual teardown then happens from
 * il4965_txq_check_empty() once the queue drains.  Otherwise the queue
 * is disabled immediately and mac80211 is notified.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* Returns -EINVAL for out-of-range TIDs. */
	tx_fifo_id = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	/* Next sequence number, shifted down to a 0..4095 SSN. */
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * Peer stopped aggregation again before the previously
		 * selected queue drained, i.e. before the session ever
		 * started completely; just turn it off.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* Queue not empty: defer the teardown to the reclaim path. */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* Deliberate lock handoff: irqs stay disabled from the irqsave
	 * above, so plain unlock/lock here and irqrestore below with the
	 * same `flags` is correct. */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * The only reason il4965_txq_agg_disable() can fail is a queue
	 * number out of range, which can happen if uCode was reloaded
	 * and station info lost; in that case there is nothing to
	 * deactivate, so the return value is intentionally ignored to
	 * keep mac80211's cleanup mechanism working.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
2388
/*
 * il4965_txq_check_empty - advance deferred ADDBA/DELBA state machines
 *
 * Called with il->sta_lock held when frames for <sta, tid> complete.
 * Finishes a pending aggregation teardown (DELBA) or start (ADDBA)
 * once the HW queue has fully drained.  Always returns 0.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* Reclaiming the last packet of the aggregated queue:
		 * finish the deferred teardown. */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* Reclaiming the last packet queued before the session
		 * started: the session may now begin. */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	}

	return 0;
}
2424
2425static void
2426il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
2427{
2428 struct ieee80211_sta *sta;
2429 struct il_station_priv *sta_priv;
2430
2431 rcu_read_lock();
2432 sta = ieee80211_find_sta(il->vif, addr1);
2433 if (sta) {
2434 sta_priv = (void *)sta->drv_priv;
2435
2436 if (sta_priv->client &&
2437 atomic_dec_return(&sta_priv->pending_frames) == 0)
2438 ieee80211_sta_block_awake(il->hw, sta, false);
2439 }
2440 rcu_read_unlock();
2441}
2442
/*
 * il4965_tx_status - hand a completed frame back to mac80211
 *
 * Non-aggregated frames first drop the per-station pending-frames
 * reference taken in il4965_tx_skb().
 */
static void
il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!is_agg)
		il4965_non_agg_tx_status(il, hdr->addr1);

	ieee80211_tx_status_irqsafe(il->hw, skb);
}
2453
/*
 * il4965_tx_queue_reclaim - free skbs from read_ptr up to (excluding) @idx
 *
 * Walks the ring from the current read pointer to @idx, handing each
 * skb back to mac80211 and freeing its TFD.  Returns the number of
 * reclaimed QoS data frames (used to decrement tfds_in_queue).
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Walk up to one past @idx, wrapping around the ring. */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];

		if (WARN_ON_ONCE(skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* Queues at/above the first AMPDU queue carry aggregated
		 * traffic. */
		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}
	return nfreed;
}
2489
2490
2491
2492
2493
2494
2495
/*
 * il4965_tx_status_reply_compressed_ba - process a compressed block-ack
 *
 * Aligns the uCode's BA bitmap with the driver's record of transmitted
 * frames (agg->bitmap), counts the acknowledged frames, and reports the
 * aggregate result to mac80211 through the first frame's TX info.
 *
 * Returns 0 on success, -EINVAL when no BA was expected, -1 when the
 * frame count doesn't fit the shifted 64-bit bitmap.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
				     struct il_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IL_ERR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* The expected block-ack response has arrived. */
	agg->wait_for_ba = 0;
	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Shift needed to align the BA bitmap with our TX window;
	 * a negative result wraps modulo 256 ring entries. */
	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
	if (sh < 0)
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		D_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* Success/failure per frame = BA bitmap AND our sent bitmap. */
	sent_bitmap = bitmap & agg->bitmap;

	/* Count ACKs bit by bit, logging each frame's outcome. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	/* Report the batch result on the first frame's TX info. */
	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

	return 0;
}
2558
2559static inline bool
2560il4965_is_tx_success(u32 status)
2561{
2562 status &= TX_STATUS_MSK;
2563 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
2564}
2565
/*
 * il4965_find_station - map a MAC address to a station table index
 *
 * Returns the idx of a matching, uCode-ready station entry, or
 * IL_INVALID_STATION when the address is unknown or the entry is not
 * yet usable.  Broadcast resolves directly to the broadcast station.
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IL_INVALID_STATION;
	unsigned long flags;

	/* In IBSS mode start the search at IL_STA_ID, skipping the
	 * earlier entries. */
	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
		start = IL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return il->hw_params.bcast_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = start; i < il->hw_params.max_stations; i++)
		if (il->stations[i].used &&
		    ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
			ret = i;
			goto out;
		}

	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
	/*
	 * Commands interacting with stations may arrive before we have
	 * finished adding the station to uCode; treat such entries as
	 * not-yet-valid.
	 */
	if (ret != IL_INVALID_STATION &&
	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
		IL_ERR("Requested station info for sta %d before ready.\n",
		       ret);
		ret = IL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2607
2608static int
2609il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2610{
2611 if (il->iw_mode == NL80211_IFTYPE_STATION)
2612 return IL_AP_ID;
2613 else {
2614 u8 *da = ieee80211_get_DA(hdr);
2615
2616 return il4965_find_station(il, da);
2617 }
2618}
2619
/*
 * Extract the scheduler's start sequence number from a TX response.
 * The SSN word sits right after the per-frame status entries, hence the
 * pointer arithmetic: &u.status + frame_count skips frame_count 32-bit
 * status words.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}
2626
2627static inline u32
2628il4965_tx_status_to_mac80211(u32 status)
2629{
2630 status &= TX_STATUS_MSK;
2631
2632 switch (status) {
2633 case TX_STATUS_SUCCESS:
2634 case TX_STATUS_DIRECT_DONE:
2635 return IEEE80211_TX_STAT_ACK;
2636 case TX_STATUS_FAIL_DEST_PS:
2637 return IEEE80211_TX_STAT_TX_FILTERED;
2638 default:
2639 return 0;
2640 }
2641}
2642
2643
2644
2645
/*
 * il4965_tx_status_reply_tx - record aggregation TX status (pre block-ack)
 *
 * Parses the per-frame status entries of an aggregation TX response and
 * rebuilds agg->bitmap (one bit per queue slot relative to agg->start_idx)
 * so it can later be matched against the peer's compressed block-ack.
 * Returns 0 on success, -1 on an inconsistent response.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* Single-frame "aggregate": report its status to mac80211 directly */
	if (agg->frame_count == 1) {

		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames: build the pending-frame bitmap that the
		 * compressed block-ack will later be checked against */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* one loop iteration per frame in the aggregate */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* frames the uCode never put on air get no bit */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			/* queue slot must agree with the 802.11 sequence
			 * number modulo 256, or the response is corrupt */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   IEEE80211_SEQ_TO_SN(sc));

			/* Slide the window start so every frame's bit fits in
			 * the 64-bit map; queue indices wrap at 0x100 */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		/* a non-empty bitmap means a block-ack is expected */
		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
2754
2755
2756
2757
/*
 * il4965_hdl_tx - handler for the TX response notification
 *
 * Reports per-frame TX status to mac80211, reclaims completed TFDs and,
 * for aggregation (sched_retry) queues, feeds the response into the
 * block-ack bookkeeping.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	int uninitialized_var(tid);	/* set only for QoS data frames (qc != NULL) */
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	/* drop responses that reference a slot outside the in-use window */
	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	/*
	 * PASSIVE_NO_RX: the firmware refuses to transmit on a passive
	 * channel until it has received a valid frame there.  Stop the
	 * queues; presumably they are woken again once RX resumes —
	 * NOTE(review): confirm against the il_stop_queues_by_reason /
	 * wake path elsewhere in the driver.
	 */
	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
	    il->iw_mode == NL80211_IFTYPE_STATION) {
		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Stopped queues - RX waiting on passive channel\n");
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* aggregation queue: update block-ack state first */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* single failed frame of an aggregate: no block-ack came */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		/* reclaim everything the scheduler has moved past */
		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* non-aggregation queue: report status straight to mac80211 */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2874
2875
2876
2877
2878void
2879il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2880 struct ieee80211_tx_info *info)
2881{
2882 struct ieee80211_tx_rate *r = &info->status.rates[0];
2883
2884 info->status.antenna =
2885 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2886 if (rate_n_flags & RATE_MCS_HT_MSK)
2887 r->flags |= IEEE80211_TX_RC_MCS;
2888 if (rate_n_flags & RATE_MCS_GF_MSK)
2889 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2890 if (rate_n_flags & RATE_MCS_HT40_MSK)
2891 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2892 if (rate_n_flags & RATE_MCS_DUP_MSK)
2893 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2894 if (rate_n_flags & RATE_MCS_SGI_MSK)
2895 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2896 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2897}
2898
2899
2900
2901
2902
2903
2904
/*
 * il4965_hdl_compressed_ba - handler for the compressed block-ack
 *
 * Matches the peer's compressed block-ack against the driver's pending
 * aggregation bitmap, reclaims acknowledged frames and wakes the queue
 * when enough space frees up.
 */
static void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" identifies the TX queue the block-ack refers to */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is the start of the block-ack TX window, i.e. the index
	 * in the queue's circular buffer of the first frame in the window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * Block-ack for a queue the aggregation session no longer
		 * owns — presumably the session was torn down while this
		 * notification was in flight, so only log at debug level.
		 * NOTE(review): confirm against the DELBA teardown path.
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* index just before the block-ack window */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* update the driver's ACK/no-ACK record for each frame in the window */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/* Release all TFDs in front of the block-ack window, then wake the
	 * queue if it drained below the low-water mark */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {

		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2981
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il4965_get_tx_fail_reason - stringify a TX status word for debug logs
 *
 * Only the TX_STATUS_MSK portion of @status is decoded; codes not in the
 * table yield "UNKNOWN".  Compiled only when debugging is enabled.
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
/* each macro expands to one "case ...: return ..." arm of the switch */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif
3021
/*
 * il4965_sta_alloc_lq - build an initial link-quality (rate scale) command
 *
 * Fills every retry slot with the lowest rate of the current band on the
 * first valid TX antenna — a safe starting point before real rate scaling
 * takes over.  Returns NULL on allocation failure; the caller owns (and
 * must kfree) the returned command.
 */
static struct il_link_quality_cmd *
il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
{
	int i, r;
	struct il_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IL_ERR("Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}

	/* lowest rate of the band: 6M OFDM on 5 GHz, 1M CCK on 2.4 GHz */
	if (il->band == NL80211_BAND_5GHZ)
		r = RATE_6M_IDX;
	else
		r = RATE_1M_IDX;

	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |=
	    il4965_first_antenna(il->hw_params.
				 valid_tx_ant) << RATE_MCS_ANT_POS;
	rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
	/* use the same rate for every retry slot */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
	    il4965_first_antenna(il->hw_params.valid_tx_ant);

	link_cmd->general_params.dual_stream_ant_msk =
	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
							       valid_tx_ant);
	/* fall back to sensible dual-stream masks when the above is empty
	 * or when exactly two antennas are valid */
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
		    il->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
3073
3074
3075
3076
3077
3078
/*
 * il4965_add_bssid_station - add a station entry for a BSSID and set up
 * its initial rate scaling
 *
 * On success *sta_id_r (if non-NULL) receives the new station index.
 * Returns 0 on success or a negative errno.
 */
int
il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IL_INVALID_STATION;

	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].used |= IL_STA_LOCAL;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
		       addr);
		/* NOTE(review): the station added above stays in the table
		 * on this error path — confirm callers tolerate that. */
		return -ENOMEM;
	}

	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
	if (ret)
		IL_ERR("Link quality command failed (%d)\n", ret);

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3121
/*
 * il4965_static_wepkey_cmd - push all static (default) WEP keys to uCode
 *
 * Builds one C_WEPKEY command covering every default key slot.  When all
 * slots are empty the command is skipped unless @send_if_empty is set
 * (needed so key removal actually reaches the device).  Sends
 * synchronously; may sleep.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
	int i;
	/* command header plus one il_wep_key entry per default key slot */
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = C_WEPKEY,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};
	bool not_empty = false;

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		u8 key_size = il->_4965.wep_keys[i].key_size;

		wep_cmd->key[i].key_idx = i;
		if (key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = true;
		} else
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;

		wep_cmd->key[i].key_size = key_size;
		/* key material starts at byte 3 of the uCode key buffer —
		 * presumably the first bytes are reserved by the firmware
		 * layout; confirm against the 4965 command definitions */
		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
3167
/*
 * Re-send the static WEP keys to the uCode; skips the command entirely
 * when no keys are configured (send_if_empty == false).
 */
int
il4965_restore_default_wep_keys(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, false);
}
3175
3176int
3177il4965_remove_default_wep_key(struct il_priv *il,
3178 struct ieee80211_key_conf *keyconf)
3179{
3180 int ret;
3181 int idx = keyconf->keyidx;
3182
3183 lockdep_assert_held(&il->mutex);
3184
3185 D_WEP("Removing default WEP key: idx=%d\n", idx);
3186
3187 memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
3188 if (il_is_rfkill(il)) {
3189 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
3190
3191 return 0;
3192 }
3193 ret = il4965_static_wepkey_cmd(il, 1);
3194 D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
3195
3196 return ret;
3197}
3198
/*
 * il4965_set_default_wep_key - install a static (default) WEP key
 *
 * Validates the key length, stores the key in the driver's default-key
 * table and pushes all default keys to the uCode.  Returns 0 on success
 * or a negative errno.
 */
int
il4965_set_default_wep_key(struct il_priv *il,
			   struct ieee80211_key_conf *keyconf)
{
	int ret;
	int len = keyconf->keylen;
	int idx = keyconf->keyidx;

	lockdep_assert_held(&il->mutex);

	if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	/* hardware handles the IV, mac80211 must not generate one */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	/* NOTE(review): this station-table write is not under sta_lock,
	 * unlike the dynamic-key paths below — confirm this is safe here */
	il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;

	il->_4965.wep_keys[idx].key_size = len;
	memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);

	ret = il4965_static_wepkey_cmd(il, false);

	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
	return ret;
}
3226
/*
 * il4965_set_wep_dynamic_key_info - program a pairwise/dynamic WEP key
 *
 * Stores the key in the station table, picks a free uCode key offset if
 * none is assigned yet, and sends a synchronous ADD_STA to program it.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* hardware generates the IV for WEP */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* WEP key material starts at byte 3 of the uCode key buffer —
	 * presumably the leading bytes are reserved by the firmware layout;
	 * confirm against the 4965 command definitions */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* only allocate a uCode key slot if none is assigned yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate
	 * room in the uCode key table */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot under the lock; the command is sent without it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3280
/*
 * il4965_set_ccmp_dynamic_key_info - program a pairwise CCMP (AES) key
 *
 * Stores the key in the station table, picks a free uCode key offset if
 * none is assigned yet, and sends a synchronous ADD_STA to program it.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 must generate the IV/PN for CCMP */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* only allocate a uCode key slot if none is assigned yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate
	 * room in the uCode key table */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot under the lock; the command is sent without it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3328
/*
 * il4965_set_tkip_dynamic_key_info - record a pairwise TKIP key
 *
 * Unlike the WEP/CCMP paths, no ADD_STA command is sent here; the key is
 * only stored in the station table.  Presumably the uCode is updated
 * later when phase-1 key material arrives via il4965_update_tkip_key —
 * confirm against the caller flow.  Always returns 0.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 generates the IV and the Michael MIC for TKIP */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = 16;

	/* only allocate a uCode key slot if none is assigned yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate
	 * room in the uCode key table */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is actually not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3373
/*
 * il4965_update_tkip_key - push fresh TKIP phase-1 RX key material
 *
 * Called by mac80211 when the iv32 counter advances; writes the new TTAK
 * into the station entry and fires an asynchronous ADD_STA.  Bails out if
 * a scan had to be cancelled first — presumably the update is retried or
 * superseded later; confirm against the mac80211 callback contract.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		 * briefly on SW decryption */
		return;
	}

	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* five 16-bit TTAK words derived from the phase-1 mixing */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
3407
/*
 * il4965_remove_dynamic_key - remove a pairwise key from a station entry
 *
 * Invalidates the key in the station table, releases its uCode key-table
 * slot and sends a synchronous ADD_STA to program the removal.  Several
 * early-out cases return 0 as "nothing to do".
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/*
		 * The key currently programmed for this station has a
		 * different index than the one being removed — presumably
		 * it was already replaced, so treat the removal as done.
		 * NOTE(review): confirm against the mac80211 rekey flow.
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* release the uCode key-table slot this key occupied */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* snapshot under the lock; the command is sent without it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3468
3469int
3470il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3471 u8 sta_id)
3472{
3473 int ret;
3474
3475 lockdep_assert_held(&il->mutex);
3476
3477 il->_4965.key_mapping_keys++;
3478 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3479
3480 switch (keyconf->cipher) {
3481 case WLAN_CIPHER_SUITE_CCMP:
3482 ret =
3483 il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
3484 break;
3485 case WLAN_CIPHER_SUITE_TKIP:
3486 ret =
3487 il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
3488 break;
3489 case WLAN_CIPHER_SUITE_WEP40:
3490 case WLAN_CIPHER_SUITE_WEP104:
3491 ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
3492 break;
3493 default:
3494 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3495 keyconf->cipher);
3496 ret = -EINVAL;
3497 }
3498
3499 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3500 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3501
3502 return ret;
3503}
3504
3505
3506
3507
3508
3509
3510
3511
/*
 * il4965_alloc_bcast_station - set up the special broadcast station entry
 *
 * Prepares the entry in the driver's station table and attaches an
 * initial link-quality command.  Returns 0 on success or a negative
 * errno.
 */
int
il4965_alloc_bcast_station(struct il_priv *il)
{
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);

		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* allocation may sleep, so do it outside the lock */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR
		    ("Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3545
3546
3547
3548
3549
3550
3551
3552static int
3553il4965_update_bcast_station(struct il_priv *il)
3554{
3555 unsigned long flags;
3556 struct il_link_quality_cmd *link_cmd;
3557 u8 sta_id = il->hw_params.bcast_id;
3558
3559 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3560 if (!link_cmd) {
3561 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3562 return -ENOMEM;
3563 }
3564
3565 spin_lock_irqsave(&il->sta_lock, flags);
3566 if (il->stations[sta_id].lq)
3567 kfree(il->stations[sta_id].lq);
3568 else
3569 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3570 il->stations[sta_id].lq = link_cmd;
3571 spin_unlock_irqrestore(&il->sta_lock, flags);
3572
3573 return 0;
3574}
3575
/*
 * Refresh rate scaling for all broadcast stations — on this single-
 * context device that is exactly one station.
 */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
3581
3582
3583
3584
/*
 * il4965_sta_tx_modify_enable_tid - re-enable TX for one TID
 *
 * Clears the TID's bit in the station's tid_disable_tx mask and pushes
 * the modified station entry to the uCode (synchronous ADD_STA).
 */
int
il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* snapshot under the lock; the command is sent without it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3604
/*
 * il4965_sta_rx_agg_start - tell the uCode to accept an RX aggregation
 * session for (station, tid) starting at sequence number @ssn
 *
 * Returns 0 on success, -ENXIO when the station is unknown.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* snapshot under the lock; the command is sent without it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3631
/*
 * il4965_sta_rx_agg_stop - tear down the RX aggregation session for
 * (station, tid) in the uCode
 *
 * Returns 0 on success, -ENXIO when the station is unknown.
 */
int
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* snapshot under the lock; the command is sent without it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3658
/*
 * il4965_sta_modify_sleep_tx_count - allow @cnt frames to a sleeping
 * (power-save) station
 *
 * Marks the station as power-saving and programs the sleep TX budget
 * via an asynchronous ADD_STA (fire-and-forget).
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3675
3676void
3677il4965_update_chain_flags(struct il_priv *il)
3678{
3679 if (il->ops->set_rxon_chain) {
3680 il->ops->set_rxon_chain(il);
3681 if (il->active.rx_chain != il->staging.rx_chain)
3682 il_commit_rxon(il);
3683 }
3684}
3685
3686static void
3687il4965_clear_free_frames(struct il_priv *il)
3688{
3689 struct list_head *element;
3690
3691 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3692
3693 while (!list_empty(&il->free_frames)) {
3694 element = il->free_frames.next;
3695 list_del(element);
3696 kfree(list_entry(element, struct il_frame, list));
3697 il->frames_count--;
3698 }
3699
3700 if (il->frames_count) {
3701 IL_WARN("%d frames still in use. Did we lose one?\n",
3702 il->frames_count);
3703 il->frames_count = 0;
3704 }
3705}
3706
3707static struct il_frame *
3708il4965_get_free_frame(struct il_priv *il)
3709{
3710 struct il_frame *frame;
3711 struct list_head *element;
3712 if (list_empty(&il->free_frames)) {
3713 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3714 if (!frame) {
3715 IL_ERR("Could not allocate frame!\n");
3716 return NULL;
3717 }
3718
3719 il->frames_count++;
3720 return frame;
3721 }
3722
3723 element = il->free_frames.next;
3724 list_del(element);
3725 return list_entry(element, struct il_frame, list);
3726}
3727
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	/* Scrub the frame and return it to the reuse pool; it is freed
	 * for real in il4965_clear_free_frames(). */
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
3734
3735static u32
3736il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3737 int left)
3738{
3739 lockdep_assert_held(&il->mutex);
3740
3741 if (!il->beacon_skb)
3742 return 0;
3743
3744 if (il->beacon_skb->len > left)
3745 return 0;
3746
3747 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3748
3749 return il->beacon_skb->len;
3750}
3751
3752
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * Locate the TIM information element in @beacon and record its
	 * offset/size in the beacon command so the uCode can update it.
	 * The IEs start right after the fixed beacon fields.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Walk the (id, len, data...) IE chain; "- 2" keeps the id/len
	 * pair inside the frame while scanning. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* Found it (and the length byte is in range)? Tell the uCode. */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
3779
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * Build a complete C_TX_BEACON command in @frame from the cached
	 * beacon skb. Returns the total command size (command header plus
	 * beacon frame), or 0 on any error.
	 */
	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Initialize the command buffer. */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Copy the beacon frame in; 0 means no beacon or it didn't fit. */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Fill in the generic TX command fields. */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Point the uCode at the TIM element inside the frame. */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Pick the lowest PLCP rate and alternate the TX antenna. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	/* CCK rates need the CCK flag set in rate_n_flags. */
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3834
3835int
3836il4965_send_beacon_cmd(struct il_priv *il)
3837{
3838 struct il_frame *frame;
3839 unsigned int frame_size;
3840 int rc;
3841
3842 frame = il4965_get_free_frame(il);
3843 if (!frame) {
3844 IL_ERR("Could not obtain free frame buffer for beacon "
3845 "command.\n");
3846 return -ENOMEM;
3847 }
3848
3849 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3850 if (!frame_size) {
3851 IL_ERR("Error configuring the beacon command\n");
3852 il4965_free_frame(il, frame);
3853 return -EINVAL;
3854 }
3855
3856 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3857
3858 il4965_free_frame(il, frame);
3859
3860 return rc;
3861}
3862
static inline dma_addr_t
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	/* Low 32 bits of the DMA address live in tb->lo (little endian). */
	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		/* Bits 32-35 are the low nibble of hi_n_len. The split
		 * "<< 16) << 16" avoids a 32-wide shift on builds where
		 * dma_addr_t is only 32 bits. */
		addr |=
		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
		    16;

	return addr;
}
3876
static inline u16
il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	/* Buffer length occupies the upper 12 bits of hi_n_len. */
	return le16_to_cpu(tb->hi_n_len) >> 4;
}
3884
static inline void
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];
	/* Length goes in the upper 12 bits of hi_n_len. */
	u16 hi_n_len = len << 4;

	/* Low 32 bits of the DMA address go in tb->lo. */
	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		/* Address bits 32-35 fill the low nibble; the split
		 * ">> 16) >> 16" avoids a 32-wide shift on 32-bit builds. */
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	/* Buffers are appended in order, so idx + 1 is the new count. */
	tfd->num_tbs = idx + 1;
}
3899
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	/* Only the low 5 bits of num_tbs hold the buffer count. */
	return tfd->num_tbs & 0x1f;
}
3905
3906
3907
3908
3909
3910
3911
3912
3913
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	/*
	 * Tear down the TFD at the queue's read pointer: unmap the
	 * command buffer (TB 0, tracked in txq->meta), unmap any extra
	 * payload buffers, and free the associated skb.
	 */
	tfd = &tfd_tmp[idx];

	/* Sanity check on the number of attached buffers. */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* Corrupt TFD - nothing safe to unmap here. */
		return;
	}

	/* Unmap TB 0 (the tx command); its mapping is kept in meta. */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap the remaining payload chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* Free the skb, if this queue tracks skbs. */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* dev_kfree_skb_any: may run in irqs-disabled context. */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
3958
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	/*
	 * Append one DMA buffer (@addr/@len) to the TFD at the queue's
	 * write pointer. @reset clears the TFD first (start of a new
	 * frame). @pad is unused in this implementation.
	 * Returns 0, or -EINVAL when the TFD is already full.
	 */
	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can hold at most IL_NUM_OF_TBS buffers. */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* The TFD address field is 36 bits wide. */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
3991
3992
3993
3994
3995
3996
3997
3998
int
il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Tell the device where this queue's TFD circular buffer lives;
	 * the register takes the DMA address shifted right by 8 bits. */
	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);

	return 0;
}
4009
4010
4011
4012
4013
4014
static void
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	/*
	 * uCode "alive" notification: save the response (init image vs.
	 * runtime image go to separate copies) and schedule the matching
	 * start work.
	 */
	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
	}

	/* Delay the follow-up work by 5 ms — presumably to give HW rfkill
	 * time to settle; confirm against driver history. */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057static void
4058il4965_bg_stats_periodic(struct timer_list *t)
4059{
4060 struct il_priv *il = from_timer(il, t, stats_periodic);
4061
4062 if (test_bit(S_EXIT_PENDING, &il->status))
4063 return;
4064
4065
4066 if (!il_is_ready_rf(il))
4067 return;
4068
4069 il_send_stats_request(il, CMD_ASYNC, false);
4070}
4071
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	/* Remember the uCode's IBSS manager status from the notification. */
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
4089
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	/*
	 * Critical-temperature (CT) kill: stop all traffic and signal the
	 * uCode so the radio can be powered down until it cools off.
	 */
	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* Read back — presumably to flush the posted write; confirm. */
	_il_rd(il, CSR_UCODE_DRV_GP1);

	/* Brief NIC-access grab/release under reg_lock; intent looks like
	 * a wake/settle handshake — NOTE(review): confirm. */
	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
4109
4110
4111
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of status taken before we update S_RFKILL below. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled. */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* RXON still allowed: clear the GP1 block again. Note the
		 * same MBX_C bit is written in both branches — presumably
		 * intentional (write-to-toggle/ack semantics?); confirm
		 * against the hardware docs. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* If the rfkill state changed, inform the wiphy; otherwise wake
	 * any command waiter that was blocked on the old state. */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
static void
il4965_setup_handlers(struct il_priv *il)
{
	/* Install the Rx notification/response dispatch table consumed by
	 * il4965_rx_handle(); unset entries are logged and dropped there. */
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/* Statistics: both the command response and the notification. */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* Card (rfkill/CT-kill) state changes. */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;

	/* Received frames: PHY info first, then the MPDU itself. */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	il->handlers[N_RX] = il4965_hdl_rx;

	/* Block-ack notification for aggregation. */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;

	/* Tx command response. */
	il->handlers[C_TX] = il4965_hdl_tx;
}
4201
4202
4203
4204
4205
4206
4207
4208
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/*
	 * Drain the Rx ring: process every buffer the uCode has filled
	 * between our read index (i) and the uCode's closed index (r),
	 * dispatch each packet through il->handlers[], then recycle or
	 * remap the page and restock the ring.
	 */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing new from the uCode. */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* How many slots will need restocking after this pass. */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* Ring more than half empty: replenish aggressively mid-loop. */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* A NULL slot here means the refill logic is broken. */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		/* Frame length plus the status word; currently only
		 * informational (len is not used below). */
		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);

		/* Does this packet complete a host command? */
		reclaim = il_need_reclaim(il, pkt);

		/* Dispatch through the handler table set up in
		 * il4965_setup_handlers(). */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handler registered for this notification. */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/* Complete the host command this packet answers, waking
		 * the waiter in il_send_cmd_sync(). */
		if (reclaim) {
			/* The handler may have stolen the page (rxb->page
			 * set to NULL) — only complete if it is still ours. */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Recycle the buffer: remap the page for the device, or
		 * put the rxb on rx_used if the page is gone/unmappable. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);

			if (unlikely(pci_dma_mapping_error(il->pci_dev,
							   rxb->page_dma))) {
				__il_free_pages(il, rxb->page);
				rxb->page = NULL;
				list_add_tail(&rxb->list, &rxq->rx_used);
			} else {
				list_add_tail(&rxb->list, &rxq->rx_free);
				rxq->free_count++;
			}
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;

		/* When the ring is running low, replenish every 8 packets
		 * instead of waiting until the end of the pass. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Backtrack one entry — publish the final read index and restock. */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
4336
4337
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* Wait for any in-flight hard IRQ, then kill the tasklet so no
	 * deferred interrupt work runs after this returns. */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4345
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	/*
	 * Bottom half of the interrupt handler: read-and-ack both
	 * interrupt status registers, then service each pending cause
	 * in priority order (HW error first).
	 */
	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* Just for debug. */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Fold FH (DMA) Rx/Tx causes into the main cause word so they
	 * are serviced by the common paths below. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Fatal hardware error: disable interrupts and bail out. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts. */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* Scheduler/alive are informational — just count them. */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below. */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW rfkill switch toggled. */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* Update the software state and, on radio-enable, force a
		 * reset to bring the device back up. */
		if (hw_rf_kill) {
			set_bit(S_RFKILL, &il->status);
		} else {
			clear_bit(S_RFKILL, &il->status);
			il_force_reset(il, true);
		}
		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip reached its critical temperature. */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* uCode detected a fatal software error. */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode woke up: flush any write pointers queued while asleep. */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* Rx data ready (commands, notifications, frames). */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* DMA chunk of the uCode image finished loading. */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;

		/* Wake the waiter in il4965_load_given_ucode(). */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable interrupts only if they were enabled before; after a
	 * handled rfkill, keep only the rfkill interrupt armed. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred. */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4530
4531
4532
4533
4534
4535
4536
4537#ifdef CONFIG_IWLEGACY_DEBUG
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
static ssize_t
il4965_show_debug_level(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);
	/* Expose the current debug bitmask in hex via sysfs. */
	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
}
4557
4558static ssize_t
4559il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4560 const char *buf, size_t count)
4561{
4562 struct il_priv *il = dev_get_drvdata(d);
4563 unsigned long val;
4564 int ret;
4565
4566 ret = kstrtoul(buf, 0, &val);
4567 if (ret)
4568 IL_ERR("%s is not in hex or decimal form.\n", buf);
4569 else
4570 il->debug_level = val;
4571
4572 return strnlen(buf, count);
4573}
4574
/* debug_level: 0644 — readable by all, writable by root. */
static DEVICE_ATTR(debug_level, 0644, il4965_show_debug_level,
		   il4965_store_debug_level);
4577
4578#endif
4579
static ssize_t
il4965_show_temperature(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);

	/* The temperature reading is only valid once the uCode is alive. */
	if (!il_is_alive(il))
		return -EAGAIN;

	return sprintf(buf, "%d\n", il->temperature);
}
4591
/* temperature: 0444 — read-only. */
static DEVICE_ATTR(temperature, 0444, il4965_show_temperature, NULL);
4593
4594static ssize_t
4595il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4596{
4597 struct il_priv *il = dev_get_drvdata(d);
4598
4599 if (!il_is_ready_rf(il))
4600 return sprintf(buf, "off\n");
4601 else
4602 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4603}
4604
static ssize_t
il4965_store_tx_power(struct device *d, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct il_priv *il = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	/* Parse a decimal tx power value and apply it; on success the
	 * full byte count is returned, otherwise the error code. */
	ret = kstrtoul(buf, 10, &val);
	if (ret)
		IL_INFO("%s is not in decimal form.\n", buf);
	else {
		ret = il_set_tx_power(il, val, false);
		if (ret)
			IL_ERR("failed setting tx power (0x%08x).\n", ret);
		else
			ret = count;
	}
	return ret;
}
4625
/* tx_power: 0644 — readable by all, writable by root. */
static DEVICE_ATTR(tx_power, 0644, il4965_show_tx_power,
		   il4965_store_tx_power);
4628
/* sysfs attributes registered on the PCI device by the ucode callback
 * (sysfs_create_group); debug_level only exists with debug builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static const struct attribute_group il_attribute_group = {
	.name = NULL,		/* put attrs directly in the device dir */
	.attrs = il_sysfs_entries,
};
4642
4643
4644
4645
4646
4647
4648
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	/* Free every DMA-coherent firmware image buffer allocated with
	 * il_alloc_fw_desc() in il4965_ucode_callback(). */
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4659
static void
il4965_nic_start(struct il_priv *il)
{
	/* Clear all reset bits so the NIC starts running. */
	_il_wr(il, CSR_RESET, 0);
}
4666
4667static void il4965_ucode_callback(const struct firmware *ucode_raw,
4668 void *context);
4669static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4670
4671static int __must_check
4672il4965_request_firmware(struct il_priv *il, bool first)
4673{
4674 const char *name_pre = il->cfg->fw_name_pre;
4675 char tag[8];
4676
4677 if (first) {
4678 il->fw_idx = il->cfg->ucode_api_max;
4679 sprintf(tag, "%d", il->fw_idx);
4680 } else {
4681 il->fw_idx--;
4682 sprintf(tag, "%d", il->fw_idx);
4683 }
4684
4685 if (il->fw_idx < il->cfg->ucode_api_min) {
4686 IL_ERR("no suitable firmware found!\n");
4687 return -ENOENT;
4688 }
4689
4690 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4691
4692 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4693
4694 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4695 &il->pci_dev->dev, GFP_KERNEL, il,
4696 il4965_ucode_callback);
4697}
4698
/* Pointers into (and sizes of) the individual images inside a raw
 * firmware file, as split out by il4965_load_firmware(). */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4703
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	/*
	 * Parse the raw firmware file: read the version word, pick the
	 * header layout for that API version, validate the total size,
	 * and fill @pieces with pointers into @ucode_raw.
	 */
	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* v1-style header: 24 bytes of sizes, then the images. */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify the file holds exactly the advertised images.
	 * NOTE(review): the sum is computed in size_t; on 32-bit hosts a
	 * crafted file could in principle wrap it — per-image bounds are
	 * re-checked against hw limits in the ucode callback. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* The images are laid out back to back after the header. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4757
4758
4759
4760
4761
4762
4763
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	/*
	 * Completion callback for request_firmware_nowait(): parse the
	 * firmware, validate its API version and image sizes, copy the
	 * images into DMA buffers, and finish mac80211/sysfs/debugfs
	 * registration. On failure, retry with the next lower API
	 * version or unbind the driver.
	 */
	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure we have at least the version word. */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Split the file into its images (also sets il->ucode_ver). */
	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/* The embedded API version must be one the driver supports; it
	 * should match the version in the firmware's filename. */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/* Dump the parsed header for debugging. */
	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %zd\n", pieces.boot_size);

	/* Each image must fit in the hardware's size limits. */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/* Allocate DMA buffers for the runtime image (plus a backup copy
	 * of the data section for error recovery). */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization image, if present. */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap image, if present. */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Hardware-key capacity for this device family. */
	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy the images into their DMA buffers; actual loading into the
	 * device happens later. */
	D_INFO("Copying (but not loading) uCode instr len %zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	D_INFO("Copying (but not loading) uCode data len %zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	D_INFO("Copying (but not loading) boot instr len %zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/* Chain-noise calibration command IDs follow the standard PHY
	 * calibration table. */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/* Register with mac80211, then debugfs and sysfs. */
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	il_dbgfs_register(il, DRV_NAME);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our own copies now; release the OS firmware blob. */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* Retry with the next lower API version. */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
5003
/* uCode error descriptions, indexed directly by the error number in
 * il4965_desc_lookup(); larger codes fall back to advanced_lookup. */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
5034
5035static struct {
5036 char *name;
5037 u8 num;
5038} advanced_lookup[] = {
5039 {
5040 "NMI_INTERRUPT_WDG", 0x34}, {
5041 "SYSASSERT", 0x35}, {
5042 "UCODE_VERSION_MISMATCH", 0x37}, {
5043 "BAD_COMMAND", 0x38}, {
5044 "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
5045 "FATAL_ERROR", 0x3D}, {
5046 "NMI_TRM_HW_ERR", 0x46}, {
5047 "NMI_INTERRUPT_TRM", 0x4C}, {
5048 "NMI_INTERRUPT_BREAK_POINT", 0x54}, {
5049 "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
5050 "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
5051 "NMI_INTERRUPT_HOST", 0x66}, {
5052 "NMI_INTERRUPT_ACTION_PT", 0x7C}, {
5053 "NMI_INTERRUPT_UNKNOWN", 0x84}, {
5054 "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
5055"ADVANCED_SYSASSERT", 0},};
5056
5057static const char *
5058il4965_desc_lookup(u32 num)
5059{
5060 int i;
5061 int max = ARRAY_SIZE(desc_lookup_text);
5062
5063 if (num < max)
5064 return desc_lookup_text[num];
5065
5066 max = ARRAY_SIZE(advanced_lookup) - 1;
5067 for (i = 0; i < max; i++) {
5068 if (advanced_lookup[i].num == num)
5069 break;
5070 }
5071 return advanced_lookup[i].name;
5072}
5073
/* Layout of the on-device error log: a count word followed by
 * fixed-size elements of 7 words each. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

/*
 * Read the uCode error event table out of device (SRAM) memory and print
 * it.  The word offsets used below (desc at +1, pc at +2, ... hcmd at +22)
 * follow the 4965 error-table layout; each field is fetched with an
 * individual target-memory read.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* Pick the error table of whichever uCode image is running. */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word is the number of logged error entries. */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fetch the individual fields of the most recent error entry. */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
5124
5125static void
5126il4965_rf_kill_ct_config(struct il_priv *il)
5127{
5128 struct il_ct_kill_config cmd;
5129 unsigned long flags;
5130 int ret = 0;
5131
5132 spin_lock_irqsave(&il->lock, flags);
5133 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
5134 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
5135 spin_unlock_irqrestore(&il->lock, flags);
5136
5137 cmd.critical_temperature_R =
5138 cpu_to_le32(il->hw_params.ct_kill_threshold);
5139
5140 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
5141 if (ret)
5142 IL_ERR("C_CT_KILL_CONFIG failed\n");
5143 else
5144 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
5145 "critical temperature is %d\n",
5146 il->hw_params.ct_kill_threshold);
5147}
5148
/*
 * Default mapping of Tx queue index -> Tx FIFO.  Queues 0-3 carry the
 * four mac80211 ACs, queue 4 is the host-command queue, and the last two
 * are unused by default.  Consumed by il4965_alive_notify(); its size is
 * asserted there with BUILD_BUG_ON.
 */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
5158
/* Bitmask covering bit positions lo..hi inclusive. */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Bring up the 4965 Tx scheduler (SCD) after the runtime uCode reports
 * "alive": clear the scheduler's SRAM context area, program the byte-count
 * table base, enable the Tx DMA channels, initialize every Tx queue's
 * read/write pointers and context, and activate the default queue->FIFO
 * mapping.  All register work is done under il->lock.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear the scheduler's context data, Tx status, and translation
	 * tables in device SRAM, one 32-bit word at a time. */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell the scheduler where the Tx byte-count tables live
	 * (DMA address, in 1KB units). */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA and credit flow on every Tx DMA channel. */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits: enable scheduler auto-retry. */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues. */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue, including the command queue. */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write idxes -> 0. */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx window size for the scheduler. */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit in the second context word. */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate scheduling on queues 0-6. */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* Reset software "queue stopped" bookkeeping. */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* Reset to 0; activated per-queue below. */
	il->txq_ctx_active_msk = 0;

	/* The map below must cover exactly 7 queues. */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
5260
5261
5262
5263
5264
5265
/*
 * Handle the runtime uCode "alive" notification: validate the image,
 * finish scheduler setup, and (re)establish the RXON configuration.  On
 * any failure the restart worker is queued to retry the whole bring-up.
 * Called under il->mutex from il4965_bg_alive_start().
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* uCode declared itself invalid -- full restart. */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Verify the image in device memory matches what we loaded. */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was corrupt -- full restart. */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After this point, the device is considered alive. */
	set_bit(S_ALIVE, &il->status);

	/* Enable the watchdog that monitors stuck Tx queues. */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* Re-associate: keep ASSOC in staging, drop it from the
		 * active copy so il_commit_rxon() sees a change. */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Not associated: start from a fresh RXON config. */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence (no traffic yet). */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Push the (re)built RXON configuration to the device. */
	il_commit_rxon(il);

	/* Configure the critical-temperature kill threshold. */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5347
5348static void il4965_cancel_deferred_work(struct il_priv *il);
5349
/*
 * Core teardown path: stop scans, stations, interrupts, Tx/Rx DMA and
 * the APM, then drop all transient state.  Must be called with il->mutex
 * held (see il4965_down()).  If S_EXIT_PENDING was already set by the
 * caller it is left set; otherwise it is only held during the teardown.
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* Remember whether exit was already pending before we set it. */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop the Tx watchdog before tearing down the queues it polls. */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* Wipe WEP key material under the station lock. */
	spin_lock_irq(&il->sta_lock);

	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock anyone waiting for a host command to complete. */
	wake_up_all(&il->wait_command_queue);

	/* If we were told to exit, keep the bit set so pending work
	 * bails out; otherwise clear it now that teardown is underway. */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* Stop the device and put it in low-power state. */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* Quiesce interrupts before stopping DMA. */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If HW was never initialized, only the sticky status bits
	 * survive; skip the DMA/APM shutdown. */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* Keep only the bits that must survive a down/up cycle. */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/* Hardware shutdown proper, under the register lock. */
	spin_lock_irq(&il->reg_lock);

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);

	/* Disable DMA clock requests. */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* Release any remaining cached beacon frames. */
	il4965_clear_free_frames(il);
}
5452
/*
 * Locked wrapper around __il4965_down().  Deferred work is cancelled
 * after the mutex is released, since the workers themselves take
 * il->mutex and cancelling them while holding it would deadlock.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5462
5463
5464static void
5465il4965_set_hw_ready(struct il_priv *il)
5466{
5467 int ret;
5468
5469 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5470 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5471
5472
5473 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5474 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5475 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5476 100);
5477 if (ret >= 0)
5478 il->hw_ready = true;
5479
5480 D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
5481}
5482
5483static void
5484il4965_prepare_card_hw(struct il_priv *il)
5485{
5486 int ret;
5487
5488 il->hw_ready = false;
5489
5490 il4965_set_hw_ready(il);
5491 if (il->hw_ready)
5492 return;
5493
5494
5495 il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
5496
5497 ret =
5498 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5499 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
5500 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
5501
5502
5503 if (ret != -ETIMEDOUT)
5504 il4965_set_hw_ready(il);
5505}
5506
/* How many times to retry loading the bootstrap uCode before giving up. */
#define MAX_HW_RESTARTS 5

/*
 * Bring the NIC up: allocate the broadcast station, prepare the hardware,
 * check the RF-kill switch, init NIC DMA structures, enable interrupts,
 * and load the bootstrap uCode (retrying up to MAX_HW_RESTARTS times).
 * Must be called with il->mutex held.  Returns 0 on success (including
 * the RF-kill case, where bring-up simply stops early) or a negative
 * errno; each error path releases the broadcast station it allocated.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		il_dealloc_bcast_stations(il);
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* If the HW RF-kill switch is engaged, stop here: report rfkill
	 * to the stack and wait for the rfkill interrupt instead. */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_dealloc_bcast_stations(il);
		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack any interrupts left pending from before. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* The default command queue for the 4965. */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		il_dealloc_bcast_stations(il);
		return ret;
	}

	/* Make sure rfkill handshake bits are cleared. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* Clear (again) and enable interrupts. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* Really make sure rfkill handshake bits are cleared
	 * (the double write here is intentional). */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy the original ucode data image from disk into a backup
	 * cache, in case we need to restore it after a firmware error. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* Load the bootstrap program that, on reply, will in turn
		 * trigger loading of the init/runtime images. */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* Start the device: clear internal reset/stop requests. */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All retries failed: tear everything back down. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* Out of retries -- the device could not be started. */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5610
5611
5612
5613
5614
5615
5616
5617static void
5618il4965_bg_init_alive_start(struct work_struct *data)
5619{
5620 struct il_priv *il =
5621 container_of(data, struct il_priv, init_alive_start.work);
5622
5623 mutex_lock(&il->mutex);
5624 if (test_bit(S_EXIT_PENDING, &il->status))
5625 goto out;
5626
5627 il->ops->init_alive_start(il);
5628out:
5629 mutex_unlock(&il->mutex);
5630}
5631
5632static void
5633il4965_bg_alive_start(struct work_struct *data)
5634{
5635 struct il_priv *il =
5636 container_of(data, struct il_priv, alive_start.work);
5637
5638 mutex_lock(&il->mutex);
5639 if (test_bit(S_EXIT_PENDING, &il->status))
5640 goto out;
5641
5642 il4965_alive_start(il);
5643out:
5644 mutex_unlock(&il->mutex);
5645}
5646
5647static void
5648il4965_bg_run_time_calib_work(struct work_struct *work)
5649{
5650 struct il_priv *il = container_of(work, struct il_priv,
5651 run_time_calib_work);
5652
5653 mutex_lock(&il->mutex);
5654
5655 if (test_bit(S_EXIT_PENDING, &il->status) ||
5656 test_bit(S_SCANNING, &il->status)) {
5657 mutex_unlock(&il->mutex);
5658 return;
5659 }
5660
5661 if (il->start_calib) {
5662 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5663 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5664 }
5665
5666 mutex_unlock(&il->mutex);
5667}
5668
5669static void
5670il4965_bg_restart(struct work_struct *data)
5671{
5672 struct il_priv *il = container_of(data, struct il_priv, restart);
5673
5674 if (test_bit(S_EXIT_PENDING, &il->status))
5675 return;
5676
5677 if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
5678 mutex_lock(&il->mutex);
5679 il->is_open = 0;
5680
5681 __il4965_down(il);
5682
5683 mutex_unlock(&il->mutex);
5684 il4965_cancel_deferred_work(il);
5685 ieee80211_restart_hw(il->hw);
5686 } else {
5687 il4965_down(il);
5688
5689 mutex_lock(&il->mutex);
5690 if (test_bit(S_EXIT_PENDING, &il->status)) {
5691 mutex_unlock(&il->mutex);
5692 return;
5693 }
5694
5695 __il4965_up(il);
5696 mutex_unlock(&il->mutex);
5697 }
5698}
5699
5700static void
5701il4965_bg_rx_replenish(struct work_struct *data)
5702{
5703 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
5704
5705 if (test_bit(S_EXIT_PENDING, &il->status))
5706 return;
5707
5708 mutex_lock(&il->mutex);
5709 il4965_rx_replenish(il);
5710 mutex_unlock(&il->mutex);
5711}
5712
5713
5714
5715
5716
5717
5718
/* How long il4965_mac_start() waits for the uCode to report READY. */
#define UCODE_READY_TIMEOUT	(4 * HZ)

/*
 * Describe the device's capabilities to mac80211 and register the hw.
 * @max_probe_length: maximum probe-request length supported by the uCode;
 * bounds the scan IE space we advertise.  Returns 0 or a negative errno.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Advertise what this hardware/firmware combination supports. */
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	if (il->cfg->sku & IL_SKU_N)
		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
				       NL80211_FEATURE_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				       REGULATORY_DISABLE_BEACON_HINTS;

	/* Power save is off by default for this driver. */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	/* Probe request IEs fit in what's left after the 24-byte 802.11
	 * header and the 2-byte SSID IE header. */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* One Tx queue per AC. */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	if (il->bands[NL80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		    &il->bands[NL80211_BAND_2GHZ];
	if (il->bands[NL80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		    &il->bands[NL80211_BAND_5GHZ];

	il_leds_init(il);

	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5790
/*
 * mac80211 start callback: bring the NIC up and, unless RF-kill is
 * active, wait (up to UCODE_READY_TIMEOUT) for the uCode to reach the
 * READY state before declaring the interface open.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* We don't count the first bring-up as a restart. */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* __il4965_up() returns 0 on RF-kill; nothing more to wait for. */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for S_READY, set by il4965_alive_start() when the runtime
	 * uCode has fully initialized. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5832
/*
 * mac80211 stop callback: tear the device down, flush all deferred work,
 * then re-arm only the RF-kill interrupt so the user can still toggle
 * the radio while the interface is closed.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* Even after the device is brought down, the user may flip the
	 * RF-kill switch: ack stale interrupts and listen for rfkill. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5856
/*
 * mac80211 transmit callback.  On failure to queue the frame the skb is
 * freed here; on success ownership passes to the Tx path.
 */
void
il4965_mac_tx(struct ieee80211_hw *hw,
	      struct ieee80211_tx_control *control,
	      struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MACDUMP("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	/* il4965_tx_skb() returns non-zero if the skb was not consumed. */
	if (il4965_tx_skb(il, control->sta, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
5874
/*
 * mac80211 callback: push a freshly derived TKIP phase-1 key (for the
 * given IV32) down to the device so hardware decryption keeps working.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);

	D_MAC80211("leave\n");
}
5888
/*
 * mac80211 set_key callback: install or remove a hardware crypto key.
 * Group WEP keys with no station use the "default WEP key" slots; all
 * other keys use the dynamic key table.  Returns 0, -EOPNOTSUPP when
 * hardware crypto is disabled or for IBSS group keys, or a negative
 * errno from the key helpers.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* In IBSS, group keys are rotated per-station by upper layers;
	 * this hardware cannot support that, so refuse them. */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - ad-hoc group key\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta means a group key -> use the broadcast station. */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/*
	 * A WEP key with no station attached may be either a global
	 * "default" key or a dynamic one: if no dynamic (mapping) keys
	 * are installed yet, treat a new key as default; on removal,
	 * trust the hw_key_idx recorded when the key was set.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5964
5965int
5966il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5967 struct ieee80211_ampdu_params *params)
5968{
5969 struct il_priv *il = hw->priv;
5970 int ret = -EINVAL;
5971 struct ieee80211_sta *sta = params->sta;
5972 enum ieee80211_ampdu_mlme_action action = params->action;
5973 u16 tid = params->tid;
5974 u16 *ssn = ¶ms->ssn;
5975
5976 D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
5977
5978 if (!(il->cfg->sku & IL_SKU_N))
5979 return -EACCES;
5980
5981 mutex_lock(&il->mutex);
5982
5983 switch (action) {
5984 case IEEE80211_AMPDU_RX_START:
5985 D_HT("start Rx\n");
5986 ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
5987 break;
5988 case IEEE80211_AMPDU_RX_STOP:
5989 D_HT("stop Rx\n");
5990 ret = il4965_sta_rx_agg_stop(il, sta, tid);
5991 if (test_bit(S_EXIT_PENDING, &il->status))
5992 ret = 0;
5993 break;
5994 case IEEE80211_AMPDU_TX_START:
5995 D_HT("start Tx\n");
5996 ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
5997 break;
5998 case IEEE80211_AMPDU_TX_STOP_CONT:
5999 case IEEE80211_AMPDU_TX_STOP_FLUSH:
6000 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
6001 D_HT("stop Tx\n");
6002 ret = il4965_tx_agg_stop(il, vif, sta, tid);
6003 if (test_bit(S_EXIT_PENDING, &il->status))
6004 ret = 0;
6005 break;
6006 case IEEE80211_AMPDU_TX_OPERATIONAL:
6007 ret = 0;
6008 break;
6009 }
6010 mutex_unlock(&il->mutex);
6011
6012 return ret;
6013}
6014
/*
 * mac80211 sta_add callback: register the station with the uCode and
 * initialize rate scaling for it.  Returns 0 or a negative errno.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	/* NOTE(review): on a STATION-type vif the peer being added is our
	 * AP, hence "is_ap" when vif->type == NL80211_IFTYPE_STATION --
	 * confirm against il_add_station_common()'s contract. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling now that the station table entry exists. */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
6050
/*
 * mac80211 channel_switch callback: validate the requested channel,
 * update the staging RXON/HT configuration, and hand the switch to the
 * device op.  If the device op fails, S_CHANNEL_SWITCH_PENDING is rolled
 * back and mac80211 is told the switch did not complete.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	/* Refuse while exiting, scanning, or with a switch in flight. */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* Already on the requested channel -- nothing to do. */
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Derive 40 MHz use and secondary-channel offset from chandef. */
	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		il->ht.is_40mhz = false;
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	case NL80211_CHAN_HT40MINUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		il->ht.is_40mhz = true;
		break;
	case NL80211_CHAN_HT40PLUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		il->ht.is_40mhz = true;
		break;
	}

	/* Changing channel invalidates the staged flags. */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);

	/* Mark the switch pending before invoking the device op; on
	 * failure roll back and notify mac80211 that it did not happen. */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
6137
/*
 * mac80211 configure_filter callback: translate FIF_* filter flags into
 * RXON filter bits in the staging config, and report back (via
 * *total_flags) which filter flags this driver actually honors.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Accumulate a set mask (filter_or) and a clear mask (filter_nand)
 * depending on whether each FIF flag is requested. */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors. */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	/* Only the staging config is touched here; committing RXON is
	 * left to the normal configuration paths. */
	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	mutex_unlock(&il->mutex);

	/* Report the subset of filter flags the hardware supports;
	 * mac80211 will handle the rest in software. */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
6184
6185
6186
6187
6188
6189
6190
6191static void
6192il4965_bg_txpower_work(struct work_struct *work)
6193{
6194 struct il_priv *il = container_of(work, struct il_priv,
6195 txpower_work);
6196
6197 mutex_lock(&il->mutex);
6198
6199
6200
6201
6202
6203 if (test_bit(S_EXIT_PENDING, &il->status) ||
6204 test_bit(S_SCANNING, &il->status))
6205 goto out;
6206
6207
6208
6209
6210 il->ops->send_tx_power(il);
6211
6212
6213
6214 il->last_temperature = il->temperature;
6215out:
6216 mutex_unlock(&il->mutex);
6217}
6218
/*
 * Create the driver workqueue and wire up all deferred work items,
 * timers, and the interrupt tasklet.  Counterpart of
 * il4965_cancel_deferred_work().
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	/* NOTE(review): create_singlethread_workqueue() can return NULL;
	 * the result is not checked here -- confirm callers can tolerate
	 * or that allocation failure is handled elsewhere. */
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0);

	timer_setup(&il->watchdog, il_bg_watchdog, 0);

	/* The IRQ handler defers most of its work to this tasklet. */
	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
6244
/*
 * Cancel all deferred work and timers set up in
 * il4965_setup_deferred_work().  Note alive_start is cancelled without
 * _sync (it takes il->mutex itself, which some callers hold).
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
6257
6258static void
6259il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
6260{
6261 int i;
6262
6263 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
6264 rates[i].bitrate = il_rates[i].ieee * 5;
6265 rates[i].hw_value = i;
6266 rates[i].hw_value_short = i;
6267 rates[i].flags = 0;
6268 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
6269
6270
6271
6272 rates[i].flags |=
6273 (il_rates[i].plcp ==
6274 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6275 }
6276 }
6277}
6278
6279
6280
6281
/*
 * Set both the hardware write pointer and the scheduler read pointer of
 * Tx queue @txq_id to @idx (queue id is encoded in bits 8+ of the
 * HBUS_TARG_WRPTR write).
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6288
/*
 * Program the scheduler status register for one Tx queue: whether it is
 * active, which Tx FIFO it feeds, and whether it operates in aggregation
 * ("scheduler retry" / BA) mode.
 */
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
			   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Queue is active only if its bit is set in the active mask. */
	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
6311
/* mac80211 callback table for the 4965; mixes 4965-specific handlers
 * with the shared iwlegacy (il_mac_*) implementations. */
static const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
	.flush = il_mac_flush,
};
6334
/*
 * il4965_init_drv - one-time initialization of driver-private state
 *
 * Sets up locks, lists and default configuration fields in @il, then
 * builds the regulatory channel map, the geo/band structures and the
 * legacy rate table.
 *
 * Return: 0 on success, negative error code on failure; anything
 * allocated here is released again on the error paths.
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	/* Channel and rate tables are filled in below by
	 * il_init_channel_map() / il_init_geos(). */
	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = NL80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* Minimum spacing between forced firmware reloads. */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use (optional device op). */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
6384
/*
 * il4965_uninit_drv - release state set up by il4965_init_drv()
 * (plus the scan command buffer, which is allocated on demand;
 * kfree(NULL) is a no-op if no scan was ever started).
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6392
/*
 * il4965_hw_detect - cache hardware revision information in @il,
 * read from the CSR registers and the PCI config-space revision byte.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6401
/*
 * Default sensitivity-calibration ranges for the 4965, exported to the
 * shared sensitivity algorithm via il->hw_params.sens (see
 * il4965_set_hw_params()).  Values are device-specific calibration
 * constants; do not tune them without hardware documentation.
 */
static const struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,

	/* OFDM auto-correlation lower bounds */
	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	/* OFDM auto-correlation upper bounds */
	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	/* CCK auto-correlation bounds */
	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	/* Energy thresholds */
	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
6428
/*
 * il4965_set_hw_params - fill in device capability/limit fields
 *
 * Populates il->hw_params from 4965 constants, the device config and
 * the module parameters (amsdu_size_8K, 11n_disable, queues_num).
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* RX buffer size depends on whether 8K A-MSDU was requested. */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	/* The 11n_disable module parameter strips 11n from the SKU. */
	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Honor the queues_num module parameter only within valid limits. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	/* One byte-count table entry per TX queue. */
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 is only enabled on the 5 GHz band for this device. */
	il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
6476
/*
 * il4965_pci_probe - bring up one 4965 PCI device
 *
 * Allocates the mac80211 hardware (with embedded il_priv), maps PCI
 * resources, reads the EEPROM, initializes driver state and the IRQ,
 * then starts an asynchronous firmware load; the remainder of device
 * bring-up continues in the firmware-loaded callback.
 *
 * Return: 0 on success, negative errno on failure.  On error every
 * partially acquired resource is released via the goto unwind labels.
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocate HW + priv
	 ************************/
	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/**************************
	 * 2. Initialize PCI bus
	 * Disable ASPM link power states and clock PM before enabling
	 * the device.
	 **************************/
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Try the 36-bit DMA mask first, fall back to 32-bit. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err =
			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		/* both 36-bit and 32-bit attempts failed: */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/***********************
	 * 3. Map BAR0 and read the revision registers
	 ***********************/
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* These locks must exist before any register access helpers
	 * are used. */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * Soft-reset the device to put it into a known state before
	 * probing it further.
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* Clear the PCI retry timeout in config space (register
	 * PCI_CFG_RETRY_TIMEOUT set to 0). */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* extract the MAC address stored in the EEPROM */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Set up HW constants
	 ************************/
	il4965_set_hw_params(il);

	/*******************
	 * 6. Set up driver-private state
	 *******************/
	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;

	/********************
	 * 7. Set up services: interrupts off while the ISR and deferred
	 * work are being installed.
	 ********************/
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*
	 * If INTx was left disabled in PCI_COMMAND (e.g. by firmware),
	 * re-enable it so legacy interrupts still work when MSI could
	 * not be used.
	 */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* Record the initial RF-kill state: the GP_CNTRL HW_RF_KILL_SW
	 * flag being set maps to "not killed" here. */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	init_completion(&il->_4965.firmware_loading_complete);

	/* Asynchronous firmware request; probe finishes in the callback. */
	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
6687
/*
 * il4965_pci_remove - tear down one 4965 device
 *
 * Reverses il4965_pci_probe(): waits for the asynchronous firmware
 * load to finish, unregisters from mac80211, stops the hardware,
 * releases IRQ/MSI and PCI resources and frees all driver state.
 * The ordering below matters; see the inline comments.
 */
static void
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Probe may still be waiting for the firmware; don't race it. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/*
	 * Mark the driver as exiting first so deferred work and the
	 * stop path (possibly invoked via ieee80211_unregister_hw())
	 * bail out early.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Even if the down path already ran, make sure the device is
	 * stopped (APM off) before its PCI resources disappear.
	 */
	il_apm_stop(il);

	/* Keep interrupts off from here on; the handler could otherwise
	 * still fire until free_irq() below. */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/* Flush pending deferred work before destroying the queue. */
	flush_workqueue(il->workqueue);

	/* NULL the pointer after destruction so any late submission
	 * attempt can detect the queue is gone. */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	il4965_uninit_drv(il);

	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6766
6767
6768
6769
6770
/*
 * il4965_txq_set_sched - write @mask to the scheduler's TX-queue
 * enable register (IL49_SCD_TXFACT), one bit per queue.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6776
6777
6778
6779
6780
6781
6782
6783
/*
 * PCI IDs of the two 4965-family devices handled by this driver,
 * matched with any subsystem ID.  Exported via MODULE_DEVICE_TABLE
 * for module autoloading.
 */
static const struct pci_device_id il4965_hw_card_ids[] = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6790
/* PCI driver glue; power-management ops come from shared iwlegacy code. */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = il4965_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};
6798
6799static int __init
6800il4965_init(void)
6801{
6802
6803 int ret;
6804 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6805 pr_info(DRV_COPYRIGHT "\n");
6806
6807 ret = il4965_rate_control_register();
6808 if (ret) {
6809 pr_err("Unable to register rate control algorithm: %d\n", ret);
6810 return ret;
6811 }
6812
6813 ret = pci_register_driver(&il4965_driver);
6814 if (ret) {
6815 pr_err("Unable to initialize PCI module\n");
6816 goto error_register;
6817 }
6818
6819 return ret;
6820
6821error_register:
6822 il4965_rate_control_unregister();
6823 return ret;
6824}
6825
/*
 * il4965_exit - module unload: unregister the PCI driver first so no
 * device is using the rate-control algorithm when it is removed.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6832
module_exit(il4965_exit);
module_init(il4965_init);

#ifdef CONFIG_IWLEGACY_DEBUG
/* Writable at runtime (0644) so debugging can be toggled via sysfs. */
module_param_named(debug, il_debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* The parameters below are read-only after load (mode 0444); they are
 * consumed through il4965_mod_params during device setup. */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, 0444);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
6851