1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/init.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/skbuff.h>
41#include <net/mac80211.h>
42
43#include "common.h"
44
45int
46_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
47{
48 const int interval = 10;
49 int t = 0;
50
51 do {
52 if ((_il_rd(il, addr) & mask) == (bits & mask))
53 return t;
54 udelay(interval);
55 t += interval;
56 } while (t < timeout);
57
58 return -ETIMEDOUT;
59}
60EXPORT_SYMBOL(_il_poll_bit);
61
62void
63il_set_bit(struct il_priv *p, u32 r, u32 m)
64{
65 unsigned long reg_flags;
66
67 spin_lock_irqsave(&p->reg_lock, reg_flags);
68 _il_set_bit(p, r, m);
69 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
70}
71EXPORT_SYMBOL(il_set_bit);
72
73void
74il_clear_bit(struct il_priv *p, u32 r, u32 m)
75{
76 unsigned long reg_flags;
77
78 spin_lock_irqsave(&p->reg_lock, reg_flags);
79 _il_clear_bit(p, r, m);
80 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
81}
82EXPORT_SYMBOL(il_clear_bit);
83
/*
 * Request "NIC access" so that periphery registers and target memory can
 * be read/written safely.  Caller must hold il->reg_lock and pair a
 * successful call with _il_release_nic_access().  Returns true on
 * success, false if the device never signalled MAC access.
 */
bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Wait for the device to acknowledge MAC access.  The mask also
	 * requires MAC_CLOCK_READY to be set and GOING_TO_SLEEP to be
	 * clear, so we only proceed once the MAC is awake and stable.
	 * Timeout is 15000 usec.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			  "(CSR_GP_CNTRL 0x%08x)\n", val);
		/* force an NMI so the firmware restarts; access may then
		 * recover on a later attempt */
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
125
126int
127il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
128{
129 const int interval = 10;
130 int t = 0;
131
132 do {
133 if ((il_rd(il, addr) & mask) == mask)
134 return t;
135 udelay(interval);
136 t += interval;
137 } while (t < timeout);
138
139 return -ETIMEDOUT;
140}
141EXPORT_SYMBOL(il_poll_bit);
142
143u32
144il_rd_prph(struct il_priv *il, u32 reg)
145{
146 unsigned long reg_flags;
147 u32 val;
148
149 spin_lock_irqsave(&il->reg_lock, reg_flags);
150 _il_grab_nic_access(il);
151 val = _il_rd_prph(il, reg);
152 _il_release_nic_access(il);
153 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
154 return val;
155}
156EXPORT_SYMBOL(il_rd_prph);
157
158void
159il_wr_prph(struct il_priv *il, u32 addr, u32 val)
160{
161 unsigned long reg_flags;
162
163 spin_lock_irqsave(&il->reg_lock, reg_flags);
164 if (likely(_il_grab_nic_access(il))) {
165 _il_wr_prph(il, addr, val);
166 _il_release_nic_access(il);
167 }
168 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
169}
170EXPORT_SYMBOL(il_wr_prph);
171
172u32
173il_read_targ_mem(struct il_priv *il, u32 addr)
174{
175 unsigned long reg_flags;
176 u32 value;
177
178 spin_lock_irqsave(&il->reg_lock, reg_flags);
179 _il_grab_nic_access(il);
180
181 _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
182 value = _il_rd(il, HBUS_TARG_MEM_RDAT);
183
184 _il_release_nic_access(il);
185 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
186 return value;
187}
188EXPORT_SYMBOL(il_read_targ_mem);
189
190void
191il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
192{
193 unsigned long reg_flags;
194
195 spin_lock_irqsave(&il->reg_lock, reg_flags);
196 if (likely(_il_grab_nic_access(il))) {
197 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
198 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
199 _il_release_nic_access(il);
200 }
201 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
202}
203EXPORT_SYMBOL(il_write_targ_mem);
204
/*
 * Map a host-command/notification id to its symbolic name for logging.
 * IL_CMD(x) expands to a case returning the stringified name (see
 * common.h); unknown ids return "UNKNOWN".
 */
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(il_get_cmd_string);
256
/* How long il_send_cmd_sync() waits for the firmware response */
#define HOST_COMPLETE_TIMEOUT (HZ / 2)

/*
 * Default completion callback used for async commands sent without an
 * explicit callback: log failure, or (in debug builds) log the response.
 */
static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		/* these two are very chatty, use the dump-level channel */
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}
281
282static int
283il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
284{
285 int ret;
286
287 BUG_ON(!(cmd->flags & CMD_ASYNC));
288
289
290 BUG_ON(cmd->flags & CMD_WANT_SKB);
291
292
293 if (!cmd->callback)
294 cmd->callback = il_generic_cmd_callback;
295
296 if (test_bit(S_EXIT_PENDING, &il->status))
297 return -EBUSY;
298
299 ret = il_enqueue_hcmd(il, cmd);
300 if (ret < 0) {
301 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
302 il_get_cmd_string(cmd->id), ret);
303 return ret;
304 }
305 return 0;
306}
307
/*
 * Send a host command and block until the firmware response arrives (or
 * HOST_COMPLETE_TIMEOUT expires).  Must be called with il->mutex held;
 * may sleep.  Returns 0 on success or a negative errno.  If the caller
 * set CMD_WANT_SKB, cmd->reply_page holds the response on success and
 * the caller must free it with il_free_pages().
 */
int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* the RX path clears S_HCMD_ACTIVE when the response is handled */
	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the TX cmd
		 * queue; otherwise, if the response arrives later, the RX
		 * path could try to hand a reply page to a caller that has
		 * already given up waiting.
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);
394
395int
396il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
397{
398 if (cmd->flags & CMD_ASYNC)
399 return il_send_cmd_async(il, cmd);
400
401 return il_send_cmd_sync(il, cmd);
402}
403EXPORT_SYMBOL(il_send_cmd);
404
405int
406il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
407{
408 struct il_host_cmd cmd = {
409 .id = id,
410 .len = len,
411 .data = data,
412 };
413
414 return il_send_cmd_sync(il, &cmd);
415}
416EXPORT_SYMBOL(il_send_cmd_pdu);
417
418int
419il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
420 void (*callback) (struct il_priv *il,
421 struct il_device_cmd *cmd,
422 struct il_rx_pkt *pkt))
423{
424 struct il_host_cmd cmd = {
425 .id = id,
426 .len = len,
427 .data = data,
428 };
429
430 cmd.flags |= CMD_ASYNC;
431 cmd.callback = callback;
432
433 return il_send_cmd_async(il, &cmd);
434}
435EXPORT_SYMBOL(il_send_cmd_pdu_async);
436
437
/* LED behavior module parameter: 0 = per-device default (il->cfg->led_mode),
 * 1 = solid on while the radio is on, 2 = throughput-based blinking. */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode,
		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
442
443
444
445
446
447
448
449
450
451
452
453
454
455
/*
 * Throughput-to-blink-period table for the mac80211 throughput LED
 * trigger: higher throughput -> shorter blink_time (faster blinking).
 * NOTE(review): units follow struct ieee80211_tpt_blink (throughput
 * threshold and blink_time in ms) — confirm against mac80211 headers.
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0, .blink_time = 334},
	{.throughput = 1 * 1024 - 1, .blink_time = 260},
	{.throughput = 5 * 1024 - 1, .blink_time = 220},
	{.throughput = 10 * 1024 - 1, .blink_time = 190},
	{.throughput = 20 * 1024 - 1, .blink_time = 170},
	{.throughput = 50 * 1024 - 1, .blink_time = 150},
	{.throughput = 70 * 1024 - 1, .blink_time = 130},
	{.throughput = 100 * 1024 - 1, .blink_time = 110},
	{.throughput = 200 * 1024 - 1, .blink_time = 80},
	{.throughput = 300 * 1024 - 1, .blink_time = 50},
};
468
469
470
471
472
473
474
475
476
477
478
479
480static inline u8
481il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
482{
483 if (!compensation) {
484 IL_ERR("undefined blink compensation: "
485 "use pre-defined blinking time\n");
486 return time;
487 }
488
489 return (u8) ((time * compensation) >> 6);
490}
491
492
493static int
494il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
495{
496 struct il_led_cmd led_cmd = {
497 .id = IL_LED_LINK,
498 .interval = IL_DEF_LED_INTRVL
499 };
500 int ret;
501
502 if (!test_bit(S_READY, &il->status))
503 return -EBUSY;
504
505 if (il->blink_on == on && il->blink_off == off)
506 return 0;
507
508 if (off == 0) {
509
510 on = IL_LED_SOLID;
511 }
512
513 D_LED("Led blink time compensation=%u\n",
514 il->cfg->led_compensation);
515 led_cmd.on =
516 il_blink_compensation(il, on,
517 il->cfg->led_compensation);
518 led_cmd.off =
519 il_blink_compensation(il, off,
520 il->cfg->led_compensation);
521
522 ret = il->ops->send_led_cmd(il, &led_cmd);
523 if (!ret) {
524 il->blink_on = on;
525 il->blink_off = off;
526 }
527 return ret;
528}
529
530static void
531il_led_brightness_set(struct led_classdev *led_cdev,
532 enum led_brightness brightness)
533{
534 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
535 unsigned long on = 0;
536
537 if (brightness > 0)
538 on = IL_LED_SOLID;
539
540 il_led_cmd(il, on, 0);
541}
542
543static int
544il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
545 unsigned long *delay_off)
546{
547 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
548
549 return il_led_cmd(il, *delay_on, *delay_off);
550}
551
/*
 * Register the driver LED with the LED class subsystem, choosing the
 * default trigger from the led_mode module parameter (or the per-device
 * default).  On registration failure the allocated name is freed and
 * led_registered stays false so il_leds_exit() is a no-op.
 */
void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	/* module parameter 0 means "use the device's configured mode" */
	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		/* cannot happen: resolved above */
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		/* blink proportionally to throughput via mac80211 trigger */
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		/* follow the radio on/off state */
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);
592
593void
594il_leds_exit(struct il_priv *il)
595{
596 if (!il->led_registered)
597 return;
598
599 led_classdev_unregister(&il->led);
600 kfree(il->led.name);
601}
602EXPORT_SYMBOL(il_leds_exit);
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
/*
 * Channel-number tables for the EEPROM regulatory bands.  Bands 1-5
 * describe 20 MHz channels; bands 6-7 list HT40 control channels.
 */

/* EEPROM band 1: 2.4 GHz, channels 1-14 */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* EEPROM band 2 (5.2 GHz block) */
static const u8 il_eeprom_band_2[] = {
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

/* EEPROM band 3 (5.2 GHz block) */
static const u8 il_eeprom_band_3[] = {
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

/* EEPROM band 4 (5.2 GHz block) */
static const u8 il_eeprom_band_4[] = {
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

/* EEPROM band 5 (5.2 GHz block) */
static const u8 il_eeprom_band_5[] = {
	145, 149, 153, 157, 161, 165
};

/* EEPROM band 6: 2.4 GHz HT40 control channels */
static const u8 il_eeprom_band_6[] = {
	1, 2, 3, 4, 5, 6, 7
};

/* EEPROM band 7: 5.2 GHz HT40 control channels */
static const u8 il_eeprom_band_7[] = {
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
664
665
666
667
668
669
670
671static int
672il_eeprom_verify_signature(struct il_priv *il)
673{
674 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
675 int ret = 0;
676
677 D_EEPROM("EEPROM signature=0x%08x\n", gp);
678 switch (gp) {
679 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
680 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
681 break;
682 default:
683 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
684 ret = -ENOENT;
685 break;
686 }
687 return ret;
688}
689
690const u8 *
691il_eeprom_query_addr(const struct il_priv *il, size_t offset)
692{
693 BUG_ON(offset >= il->cfg->eeprom_size);
694 return &il->eeprom[offset];
695}
696EXPORT_SYMBOL(il_eeprom_query_addr);
697
698u16
699il_eeprom_query16(const struct il_priv *il, size_t offset)
700{
701 if (!il->eeprom)
702 return 0;
703 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
704}
705EXPORT_SYMBOL(il_eeprom_query16);
706
707
708
709
710
711
712
713
/*
 * il_eeprom_init - read the whole EEPROM into il->eeprom
 *
 * Allocates the cache buffer (size from il->cfg->eeprom_size), powers
 * the device up (apm_init), verifies the EEPROM signature, acquires the
 * EEPROM semaphore and reads the image 16 bits at a time.  The buffer
 * is freed again on any failure and the chip is powered back down in
 * all cases.  Returns 0 on success or a negative errno.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate the EEPROM cache */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	e = (__le16 *) il->eeprom;

	/* power the device up so the EEPROM is accessible */
	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* the EEPROM is read as an array of 16-bit words */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		/* the data word is in the upper 16 bits of the register */
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* power the chip back down until "up" loads the microcode */
	il_apm_stop(il);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
786
787void
788il_eeprom_free(struct il_priv *il)
789{
790 kfree(il->eeprom);
791 il->eeprom = NULL;
792}
793EXPORT_SYMBOL(il_eeprom_free);
794
795static void
796il_init_band_reference(const struct il_priv *il, int eep_band,
797 int *eeprom_ch_count,
798 const struct il_eeprom_channel **eeprom_ch_info,
799 const u8 **eeprom_ch_idx)
800{
801 u32 offset = il->cfg->regulatory_bands[eep_band - 1];
802
803 switch (eep_band) {
804 case 1:
805 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
806 *eeprom_ch_info =
807 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
808 offset);
809 *eeprom_ch_idx = il_eeprom_band_1;
810 break;
811 case 2:
812 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
813 *eeprom_ch_info =
814 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
815 offset);
816 *eeprom_ch_idx = il_eeprom_band_2;
817 break;
818 case 3:
819 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
820 *eeprom_ch_info =
821 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
822 offset);
823 *eeprom_ch_idx = il_eeprom_band_3;
824 break;
825 case 4:
826 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
827 *eeprom_ch_info =
828 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
829 offset);
830 *eeprom_ch_idx = il_eeprom_band_4;
831 break;
832 case 5:
833 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
834 *eeprom_ch_info =
835 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
836 offset);
837 *eeprom_ch_idx = il_eeprom_band_5;
838 break;
839 case 6:
840 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
841 *eeprom_ch_info =
842 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
843 offset);
844 *eeprom_ch_idx = il_eeprom_band_6;
845 break;
846 case 7:
847 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
848 *eeprom_ch_info =
849 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
850 offset);
851 *eeprom_ch_idx = il_eeprom_band_7;
852 break;
853 default:
854 BUG();
855 }
856}
857
858#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
859 ? # x " " : "")
860
861
862
863
864
/*
 * il_mod_ht40_chan_info - merge HT40 EEPROM data into an existing channel
 * @clear_ht40_extension_channel: IEEE80211_CHAN_NO_HT40PLUS or _MINUS bit
 *	to clear (i.e. to mark as allowed) when the EEPROM channel is valid
 *
 * Returns 0 on success, -1 if the 20 MHz channel is unknown/invalid.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	/* cast away const: we update the driver's own channel table */
	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	/* a valid EEPROM entry enables this HT40 direction */
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
897
898#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
899 ? # x " " : "")
900
901
902
903
904int
905il_init_channel_map(struct il_priv *il)
906{
907 int eeprom_ch_count = 0;
908 const u8 *eeprom_ch_idx = NULL;
909 const struct il_eeprom_channel *eeprom_ch_info = NULL;
910 int band, ch;
911 struct il_channel_info *ch_info;
912
913 if (il->channel_count) {
914 D_EEPROM("Channel map already initialized.\n");
915 return 0;
916 }
917
918 D_EEPROM("Initializing regulatory info from EEPROM\n");
919
920 il->channel_count =
921 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
922 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
923 ARRAY_SIZE(il_eeprom_band_5);
924
925 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
926
927 il->channel_info =
928 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
929 GFP_KERNEL);
930 if (!il->channel_info) {
931 IL_ERR("Could not allocate channel_info\n");
932 il->channel_count = 0;
933 return -ENOMEM;
934 }
935
936 ch_info = il->channel_info;
937
938
939
940
941 for (band = 1; band <= 5; band++) {
942
943 il_init_band_reference(il, band, &eeprom_ch_count,
944 &eeprom_ch_info, &eeprom_ch_idx);
945
946
947 for (ch = 0; ch < eeprom_ch_count; ch++) {
948 ch_info->channel = eeprom_ch_idx[ch];
949 ch_info->band =
950 (band ==
951 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
952
953
954
955 ch_info->eeprom = eeprom_ch_info[ch];
956
957
958
959 ch_info->flags = eeprom_ch_info[ch].flags;
960
961
962 ch_info->ht40_extension_channel =
963 IEEE80211_CHAN_NO_HT40;
964
965 if (!(il_is_channel_valid(ch_info))) {
966 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
967 "No traffic\n", ch_info->channel,
968 ch_info->flags,
969 il_is_channel_a_band(ch_info) ? "5.2" :
970 "2.4");
971 ch_info++;
972 continue;
973 }
974
975
976 ch_info->max_power_avg = ch_info->curr_txpow =
977 eeprom_ch_info[ch].max_power_avg;
978 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
979 ch_info->min_power = 0;
980
981 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
982 " Ad-Hoc %ssupported\n", ch_info->channel,
983 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
984 CHECK_AND_PRINT_I(VALID),
985 CHECK_AND_PRINT_I(IBSS),
986 CHECK_AND_PRINT_I(ACTIVE),
987 CHECK_AND_PRINT_I(RADAR),
988 CHECK_AND_PRINT_I(WIDE),
989 CHECK_AND_PRINT_I(DFS),
990 eeprom_ch_info[ch].flags,
991 eeprom_ch_info[ch].max_power_avg,
992 ((eeprom_ch_info[ch].
993 flags & EEPROM_CHANNEL_IBSS) &&
994 !(eeprom_ch_info[ch].
995 flags & EEPROM_CHANNEL_RADAR)) ? "" :
996 "not ");
997
998 ch_info++;
999 }
1000 }
1001
1002
1003 if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
1004 il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
1005 return 0;
1006
1007
1008 for (band = 6; band <= 7; band++) {
1009 enum ieee80211_band ieeeband;
1010
1011 il_init_band_reference(il, band, &eeprom_ch_count,
1012 &eeprom_ch_info, &eeprom_ch_idx);
1013
1014
1015 ieeeband =
1016 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1017
1018
1019 for (ch = 0; ch < eeprom_ch_count; ch++) {
1020
1021 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
1022 &eeprom_ch_info[ch],
1023 IEEE80211_CHAN_NO_HT40PLUS);
1024
1025
1026 il_mod_ht40_chan_info(il, ieeeband,
1027 eeprom_ch_idx[ch] + 4,
1028 &eeprom_ch_info[ch],
1029 IEEE80211_CHAN_NO_HT40MINUS);
1030 }
1031 }
1032
1033 return 0;
1034}
1035EXPORT_SYMBOL(il_init_channel_map);
1036
1037
1038
1039
1040void
1041il_free_channel_map(struct il_priv *il)
1042{
1043 kfree(il->channel_info);
1044 il->channel_count = 0;
1045}
1046EXPORT_SYMBOL(il_free_channel_map);
1047
1048
1049
1050
1051
1052
1053const struct il_channel_info *
1054il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
1055 u16 channel)
1056{
1057 int i;
1058
1059 switch (band) {
1060 case IEEE80211_BAND_5GHZ:
1061 for (i = 14; i < il->channel_count; i++) {
1062 if (il->channel_info[i].channel == channel)
1063 return &il->channel_info[i];
1064 }
1065 break;
1066 case IEEE80211_BAND_2GHZ:
1067 if (channel >= 1 && channel <= 14)
1068 return &il->channel_info[channel - 1];
1069 break;
1070 default:
1071 BUG();
1072 }
1073
1074 return NULL;
1075}
1076EXPORT_SYMBOL(il_get_channel_info);
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/* One entry of a power-table vector: the sleep command to send, plus
 * no_dtim (presumably a DTIM-period threshold for when the entry
 * applies — TODO confirm against users of this struct). */
struct il_power_vec_entry {
	struct il_powertable_cmd cmd;
	u8 no_dtim;
};
1095
1096static void
1097il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
1098{
1099 memset(cmd, 0, sizeof(*cmd));
1100
1101 if (il->power_data.pci_pm)
1102 cmd->flags |= IL_POWER_PCI_PM_MSK;
1103
1104 D_POWER("Sleep command for CAM\n");
1105}
1106
/*
 * Send a power/sleep table command (C_POWER_TBL) to the firmware,
 * logging its contents first.  Synchronous; returns 0 or negative errno.
 */
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}
1124
/*
 * il_power_set_mode - program a new power/sleep command into the device
 * @cmd:   power table command to send
 * @force: send even if identical to the last programmed command
 *
 * Must be called with il->mutex held.  While scanning (and not forced)
 * the command is only stored in sleep_cmd_next for later.  On success
 * the command is cached so duplicate requests become no-ops.
 */
int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't touch RX chain config while chain-noise calibration runs */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* remembered so scan-complete can apply the deferred command */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		/* cache only on success so a failed send can be retried */
		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}
1171
1172int
1173il_power_update_mode(struct il_priv *il, bool force)
1174{
1175 struct il_powertable_cmd cmd;
1176
1177 il_power_sleep_cam_cmd(il, &cmd);
1178 return il_power_set_mode(il, &cmd, force);
1179}
1180EXPORT_SYMBOL(il_power_update_mode);
1181
1182
1183void
1184il_power_initialize(struct il_priv *il)
1185{
1186 u16 lctl = il_pcie_link_ctl(il);
1187
1188 il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
1189
1190 il->power_data.debug_sleep_level_override = -1;
1191
1192 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1193}
1194EXPORT_SYMBOL(il_power_initialize);
1195
1196
1197
1198
/* Active scan: base listen time per channel, scaled per probe request
 * in il_get_active_dwell_time().  NOTE(review): units presumably msec
 * per the original driver comments — confirm against scan command use. */
#define IL_ACTIVE_DWELL_TIME_24 (30)
#define IL_ACTIVE_DWELL_TIME_52 (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* Passive scan: base + per-band increment, clamped against the beacon
 * interval in il_get_passive_dwell_time(). */
#define IL_PASSIVE_DWELL_TIME_24 (20)
#define IL_PASSIVE_DWELL_TIME_52 (10)
#define IL_PASSIVE_DWELL_BASE (100)
#define IL_CHANNEL_TUNE_TIME 5
1212
/*
 * Ask the firmware to abort the hardware scan.  Synchronous; returns 0
 * when the firmware accepted the abort, -EIO when the device is not in
 * a state to abort or the firmware rejected the request.
 */
static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when the device is not ready to
	 * receive a scan abort command, or no hardware scan is active */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* A failure status can simply mean there was no active
		 * scan to abort, e.g. if the abort raced with the
		 * microcode's own scan-complete notification. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	/* CMD_WANT_SKB responses must be freed by the caller */
	il_free_pages(il, cmd.reply_page);
	return ret;
}
1252
1253static void
1254il_complete_scan(struct il_priv *il, bool aborted)
1255{
1256
1257 if (il->scan_request) {
1258 D_SCAN("Complete scan in mac80211\n");
1259 ieee80211_scan_completed(il->hw, aborted);
1260 }
1261
1262 il->scan_vif = NULL;
1263 il->scan_request = NULL;
1264}
1265
/*
 * Forcibly end a scan: clear every scan-related status bit and report
 * an aborted scan to mac80211.  Must be called with il->mutex held.
 */
void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}
1282
1283static void
1284il_do_scan_abort(struct il_priv *il)
1285{
1286 int ret;
1287
1288 lockdep_assert_held(&il->mutex);
1289
1290 if (!test_bit(S_SCANNING, &il->status)) {
1291 D_SCAN("Not performing scan to abort\n");
1292 return;
1293 }
1294
1295 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1296 D_SCAN("Scan abort in progress\n");
1297 return;
1298 }
1299
1300 ret = il_send_scan_abort(il);
1301 if (ret) {
1302 D_SCAN("Send scan abort failed %d\n", ret);
1303 il_force_scan_end(il);
1304 } else
1305 D_SCAN("Successfully send scan abort\n");
1306}
1307
1308
1309
1310
1311int
1312il_scan_cancel(struct il_priv *il)
1313{
1314 D_SCAN("Queuing abort scan\n");
1315 queue_work(il->workqueue, &il->abort_scan);
1316 return 0;
1317}
1318EXPORT_SYMBOL(il_scan_cancel);
1319
1320
1321
1322
1323
1324
1325int
1326il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1327{
1328 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1329
1330 lockdep_assert_held(&il->mutex);
1331
1332 D_SCAN("Scan cancel timeout\n");
1333
1334 il_do_scan_abort(il);
1335
1336 while (time_before_eq(jiffies, timeout)) {
1337 if (!test_bit(S_SCAN_HW, &il->status))
1338 break;
1339 msleep(20);
1340 }
1341
1342 return test_bit(S_SCAN_HW, &il->status);
1343}
1344EXPORT_SYMBOL(il_scan_cancel_timeout);
1345
1346
/* Service N_SCAN notification (firmware reply to the C_SCAN request);
 * debug-only logging, no state change. */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1358
1359
1360static void
1361il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
1362{
1363 struct il_rx_pkt *pkt = rxb_addr(rxb);
1364 struct il_scanstart_notification *notif =
1365 (struct il_scanstart_notification *)pkt->u.raw;
1366 il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
1367 D_SCAN("Scan start: " "%d [802.11%s] "
1368 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
1369 notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
1370 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
1371}
1372
1373
/* Service N_SCAN_RESULTS notification (per-channel scan results);
 * debug-only logging, no state change. */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1389
1390
/* Service N_SCAN_COMPLETE notification: hardware finished scanning. */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	/* NOTE(review): scan_notif exists only under CONFIG_IWLEGACY_DEBUG;
	 * this relies on D_SCAN() compiling its arguments away when debug
	 * is off — confirm the macro definition in common.h. */
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* the hardware is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	/* finish up (mac80211 notification etc.) from the workqueue */
	queue_work(il->workqueue, &il->scan_completed);
}
1413
/* Install RX dispatch handlers for all scan-related uCode notifications. */
void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1424
1425inline u16
1426il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1427 u8 n_probes)
1428{
1429 if (band == IEEE80211_BAND_5GHZ)
1430 return IL_ACTIVE_DWELL_TIME_52 +
1431 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1432 else
1433 return IL_ACTIVE_DWELL_TIME_24 +
1434 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1435}
1436EXPORT_SYMBOL(il_get_active_dwell_time);
1437
1438u16
1439il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1440 struct ieee80211_vif *vif)
1441{
1442 u16 value;
1443
1444 u16 passive =
1445 (band ==
1446 IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
1447 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
1448 IL_PASSIVE_DWELL_TIME_52;
1449
1450 if (il_is_any_associated(il)) {
1451
1452
1453
1454
1455
1456 value = il->vif ? il->vif->bss_conf.beacon_int : 0;
1457 if (value > IL_PASSIVE_DWELL_BASE || !value)
1458 value = IL_PASSIVE_DWELL_BASE;
1459 value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
1460 passive = min(value, passive);
1461 }
1462
1463 return passive;
1464}
1465EXPORT_SYMBOL(il_get_passive_dwell_time);
1466
1467void
1468il_init_scan_params(struct il_priv *il)
1469{
1470 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1471 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
1472 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1473 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
1474 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1475}
1476EXPORT_SYMBOL(il_init_scan_params);
1477
/*
 * il_scan_initiate - start a hardware scan
 *
 * Caller must hold il->mutex (asserted).  Arms the scan_check watchdog
 * so a scan that never reports completion is forcefully ended by
 * il_bg_scan_check().
 *
 * Return: 0 on success; -EIO if the device/RF is not ready; -EBUSY if a
 * HW scan is already running or an abort is pending; otherwise the error
 * from ops->request_scan().
 */
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	/* mark scanning before issuing the command; roll back on failure */
	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	/* watchdog: force the scan to end if no completion arrives */
	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1518
/*
 * il_mac_hw_scan - mac80211 hw_scan callback
 *
 * Validates the request, records it in il->scan_request/scan_vif, and
 * initiates the hardware scan.  Only the first channel's band is stored;
 * the scan request itself is consumed later by the ops->request_scan()
 * implementation.
 *
 * Return: 0 on success, -EINVAL for an empty channel list, -EAGAIN if a
 * scan is already in progress, or the error from il_scan_initiate().
 */
int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct cfg80211_scan_request *req)
{
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Can not scan on no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);
1554
/* Watchdog work armed by il_scan_initiate(): if it ever runs, the scan
 * did not complete within IL_SCAN_CHECK_WATCHDOG, so force it to end. */
static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}
1570
1571
1572
1573
1574
/*
 * il_fill_probe_req - build a broadcast probe request frame into @frame
 * @ta: transmitter address, written into the SA field
 * @ies: optional information elements appended after the SSID element
 * @ie_len: length of @ies in bytes
 * @left: space available at @frame in bytes
 *
 * Layout: 24-byte 802.11 management header, a 2-byte zero-length
 * (broadcast) SSID element, then @ies.
 *
 * Return: total frame length in bytes; 0 if @left cannot even hold the
 * header plus SSID element; the length without @ies if the WARN_ON on
 * insufficient IE room trips.
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, il_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, ta, ETH_ALEN);
	memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE (zero-length = broadcast SSID) */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
1619
/* Work item: abort an in-progress scan, waiting up to 200ms for the
 * hardware to acknowledge via il_scan_cancel_timeout(). */
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}
1633
/* Work item queued by il_hdl_scan_complete(): finish scan bookkeeping,
 * notify mac80211, and restore post-scan power/TX settings. */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	/* the scan finished, so the watchdog is no longer needed */
	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	/* someone (e.g. il_force_scan_end) may already have completed it */
	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1674
/* Initialize the three scan-related work items (completion, abort, and
 * the delayed watchdog check). */
void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);
1683
/* Synchronously cancel all scan work.  If the watchdog was still pending
 * a scan must still be outstanding, so force it to end under the mutex. */
void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1697
1698
1699static void
1700il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1701{
1702
1703 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1704 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1705 sta_id, il->stations[sta_id].sta.sta.addr);
1706
1707 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1708 D_ASSOC("STA id %u addr %pM already present"
1709 " in uCode (according to driver)\n", sta_id,
1710 il->stations[sta_id].sta.sta.addr);
1711 } else {
1712 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1713 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1714 il->stations[sta_id].sta.sta.addr);
1715 }
1716}
1717
/*
 * il_process_add_sta_resp - handle the device's reply to C_ADD_STA
 * @addsta: the command that was sent (used for logging)
 * @pkt: the reply packet
 * @sync: true when called from the synchronous path (currently only
 *	informational; both paths are handled identically here)
 *
 * On ADD_STA_SUCCESS_MSK, marks the station uCode-active under sta_lock.
 * Return: 0 on success, -EIO on any failure status.
 */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1777
/* Async completion callback for C_ADD_STA: recover the original command
 * payload and run the common response processing. */
static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);

}
1787
1788int
1789il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1790{
1791 struct il_rx_pkt *pkt = NULL;
1792 int ret = 0;
1793 u8 data[sizeof(*sta)];
1794 struct il_host_cmd cmd = {
1795 .id = C_ADD_STA,
1796 .flags = flags,
1797 .data = data,
1798 };
1799 u8 sta_id __maybe_unused = sta->sta.sta_id;
1800
1801 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1802 flags & CMD_ASYNC ? "a" : "");
1803
1804 if (flags & CMD_ASYNC)
1805 cmd.callback = il_add_sta_callback;
1806 else {
1807 cmd.flags |= CMD_WANT_SKB;
1808 might_sleep();
1809 }
1810
1811 cmd.len = il->ops->build_addsta_hcmd(sta, data);
1812 ret = il_send_cmd(il, &cmd);
1813
1814 if (ret || (flags & CMD_ASYNC))
1815 return ret;
1816
1817 if (ret == 0) {
1818 pkt = (struct il_rx_pkt *)cmd.reply_page;
1819 ret = il_process_add_sta_resp(il, sta, pkt, true);
1820 }
1821 il_free_pages(il, cmd.reply_page);
1822
1823 return ret;
1824}
1825EXPORT_SYMBOL(il_send_add_sta);
1826
1827static void
1828il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
1829{
1830 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1831 __le32 sta_flags;
1832 u8 mimo_ps_mode;
1833
1834 if (!sta || !sta_ht_inf->ht_supported)
1835 goto done;
1836
1837 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
1838 D_ASSOC("spatial multiplexing power save mode: %s\n",
1839 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
1840 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
1841 "disabled");
1842
1843 sta_flags = il->stations[idx].sta.station_flags;
1844
1845 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1846
1847 switch (mimo_ps_mode) {
1848 case WLAN_HT_CAP_SM_PS_STATIC:
1849 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1850 break;
1851 case WLAN_HT_CAP_SM_PS_DYNAMIC:
1852 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1853 break;
1854 case WLAN_HT_CAP_SM_PS_DISABLED:
1855 break;
1856 default:
1857 IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
1858 break;
1859 }
1860
1861 sta_flags |=
1862 cpu_to_le32((u32) sta_ht_inf->
1863 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1864
1865 sta_flags |=
1866 cpu_to_le32((u32) sta_ht_inf->
1867 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1868
1869 if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
1870 sta_flags |= STA_FLG_HT40_EN_MSK;
1871 else
1872 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1873
1874 il->stations[idx].sta.station_flags = sta_flags;
1875done:
1876 return;
1877}
1878
1879
1880
1881
1882
1883
/*
 * il_prep_station - prepare (or find) a driver station-table entry
 * @addr: station MAC address
 * @is_ap: true when adding the AP station (fixed slot IL_AP_ID)
 * @sta: mac80211 station for HT setup, may be NULL
 *
 * Caller must hold il->sta_lock.  Finds an existing entry for @addr or
 * claims the first free slot, fills in the C_ADD_STA command template
 * and default rate, and returns the station index.
 *
 * Return: the station id, or IL_INVALID_STATION if the table is full.
 */
u8
il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
		struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = IL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		sta_id = il->hw_params.bcast_id;
	else
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			/* remember the first free slot seen while scanning */
			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = 0;

	/*
	 * OK to call unconditionally, since we will not add this station
	 * if it is not an HT station, and we will not even be interested
	 * in checking it for HT configuration when adding.
	 */
	il_set_ht_add_station(il, sta_id, sta);

	/* 3945 only: default TX rate depends on the band */
	rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;

}
EXPORT_SYMBOL_GPL(il_prep_station);
1963
1964#define STA_WAIT_TIMEOUT (HZ/2)
1965
1966
1967
1968
/*
 * il_add_station_common - prepare a station entry and add it to uCode
 * @sta_id_r: on success, receives the station index
 *
 * Takes sta_lock to prepare the entry, then drops the lock for the
 * blocking C_ADD_STA exchange (il_send_add_sta with CMD_SYNC).  A copy
 * of the add-station command is taken under the lock so the command sent
 * is consistent even if the table changes meanwhile.
 *
 * Return: 0 on success, -EINVAL if no slot could be prepared, -EEXIST
 * if the station is already being added or is already active, or the
 * error from il_send_add_sta().
 */
int
il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
		      struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		/* roll back the driver-active mark on failure */
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);
2025
2026
2027
2028
2029
2030
2031static void
2032il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
2033{
2034
2035 if ((il->stations[sta_id].
2036 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
2037 IL_STA_UCODE_ACTIVE)
2038 IL_ERR("removed non active STA %u\n", sta_id);
2039
2040 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
2041
2042 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
2043 D_ASSOC("Removed STA %u\n", sta_id);
2044}
2045
/*
 * il_send_remove_station - send C_REM_STA for one station
 * @temporary: when true, do NOT mark the driver's entry uCode-inactive
 *	(the station is expected to be re-added)
 *
 * Synchronous command; on REM_STA_SUCCESS_MSK and !@temporary the entry
 * is deactivated under sta_lock.  Return: 0 on success, -EIO on a failed
 * command or unexpected status, or the error from il_send_cmd().
 */
static int
il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
		       bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;

	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;

	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);

	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
2101
2102
2103
2104
/*
 * il_remove_station - remove a station from the driver and from uCode
 *
 * Validates state under sta_lock, releases the driver-side entry (and
 * its link-quality data for local stations), then sends C_REM_STA
 * outside the lock.  Return: 0 on success (or when the device is not
 * ready, since uCode will be cleared on next load anyway), -EINVAL on
 * inconsistent state.
 */
int
il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
{
	unsigned long flags;

	if (!il_is_ready(il)) {
		D_INFO("Unable to remove station %pM, device not ready.\n",
		       addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);

	if (WARN_ON(sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags);

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		D_INFO("Removing %pM but non DRIVER active\n", addr);
		goto out_err;
	}

	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_INFO("Removing %pM but non UCODE active\n", addr);
		goto out_err;
	}

	if (il->stations[sta_id].used & IL_STA_LOCAL) {
		kfree(il->stations[sta_id].lq);
		il->stations[sta_id].lq = NULL;
	}

	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;

	il->num_stations--;

	BUG_ON(il->num_stations < 0);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_remove_station(il, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(il_remove_station);
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166void
2167il_clear_ucode_stations(struct il_priv *il)
2168{
2169 int i;
2170 unsigned long flags_spin;
2171 bool cleared = false;
2172
2173 D_INFO("Clearing ucode stations in driver\n");
2174
2175 spin_lock_irqsave(&il->sta_lock, flags_spin);
2176 for (i = 0; i < il->hw_params.max_stations; i++) {
2177 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2178 D_INFO("Clearing ucode active for station %d\n", i);
2179 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2180 cleared = true;
2181 }
2182 }
2183 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2184
2185 if (!cleared)
2186 D_INFO("No active stations found to be cleared\n");
2187}
2188EXPORT_SYMBOL(il_clear_ucode_stations);
2189
2190
2191
2192
2193
2194
2195
2196
2197
/*
 * il_restore_stations - re-add to uCode every driver-active station
 *
 * Used after firmware reload: any station the driver still considers
 * active but uCode has lost is marked IL_STA_UCODE_INPROGRESS and
 * re-sent via C_ADD_STA (with its link-quality command, if any).
 * sta_lock is dropped around the synchronous sends; local copies of the
 * commands are taken under the lock first.
 */
void
il_restore_stations(struct il_priv *il)
{
	struct il_addsta_cmd sta_cmd;
	struct il_link_quality_cmd lq;
	unsigned long flags_spin;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!il_is_ready(il)) {
		D_INFO("Not ready yet, not restoring any stations.\n");
		return;
	}

	D_ASSOC("Restoring all known stations ... start.\n");
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	/* first pass: mark everything that needs restoring */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
			D_ASSOC("Restoring sta %pM\n",
				il->stations[i].sta.sta.addr);
			il->stations[i].sta.mode = 0;
			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/* second pass: send the commands, dropping the lock around each */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
			memcpy(&sta_cmd, &il->stations[i].sta,
			       sizeof(struct il_addsta_cmd));
			send_lq = false;
			if (il->stations[i].lq) {
				memcpy(&lq, il->stations[i].lq,
				       sizeof(struct il_link_quality_cmd));
				send_lq = true;
			}
			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
			if (ret) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				IL_ERR("Adding station %pM failed.\n",
				       il->stations[i].sta.sta.addr);
				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
				il->stations[i].used &=
				    ~IL_STA_UCODE_INPROGRESS;
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command
			 */
			if (send_lq)
				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
			spin_lock_irqsave(&il->sta_lock, flags_spin);
			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	if (!found)
		D_INFO("Restoring all known stations"
		       " .... no stations to be restored.\n");
	else
		D_INFO("Restoring all known stations" " .... complete.\n");
}
EXPORT_SYMBOL(il_restore_stations);
2268
2269int
2270il_get_free_ucode_key_idx(struct il_priv *il)
2271{
2272 int i;
2273
2274 for (i = 0; i < il->sta_key_max_num; i++)
2275 if (!test_and_set_bit(i, &il->ucode_key_table))
2276 return i;
2277
2278 return WEP_INVALID_OFFSET;
2279}
2280EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2281
2282void
2283il_dealloc_bcast_stations(struct il_priv *il)
2284{
2285 unsigned long flags;
2286 int i;
2287
2288 spin_lock_irqsave(&il->sta_lock, flags);
2289 for (i = 0; i < il->hw_params.max_stations; i++) {
2290 if (!(il->stations[i].used & IL_STA_BCAST))
2291 continue;
2292
2293 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2294 il->num_stations--;
2295 BUG_ON(il->num_stations < 0);
2296 kfree(il->stations[i].lq);
2297 il->stations[i].lq = NULL;
2298 }
2299 spin_unlock_irqrestore(&il->sta_lock, flags);
2300}
2301EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2302
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump a link-quality command (station id, antenna masks, and the full
 * rate-scale retry table) at D_RATE level. */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
}
#else
/* Non-debug stub: compiled out entirely. */
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333static bool
2334il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
2335{
2336 int i;
2337
2338 if (il->ht.enabled)
2339 return true;
2340
2341 D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2342 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2343 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2344 D_INFO("idx %d of LQ expects HT channel\n", i);
2345 return false;
2346 }
2347 }
2348 return true;
2349}
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
/*
 * il_send_lq_cmd - send C_TX_LINK_QUALITY_CMD for a station
 * @flags: CMD_SYNC or CMD_ASYNC
 * @init: true when this is the initial LQ command after adding the
 *	station; on the sync path this clears IL_STA_UCODE_INPROGRESS
 *	so il_add_station_common() callers can proceed
 *
 * Refuses invalid station ids, stations not driver-active, and LQ
 * tables that request HT rates on a non-HT channel.
 * Return: 0 on success, -EINVAL on validation failure, or the error
 * from il_send_cmd().
 */
int
il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
	       u8 flags, bool init)
{
	int ret = 0;
	unsigned long flags_spin;

	struct il_host_cmd cmd = {
		.id = C_TX_LINK_QUALITY_CMD,
		.len = sizeof(struct il_link_quality_cmd),
		.flags = flags,
		.data = lq,
	};

	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags_spin);
	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	il_dump_lq_cmd(il, lq);
	/* an async init command could not clear INPROGRESS reliably */
	BUG_ON(init && (cmd.flags & CMD_ASYNC));

	if (il_is_lq_table_valid(il, lq))
		ret = il_send_cmd(il, &cmd);
	else
		ret = -EINVAL;

	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		D_INFO("init LQ command complete,"
		       " clearing sta addition status for sta %d\n",
		       lq->sta_id);
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	return ret;
}
EXPORT_SYMBOL(il_send_lq_cmd);
2407
/* mac80211 sta_remove callback: remove the station (looked up via the
 * id stored in the station's drv_priv) under il->mutex. */
int
il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter station %pM\n", sta->addr);

	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
	if (ret)
		IL_ERR("Error removing station %pM\n", sta->addr);

	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_sta_remove);
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500int
2501il_rx_queue_space(const struct il_rx_queue *q)
2502{
2503 int s = q->read - q->write;
2504 if (s <= 0)
2505 s += RX_QUEUE_SIZE;
2506
2507 s -= 2;
2508 if (s < 0)
2509 s = 0;
2510 return s;
2511}
2512EXPORT_SYMBOL(il_rx_queue_space);
2513
2514
2515
2516
/*
 * il_rx_queue_update_write_ptr - push the RX write pointer to the device
 *
 * If the device may be asleep (S_POWER_PMI), first check MAC_SLEEP in
 * CSR_UCODE_DRV_GP1; when it is asleep, request a wakeup via
 * CSR_GP_CNTRL and retry later (need_update stays set).  The pointer
 * written is rounded down to a multiple of 8.
 */
void
il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(S_POWER_PMI, &il->status)) {
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
			       reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* leave need_update set; retry after wakeup */
			goto exit_unlock;
		}

		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2557
2558int
2559il_rx_queue_alloc(struct il_priv *il)
2560{
2561 struct il_rx_queue *rxq = &il->rxq;
2562 struct device *dev = &il->pci_dev->dev;
2563 int i;
2564
2565 spin_lock_init(&rxq->lock);
2566 INIT_LIST_HEAD(&rxq->rx_free);
2567 INIT_LIST_HEAD(&rxq->rx_used);
2568
2569
2570 rxq->bd =
2571 dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2572 GFP_KERNEL);
2573 if (!rxq->bd)
2574 goto err_bd;
2575
2576 rxq->rb_stts =
2577 dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2578 &rxq->rb_stts_dma, GFP_KERNEL);
2579 if (!rxq->rb_stts)
2580 goto err_rb;
2581
2582
2583 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2584 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2585
2586
2587
2588 rxq->read = rxq->write = 0;
2589 rxq->write_actual = 0;
2590 rxq->free_count = 0;
2591 rxq->need_update = 0;
2592 return 0;
2593
2594err_rb:
2595 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2596 rxq->bd_dma);
2597err_bd:
2598 return -ENOMEM;
2599}
2600EXPORT_SYMBOL(il_rx_queue_alloc);
2601
2602void
2603il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2604{
2605 struct il_rx_pkt *pkt = rxb_addr(rxb);
2606 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2607
2608 if (!report->state) {
2609 D_11H("Spectrum Measure Notification: Start\n");
2610 return;
2611 }
2612
2613 memcpy(&il->measure_report, report, sizeof(*report));
2614 il->measurement_status |= MEASUREMENT_READY;
2615}
2616EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2617
2618
2619
2620
/*
 * il_set_decrypted_flag - translate HW decryption status for mac80211
 * @decrypt_res: decryption status bits from the RX response
 * @stats: RX status to receive RX_FLAG_DECRYPTED on success
 *
 * Return: 0 to pass the frame up (decrypted or to be handled in SW),
 * -1 when the hardware reports a bad ICV/MIC and the frame must be
 * dropped.
 */
int
il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
		      u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All frames are passed up unmodified when decryption is disabled
	 * in the RXON filter; there is no HW status to translate.
	 */
	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames carry no decryption status */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	D_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* a bad TTAK is not an error: leave the frame for SW */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TKIP shares the ICV/MIC check with WEP */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV/MIC: the frame is unusable, drop it */
			D_RX("Packet destroyed\n");
			return -1;
		}
		/* fall through - check for successful decryption */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			D_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(il_set_decrypted_flag);
2668
2669
2670
2671
/*
 * il_txq_update_write_ptr - push a TX queue's write pointer to the device
 *
 * Mirrors il_rx_queue_update_write_ptr(): when power management is
 * active (S_POWER_PMI) and the MAC is asleep, only request a wakeup and
 * leave need_update set so the write is retried; otherwise write the
 * pointer (queue id in bits 8+) to HBUS_TARG_WRPTR.
 */
void
il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
			       txq_id, reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
	} else
		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
2708
2709
2710
2711
2712void
2713il_tx_queue_unmap(struct il_priv *il, int txq_id)
2714{
2715 struct il_tx_queue *txq = &il->txq[txq_id];
2716 struct il_queue *q = &txq->q;
2717
2718 if (q->n_bd == 0)
2719 return;
2720
2721 while (q->write_ptr != q->read_ptr) {
2722 il->ops->txq_free_tfd(il, txq);
2723 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2724 }
2725}
2726EXPORT_SYMBOL(il_tx_queue_unmap);
2727
2728
2729
2730
2731
2732
2733
2734
2735
/*
 * il_tx_queue_free - free all resources of a data TX queue
 *
 * Unmaps outstanding TFDs, frees the TFD_TX_CMD_SLOTS command buffers,
 * the DMA-coherent TFD array, the skb pointer array and the cmd/meta
 * arrays, then zeroes the queue structure (which also sets n_bd = 0,
 * marking the queue unallocated).
 */
void
il_tx_queue_free(struct il_priv *il, int txq_id)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_tx_queue_unmap(il, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_tx_queue_free);
2768
2769
2770
2771
/*
 * il_cmd_queue_unmap - unmap all in-flight host commands
 *
 * Walks the command queue from read_ptr to write_ptr, unmapping any
 * command whose meta entry is still CMD_MAPPED.  Slot q->n_win is
 * checked separately afterwards — it sits outside the normal window
 * (appears reserved for the oversized/"huge" command buffer; confirm
 * against the command queue setup).
 */
void
il_cmd_queue_unmap(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = il_get_cmd_idx(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_MAPPED) {
			pci_unmap_single(il->pci_dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 PCI_DMA_BIDIRECTIONAL);
			txq->meta[i].flags = 0;
		}

		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	/* the extra slot beyond the window, see function comment */
	i = q->n_win;
	if (txq->meta[i].flags & CMD_MAPPED) {
		pci_unmap_single(il->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
		txq->meta[i].flags = 0;
	}
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
2806
2807
2808
2809
2810
2811
2812
2813
2814
/*
 * il_cmd_queue_free - deallocate the host-command queue
 *
 * Command-queue counterpart of il_tx_queue_free().  Note the loop bound
 * is `<= TFD_CMD_SLOTS`: the command queue owns TFD_CMD_SLOTS + 1
 * buffers, the last being the oversized "huge" command buffer (see
 * il_tx_queue_init()).  The command queue has no skb array to free.
 */
void
il_cmd_queue_free(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_cmd_queue_unmap(il);

	/* De-alloc array of command/tx buffers, incl. the huge slot */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc the slot arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_cmd_queue_free);
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867int
2868il_queue_space(const struct il_queue *q)
2869{
2870 int s = q->read_ptr - q->write_ptr;
2871
2872 if (q->read_ptr > q->write_ptr)
2873 s -= q->n_bd;
2874
2875 if (s <= 0)
2876 s += q->n_win;
2877
2878 s -= 2;
2879 if (s < 0)
2880 s = 0;
2881 return s;
2882}
2883EXPORT_SYMBOL(il_queue_space);
2884
2885
2886
2887
2888
/*
 * il_queue_init - initialize ring indices and watermarks for a queue
 *
 * @slots: number of usable window entries (must be a power of 2 so the
 *         index-wrap arithmetic works with simple masking).
 * @id:    queue identifier stored in q->id.
 *
 * Always returns 0.
 */
static int
il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
{
	/*
	 * TFD_QUEUE_SIZE_MAX must be a power of 2; the wrap helpers rely
	 * on it.  Checked at compile time.
	 */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Hardware ring is always full-size regardless of the window. */
	q->n_bd = TFD_QUEUE_SIZE_MAX;

	q->n_win = slots;
	q->id = id;

	/* slots must also be a power of 2 for il_get_cmd_idx() masking. */
	BUG_ON(!is_power_of_2(slots));

	/* low_mark: refill threshold, at least 4 entries */
	q->low_mark = q->n_win / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	/* high_mark: stop threshold, at least 2 entries */
	q->high_mark = q->n_win / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
2919
2920
2921
2922
2923static int
2924il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2925{
2926 struct device *dev = &il->pci_dev->dev;
2927 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2928
2929
2930
2931 if (id != il->cmd_queue) {
2932 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(struct skb *),
2933 GFP_KERNEL);
2934 if (!txq->skbs) {
2935 IL_ERR("Fail to alloc skbs\n");
2936 goto error;
2937 }
2938 } else
2939 txq->skbs = NULL;
2940
2941
2942
2943 txq->tfds =
2944 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2945 if (!txq->tfds) {
2946 IL_ERR("Fail to alloc TFDs\n");
2947 goto error;
2948 }
2949 txq->q.id = id;
2950
2951 return 0;
2952
2953error:
2954 kfree(txq->skbs);
2955 txq->skbs = NULL;
2956
2957 return -ENOMEM;
2958}
2959
2960
2961
2962
/*
 * il_tx_queue_init - allocate and initialize one Tx or command queue
 *
 * Allocates the meta/cmd slot arrays, one command buffer per slot, the
 * skb array and the TFD ring, then initializes ring indices and tells
 * the device about the queue via the hardware-specific txq_init hook.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is freed again.
 */
int
il_tx_queue_init(struct il_priv *il, u32 txq_id)
{
	int i, len, ret;
	int slots, actual_slots;
	struct il_tx_queue *txq = &il->txq[txq_id];

	/*
	 * The command queue gets one extra slot beyond its window:
	 * index `slots` holds the oversized "huge" host-command buffer
	 * (IL_MAX_CMD_SIZE); see il_enqueue_hcmd()/CMD_SIZE_HUGE.
	 * Data queues need no extra slot.
	 */
	if (txq_id == il->cmd_queue) {
		slots = TFD_CMD_SLOTS;
		actual_slots = slots + 1;
	} else {
		slots = TFD_TX_CMD_SLOTS;
		actual_slots = slots;
	}

	txq->meta =
	    kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
	txq->cmd =
	    kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct il_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only the command queue's last (huge) slot is larger */
		if (i == slots)
			len = IL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc skb array and DMA-coherent TFD ring */
	ret = il_tx_queue_alloc(il, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Map software queue id 1:1 onto hardware queue for queues 0-3.
	 * NOTE(review): presumably the remaining queues are mapped
	 * elsewhere (e.g. aggregation setup) — confirm against callers.
	 */
	if (txq_id < 4)
		il_set_swq_id(txq, txq_id, txq_id);

	/* Initialize ring indices and high/low watermarks */
	il_queue_init(il, &txq->q, slots, txq_id);

	/* Tell device where to find the queue */
	il->ops->txq_init(il, txq);

	return 0;
err:
	/* kzalloc'ed cmd[] means unallocated slots are NULL: kfree-safe */
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(il_tx_queue_init);
3037
3038void
3039il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3040{
3041 int slots, actual_slots;
3042 struct il_tx_queue *txq = &il->txq[txq_id];
3043
3044 if (txq_id == il->cmd_queue) {
3045 slots = TFD_CMD_SLOTS;
3046 actual_slots = TFD_CMD_SLOTS + 1;
3047 } else {
3048 slots = TFD_TX_CMD_SLOTS;
3049 actual_slots = TFD_TX_CMD_SLOTS;
3050 }
3051
3052 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3053 txq->need_update = 0;
3054
3055
3056 il_queue_init(il, &txq->q, slots, txq_id);
3057
3058
3059 il->ops->txq_init(il, txq);
3060}
3061EXPORT_SYMBOL(il_tx_queue_reset);
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
/*
 * il_enqueue_hcmd - place a host command on the command queue
 *
 * Copies the command into the next free slot, DMA-maps it, attaches it
 * to a TFD and bumps the write pointer so the device picks it up.
 * Serialized by il->hcmd_lock.
 *
 * Returns the slot index written (>= 0) on success, -EIO when the
 * radio is RF/CT-killed, or -ENOSPC when the queue is full (in which
 * case a firmware restart is also scheduled).
 */
int
il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	struct il_device_cmd *out_cmd;
	struct il_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));

	/*
	 * Commands larger than TFD_MAX_PAYLOAD_SIZE must be flagged
	 * CMD_SIZE_HUGE so they land in the dedicated oversized slot;
	 * nothing may exceed IL_MAX_CMD_SIZE.
	 */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IL_MAX_CMD_SIZE);

	if (il_is_rfkill(il) || il_is_ctkill(il)) {
		IL_WARN("Not sending command - %s KILL\n",
			il_is_rfkill(il) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&il->hcmd_lock, flags);

	/* async commands keep one extra slot of headroom */
	if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&il->hcmd_lock, flags);

		IL_ERR("Restarting adapter due to command queue full\n");
		queue_work(il->workqueue, &il->restart);
		return -ENOSPC;
	}

	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	/* slot still DMA-mapped from a previous command: refuse reuse */
	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&il->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags | CMD_MAPPED;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/*
	 * Sequence number encodes queue + write index so the response
	 * in il_tx_cmd_complete() can find this slot again.
	 */
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
	    cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct il_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLEGACY_DEBUG
	switch (out_cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
			  "%d bytes at %d[%d]:%d\n",
			  il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
			  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			  q->write_ptr, idx, il->cmd_queue);
		break;
	default:
		D_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
		     idx, il->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (il->ops->txq_update_byte_cnt_tbl)
		/* hardware-specific byte-count bookkeeping, if any */
		il->ops->txq_update_byte_cnt_tbl(il, txq, 0);

	/* Map the command (header + payload) for the device to read. */
	phys_addr =
	    pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
			   PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
				       U32_PAD(cmd->len));

	/* Increment and update queue's write idx */
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
	il_txq_update_write_ptr(il, txq);

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
	return idx;
}
3186
3187
3188
3189
3190
3191
3192
3193
/*
 * il_hcmd_queue_reclaim - reclaim command-queue entries up to @idx
 *
 * Advances read_ptr until it passes @idx, i.e. frees the ring entries
 * the firmware has responded to.  Normally exactly one entry is
 * reclaimed; if more than one would be (a response was skipped), an
 * error is logged and a firmware restart is scheduled.
 *
 * Caller must hold il->hcmd_lock (called from il_tx_cmd_complete()).
 */
static void
il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;

	/* reject indices outside the ring or not currently in use */
	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		/* more than one reclaimed => a command got no response */
		if (nfreed++ > 0) {
			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
			       q->write_ptr, q->read_ptr);
			queue_work(il->workqueue, &il->restart);
		}

	}
}
3219
3220
3221
3222
3223
3224
3225
3226
3227
/*
 * il_tx_cmd_complete - handle the firmware's response to a host command
 *
 * Locates the originating command slot from the response's sequence
 * number, unmaps its DMA buffer, hands the response page to the caller
 * (CMD_WANT_SKB) or invokes the async callback, reclaims the queue
 * entry and, for synchronous commands, wakes the waiter blocked in the
 * send path.
 */
void
il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	int cmd_idx;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct il_device_cmd *cmd;
	struct il_cmd_meta *meta;
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	unsigned long flags;

	/*
	 * Every command response must come back on the command queue;
	 * anything else indicates firmware/driver confusion.
	 */
	if (WARN
	    (txq_id != il->cmd_queue,
	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
	     il->txq[il->cmd_queue].q.write_ptr)) {
		il_print_hex_error(il, pkt, 32);
		return;
	}

	/* huge commands live in the dedicated oversized slot */
	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
	cmd = txq->cmd[cmd_idx];
	meta = &txq->meta[cmd_idx];

	txq->time_stamp = jiffies;

	pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* transfer ownership of the response page to the caller */
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(il, cmd, pkt);

	spin_lock_irqsave(&il->hcmd_lock, flags);

	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(S_HCMD_ACTIVE, &il->status);
		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
		       il_get_cmd_string(cmd->hdr.cmd));
		wake_up(&il->wait_command_queue);
	}

	/* Mark as unmapped so the slot can be reused */
	meta->flags = 0;

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
}
EXPORT_SYMBOL(il_tx_cmd_complete);
3287
/* Module metadata for the shared iwlegacy core (used by 3945 and 4965). */
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
/*
 * bt_coex_active: module parameter enabling WiFi/Bluetooth coexistence;
 * defaults to on, visible read-only in sysfs (S_IRUGO).
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug bitmask, exported for the 3945/4965 driver modules. */
u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

/* Broadcast MAC address constant shared by both drivers. */
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3318
/* Max PHY bit rates (Mbps) used for the advertised MCS rx_highest. */
#define MAX_BIT_RATE_40_MHZ 150
#define MAX_BIT_RATE_20_MHZ 72
/*
 * il_init_ht_hw_capab - fill mac80211 HT capabilities for one band
 *
 * Advertises SGI-20 always; adds 40 MHz width, SGI-40 and MCS 32
 * (rx_mask[4] bit 0) when hw_params says the band supports HT40.
 * rx_mask bytes are set per available RX chain, and tx_params reports
 * a different TX stream count when TX and RX chain counts differ.
 */
static void
il_init_ht_hw_capab(const struct il_priv *il,
		    struct ieee80211_sta_ht_cap *ht_info,
		    enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = il->hw_params.rx_chains_num;
	u8 tx_chains_num = il->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (il->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* MCS 32: duplicate-format 40 MHz rate */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (il->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* one byte of MCS rx_mask per supported RX chain (MCS 0-23) */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |=
		    ((tx_chains_num -
		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
3370
3371
3372
3373
3374int
3375il_init_geos(struct il_priv *il)
3376{
3377 struct il_channel_info *ch;
3378 struct ieee80211_supported_band *sband;
3379 struct ieee80211_channel *channels;
3380 struct ieee80211_channel *geo_ch;
3381 struct ieee80211_rate *rates;
3382 int i = 0;
3383 s8 max_tx_power = 0;
3384
3385 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3386 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3387 D_INFO("Geography modes already initialized.\n");
3388 set_bit(S_GEO_CONFIGURED, &il->status);
3389 return 0;
3390 }
3391
3392 channels =
3393 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
3394 GFP_KERNEL);
3395 if (!channels)
3396 return -ENOMEM;
3397
3398 rates =
3399 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
3400 GFP_KERNEL);
3401 if (!rates) {
3402 kfree(channels);
3403 return -ENOMEM;
3404 }
3405
3406
3407 sband = &il->bands[IEEE80211_BAND_5GHZ];
3408 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3409
3410 sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3411 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3412
3413 if (il->cfg->sku & IL_SKU_N)
3414 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
3415
3416 sband = &il->bands[IEEE80211_BAND_2GHZ];
3417 sband->channels = channels;
3418
3419 sband->bitrates = rates;
3420 sband->n_bitrates = RATE_COUNT_LEGACY;
3421
3422 if (il->cfg->sku & IL_SKU_N)
3423 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
3424
3425 il->ieee_channels = channels;
3426 il->ieee_rates = rates;
3427
3428 for (i = 0; i < il->channel_count; i++) {
3429 ch = &il->channel_info[i];
3430
3431 if (!il_is_channel_valid(ch))
3432 continue;
3433
3434 sband = &il->bands[ch->band];
3435
3436 geo_ch = &sband->channels[sband->n_channels++];
3437
3438 geo_ch->center_freq =
3439 ieee80211_channel_to_frequency(ch->channel, ch->band);
3440 geo_ch->max_power = ch->max_power_avg;
3441 geo_ch->max_antenna_gain = 0xff;
3442 geo_ch->hw_value = ch->channel;
3443
3444 if (il_is_channel_valid(ch)) {
3445 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3446 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
3447
3448 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3449 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
3450
3451 if (ch->flags & EEPROM_CHANNEL_RADAR)
3452 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3453
3454 geo_ch->flags |= ch->ht40_extension_channel;
3455
3456 if (ch->max_power_avg > max_tx_power)
3457 max_tx_power = ch->max_power_avg;
3458 } else {
3459 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3460 }
3461
3462 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
3463 geo_ch->center_freq,
3464 il_is_channel_a_band(ch) ? "5.2" : "2.4",
3465 geo_ch->
3466 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
3467 geo_ch->flags);
3468 }
3469
3470 il->tx_power_device_lmt = max_tx_power;
3471 il->tx_power_user_lmt = max_tx_power;
3472 il->tx_power_next = max_tx_power;
3473
3474 if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
3475 (il->cfg->sku & IL_SKU_A)) {
3476 IL_INFO("Incorrectly detected BG card as ABG. "
3477 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3478 il->pci_dev->device, il->pci_dev->subsystem_device);
3479 il->cfg->sku &= ~IL_SKU_A;
3480 }
3481
3482 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3483 il->bands[IEEE80211_BAND_2GHZ].n_channels,
3484 il->bands[IEEE80211_BAND_5GHZ].n_channels);
3485
3486 set_bit(S_GEO_CONFIGURED, &il->status);
3487
3488 return 0;
3489}
3490EXPORT_SYMBOL(il_init_geos);
3491
3492
3493
3494
3495void
3496il_free_geos(struct il_priv *il)
3497{
3498 kfree(il->ieee_channels);
3499 kfree(il->ieee_rates);
3500 clear_bit(S_GEO_CONFIGURED, &il->status);
3501}
3502EXPORT_SYMBOL(il_free_geos);
3503
3504static bool
3505il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3506 u16 channel, u8 extension_chan_offset)
3507{
3508 const struct il_channel_info *ch_info;
3509
3510 ch_info = il_get_channel_info(il, band, channel);
3511 if (!il_is_channel_valid(ch_info))
3512 return false;
3513
3514 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3515 return !(ch_info->
3516 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3517 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3518 return !(ch_info->
3519 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3520
3521 return false;
3522}
3523
/*
 * il_is_ht40_tx_allowed - may we transmit in 40 MHz right now?
 *
 * @ht_cap: peer station HT capabilities, or NULL to check only our own
 *          channel configuration (used when building RXON).
 *
 * Requires HT enabled and 40 MHz negotiated, the peer (if given) to
 * support HT, the debugfs override not set, and the current staging
 * channel to permit the configured extension-channel placement.
 */
bool
il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!il->ht.enabled || !il->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 here:
	 * a NULL ht_cap means "check channel capability only".
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	/* debugfs knob to force 20 MHz operation */
	if (il->disable_ht40)
		return false;
#endif

	return il_is_channel_extension(il, il->band,
				       le16_to_cpu(il->staging.channel),
				       il->ht.extension_chan_offset);
}
EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3547
3548static u16
3549il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3550{
3551 u16 new_val;
3552 u16 beacon_factor;
3553
3554
3555
3556
3557
3558 if (!beacon_val)
3559 return DEFAULT_BEACON_INTERVAL;
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3574 new_val = beacon_val / beacon_factor;
3575
3576 if (!new_val)
3577 new_val = max_beacon_val;
3578
3579 return new_val;
3580}
3581
/*
 * il_send_rxon_timing - program beacon/listen timing into the device
 *
 * Builds il->timing from the current vif's beacon interval (clamped by
 * il_adjust_beacon_interval()), the device timestamp and the mac80211
 * listen interval, then sends it via the C_RXON_TIMING host command.
 *
 * Must be called with il->mutex held.  Returns the il_send_cmd_pdu()
 * result.
 */
int
il_send_rxon_timing(struct il_priv *il)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = il->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));

	il->timing.timestamp = cpu_to_le64(il->timestamp);
	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/* ATIM window is not used; always 0 */
	il->timing.atim_win = 0;

	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	il->timing.beacon_interval = cpu_to_le16(beacon_int);

	/*
	 * beacon_init_val = time remaining (in usec) until the next
	 * beacon boundary; do_div() leaves the quotient in tsf and
	 * returns the remainder.
	 */
	tsf = il->timestamp;	/* tsf is modifed by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period defaults to 1 when unknown */
	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(il->timing.beacon_interval),
		le32_to_cpu(il->timing.beacon_init_val),
		le16_to_cpu(il->timing.atim_win));

	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
			       &il->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3630
3631void
3632il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3633{
3634 struct il_rxon_cmd *rxon = &il->staging;
3635
3636 if (hw_decrypt)
3637 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3638 else
3639 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3640
3641}
3642EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3643
3644
3645int
3646il_check_rxon_cmd(struct il_priv *il)
3647{
3648 struct il_rxon_cmd *rxon = &il->staging;
3649 bool error = false;
3650
3651 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3652 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3653 IL_WARN("check 2.4G: wrong narrow\n");
3654 error = true;
3655 }
3656 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3657 IL_WARN("check 2.4G: wrong radar\n");
3658 error = true;
3659 }
3660 } else {
3661 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3662 IL_WARN("check 5.2G: not short slot!\n");
3663 error = true;
3664 }
3665 if (rxon->flags & RXON_FLG_CCK_MSK) {
3666 IL_WARN("check 5.2G: CCK!\n");
3667 error = true;
3668 }
3669 }
3670 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3671 IL_WARN("mac/bssid mcast!\n");
3672 error = true;
3673 }
3674
3675
3676 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3677 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3678 IL_WARN("neither 1 nor 6 are basic\n");
3679 error = true;
3680 }
3681
3682 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3683 IL_WARN("aid > 2007\n");
3684 error = true;
3685 }
3686
3687 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3688 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3689 IL_WARN("CCK and short slot\n");
3690 error = true;
3691 }
3692
3693 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3694 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3695 IL_WARN("CCK and auto detect");
3696 error = true;
3697 }
3698
3699 if ((rxon->
3700 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3701 RXON_FLG_TGG_PROTECT_MSK) {
3702 IL_WARN("TGg but no auto-detect\n");
3703 error = true;
3704 }
3705
3706 if (error)
3707 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3708
3709 if (error) {
3710 IL_ERR("Invalid RXON\n");
3711 return -EINVAL;
3712 }
3713 return 0;
3714}
3715EXPORT_SYMBOL(il_check_rxon_cmd);
3716
3717
3718
3719
3720
3721
3722
3723
3724
/*
 * il_full_rxon_required - must we send a full (unassociated) RXON?
 *
 * Compares the staging RXON against the active one.  Returns 1 when
 * any field that cannot be changed while associated differs (or when
 * we are not associated at all), meaning the caller has to tear down
 * the association and send a complete RXON; returns 0 when the change
 * can be applied with an in-place RXON update.
 *
 * CHK(cond) returns 1 when cond holds; CHK_NEQ(a, b) returns 1 when
 * the two values differ.  Both log which check fired.
 */
int
il_full_rxon_required(struct il_priv *il)
{
	const struct il_rxon_cmd *staging = &il->staging;
	const struct il_rxon_cmd *active = &il->active;

#define CHK(cond) \
	if ((cond)) { \
		D_INFO("need full RXON - " #cond "\n"); \
		return 1; \
	}

#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		D_INFO("need full RXON - " \
		       #c1 " != " #c2 " - %d != %d\n", \
		       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!il_is_associated(il));
	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
			      active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/*
	 * Only individual flag bits below force a full RXON; other flag
	 * changes can be applied in place.
	 */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(il_full_rxon_required);
3778
3779u8
3780il_get_lowest_plcp(struct il_priv *il)
3781{
3782
3783
3784
3785
3786 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3787 return RATE_1M_PLCP;
3788 else
3789 return RATE_6M_PLCP;
3790}
3791EXPORT_SYMBOL(il_get_lowest_plcp);
3792
/*
 * _il_set_rxon_ht - encode current HT state into the staging RXON flags
 *
 * Clears all HT-related RXON flags when HT is disabled.  Otherwise sets
 * the protection mode, picks the channel mode (legacy / mixed /
 * pure-40) and the control-channel location bit from the extension
 * channel offset, and lets the hardware-specific hook update the RX
 * chain configuration.
 */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	struct il_rxon_cmd *rxon = &il->staging;

	if (!il->ht.enabled) {
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |=
	    cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/*
	 * Start from a clean channel-mode / control-location state and
	 * rebuild it below.
	 */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, NULL)) {
		/* pure 40 MHz mode only when 20 MHz protection demands it */
		if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* control channel location follows the extension offset */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* mixed 20/40 operation */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid in 40 MHz mode */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		il->ht.protection, il->ht.extension_chan_offset);
}
3858
/* Exported wrapper around _il_set_rxon_ht(); see above for details. */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3865
3866
/*
 * il_get_single_channel_number - pick a valid channel on @band
 *
 * Scans il->channel_info and returns the first valid channel number
 * that differs from the current staging channel, or 0 if none found.
 *
 * Note: min/max are *indices* into channel_info, not channel numbers —
 * entries 0..13 are assumed to be the 2.4 GHz channels and everything
 * from index 14 on 5 GHz (matches the EEPROM band layout).
 */
u8
il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
{
	const struct il_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = il->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		channel = il->channel_info[i].channel;
		/* skip the channel we are currently tuned to */
		if (channel == le16_to_cpu(il->staging.channel))
			continue;

		ch_info = il_get_channel_info(il, band, channel);
		if (il_is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(il_get_single_channel_number);
3896
3897
3898
3899
3900
3901
3902
3903
3904int
3905il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3906{
3907 enum ieee80211_band band = ch->band;
3908 u16 channel = ch->hw_value;
3909
3910 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3911 return 0;
3912
3913 il->staging.channel = cpu_to_le16(channel);
3914 if (band == IEEE80211_BAND_5GHZ)
3915 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3916 else
3917 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3918
3919 il->band = band;
3920
3921 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3922
3923 return 0;
3924}
3925EXPORT_SYMBOL(il_set_rxon_channel);
3926
3927void
3928il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
3929 struct ieee80211_vif *vif)
3930{
3931 if (band == IEEE80211_BAND_5GHZ) {
3932 il->staging.flags &=
3933 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3934 RXON_FLG_CCK_MSK);
3935 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3936 } else {
3937
3938 if (vif && vif->bss_conf.use_short_slot)
3939 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3940 else
3941 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3942
3943 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3944 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3945 il->staging.flags &= ~RXON_FLG_CCK_MSK;
3946 }
3947}
3948EXPORT_SYMBOL(il_set_flags_for_band);
3949
3950
3951
3952
/*
 * il_connection_init_rx_config - build a fresh staging RXON
 *
 * Resets il->staging and fills it with defaults for the current
 * interface type (ESS for none/station, IBSS for ad-hoc), the active
 * channel (falling back to channel_info[0]), band flags, basic rates
 * and our MAC address.  Other interface types are rejected with an
 * error log.
 */
void
il_connection_init_rx_config(struct il_priv *il)
{
	const struct il_channel_info *ch_info;

	memset(&il->staging, 0, sizeof(il->staging));

	if (!il->vif) {
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
	} else if (il->vif->type == NL80211_IFTYPE_STATION) {
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
	} else if (il->vif->type == NL80211_IFTYPE_ADHOC) {
		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		il->staging.filter_flags =
		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
	} else {
		IL_ERR("Unsupported interface type %d\n", il->vif->type);
		return;
	}

#if 0
	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(il->hw)->short_preamble)
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));

	/* active channel unknown: fall back to the first EEPROM channel */
	if (!ch_info)
		ch_info = &il->channel_info[0];

	il->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, il->band, il->vif);

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	il->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIXED and PURE40 mode flags: start in 20 MHz */
	il->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (il->vif)
		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);

	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
4010
4011void
4012il_set_rate(struct il_priv *il)
4013{
4014 const struct ieee80211_supported_band *hw = NULL;
4015 struct ieee80211_rate *rate;
4016 int i;
4017
4018 hw = il_get_hw_mode(il, il->band);
4019 if (!hw) {
4020 IL_ERR("Failed to set rate: unable to get hw mode\n");
4021 return;
4022 }
4023
4024 il->active_rate = 0;
4025
4026 for (i = 0; i < hw->n_bitrates; i++) {
4027 rate = &(hw->bitrates[i]);
4028 if (rate->hw_value < RATE_COUNT_LEGACY)
4029 il->active_rate |= (1 << rate->hw_value);
4030 }
4031
4032 D_RATE("Set active_rate = %0x\n", il->active_rate);
4033
4034 il->staging.cck_basic_rates =
4035 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4036
4037 il->staging.ofdm_basic_rates =
4038 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4039}
4040EXPORT_SYMBOL(il_set_rate);
4041
4042void
4043il_chswitch_done(struct il_priv *il, bool is_success)
4044{
4045 if (test_bit(S_EXIT_PENDING, &il->status))
4046 return;
4047
4048 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4049 ieee80211_chswitch_done(il->vif, is_success);
4050}
4051EXPORT_SYMBOL(il_chswitch_done);
4052
/*
 * il_hdl_csa - handle the firmware's channel-switch notification
 *
 * On success (status 0 and the channel matches the one we asked to
 * switch to) commits the new channel into both active and staging RXON
 * and completes the pending switch; otherwise reports failure.  Ignored
 * when no switch is pending.
 */
void
il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);
	/* cast away const-ness to update the active RXON's channel */
	struct il_rxon_cmd *rxon = (void *)&il->active;

	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		/* both fields are __le16: direct assignment is correct */
		rxon->channel = csa->channel;
		il->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);
4075
4076#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump every field of the staging RXON command to the debug log
 * (compiled only with CONFIG_IWLEGACY_DEBUG). */
void
il_print_rx_config_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;

	D_RADIO("RX CONFIG:\n");
	/* Raw hex dump first, then the decoded fields. */
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
4095#endif
4096
4097
4098
/*
 * il_irq_handle_error - react to a firmware (uCode) error
 *
 * Flags the firmware error, aborts any in-flight host command, dumps
 * diagnostic state, and — unless the driver is exiting — queues the
 * restart worker when the restart_fw module parameter allows it.
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Mark the firmware as failed. */
	set_bit(S_FW_ERROR, &il->status);

	/* Abort whatever host command is currently active. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	/* Device-specific error log; FH register dump when supported. */
	il->ops->dump_nic_error_log(il);
	if (il->ops->dump_fh)
		il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il);
#endif

	/* Release anyone blocked waiting for a command response. */
	wake_up(&il->wait_command_queue);

	/* Clear READY so no further host commands are attempted
	 * until the restart completes. */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);
4133
4134static int
4135_il_apm_stop_master(struct il_priv *il)
4136{
4137 int ret = 0;
4138
4139
4140 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4141
4142 ret =
4143 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4144 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4145 if (ret < 0)
4146 IL_WARN("Master Disable Timed Out, 100 usec\n");
4147
4148 D_INFO("stop master\n");
4149
4150 return ret;
4151}
4152
/*
 * _il_apm_stop - stop device power management and reset the device
 *
 * Caller must hold il->reg_lock (asserted).  The sequence is
 * order-critical: stop bus mastering, software-reset the device,
 * wait briefly, then clear INIT_DONE so the device may enter a
 * low-power state.
 */
void
_il_apm_stop(struct il_priv *il)
{
	lockdep_assert_held(&il->reg_lock);

	D_INFO("Stop card, put in low power state\n");

	/* Stop DMA before resetting. */
	_il_apm_stop_master(il);

	/* Software reset of the whole device. */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* Settle time after reset -- NOTE(review): 10 usec per the
	 * original driver; required length not documented here. */
	udelay(10);

	/* Clear "initialization complete" so the device can enter a
	 * low-power state. */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(_il_apm_stop);
4175
4176void
4177il_apm_stop(struct il_priv *il)
4178{
4179 unsigned long flags;
4180
4181 spin_lock_irqsave(&il->reg_lock, flags);
4182 _il_apm_stop(il);
4183 spin_unlock_irqrestore(&il->reg_lock, flags);
4184}
4185EXPORT_SYMBOL(il_apm_stop);
4186
4187
4188
4189
4190
4191
/*
 * il_apm_init - bring up the device's basic power-management/clocking
 *
 * Programs the CSR chicken bits and interface config, tunes PCIe
 * L0S/L1 behavior, requests "init done", waits for the MAC clock to
 * become ready, then enables the DMA (and, when used, BSM) clocks.
 *
 * Returns 0 on success or the negative timeout error from
 * _il_poll_bit() if the MAC clock never becomes ready.
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/* Disable the L0S exit timer (per the chicken-bit's name;
	 * rationale is hardware-specific). */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* On RX, go from L1 straight to L0 (skip L0S), per bit name. */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Program the HPET memory debug register to its required value. */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/* Enable HAP INTA wake from L1a. */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * L0S and L1 are treated as mutually exclusive here: when PCIe
	 * L1 is enabled in the link control register, L0S is disabled,
	 * and vice versa.  Only done when the config asks for it.
	 */
	if (il->cfg->set_l0s) {
		lctl = il_pcie_link_ctl(il);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
		    PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1 enabled -> disable L0S */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1 disabled -> enable L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Device-specific PLL configuration, when one is provided. */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG,
			   il->cfg->pll_cfg_val);

	/* Tell the device we are done with basic init. */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Wait (up to 25 ms) for the MAC clock to stabilize. */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Request the DMA clock; devices that use the bootstrap state
	 * machine (BSM) to load firmware also need the BSM clock.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active in the PCI device status register. */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);
4299
4300int
4301il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4302{
4303 int ret;
4304 s8 prev_tx_power;
4305 bool defer;
4306
4307 lockdep_assert_held(&il->mutex);
4308
4309 if (il->tx_power_user_lmt == tx_power && !force)
4310 return 0;
4311
4312 if (!il->ops->send_tx_power)
4313 return -EOPNOTSUPP;
4314
4315
4316 if (tx_power < 0) {
4317 IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
4318 return -EINVAL;
4319 }
4320
4321 if (tx_power > il->tx_power_device_lmt) {
4322 IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
4323 tx_power, il->tx_power_device_lmt);
4324 return -EINVAL;
4325 }
4326
4327 if (!il_is_ready_rf(il))
4328 return -EIO;
4329
4330
4331
4332 il->tx_power_next = tx_power;
4333
4334
4335 defer = test_bit(S_SCANNING, &il->status) ||
4336 memcmp(&il->active, &il->staging, sizeof(il->staging));
4337 if (defer && !force) {
4338 D_INFO("Deferring tx power set\n");
4339 return 0;
4340 }
4341
4342 prev_tx_power = il->tx_power_user_lmt;
4343 il->tx_power_user_lmt = tx_power;
4344
4345 ret = il->ops->send_tx_power(il);
4346
4347
4348 if (ret) {
4349 il->tx_power_user_lmt = prev_tx_power;
4350 il->tx_power_next = prev_tx_power;
4351 }
4352 return ret;
4353}
4354EXPORT_SYMBOL(il_set_tx_power);
4355
4356void
4357il_send_bt_config(struct il_priv *il)
4358{
4359 struct il_bt_cmd bt_cmd = {
4360 .lead_time = BT_LEAD_TIME_DEF,
4361 .max_kill = BT_MAX_KILL_DEF,
4362 .kill_ack_mask = 0,
4363 .kill_cts_mask = 0,
4364 };
4365
4366 if (!bt_coex_active)
4367 bt_cmd.flags = BT_COEX_DISABLE;
4368 else
4369 bt_cmd.flags = BT_COEX_ENABLE;
4370
4371 D_INFO("BT coex %s\n",
4372 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4373
4374 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4375 IL_ERR("failed to send BT Coex Config\n");
4376}
4377EXPORT_SYMBOL(il_send_bt_config);
4378
4379int
4380il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4381{
4382 struct il_stats_cmd stats_cmd = {
4383 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4384 };
4385
4386 if (flags & CMD_ASYNC)
4387 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4388 &stats_cmd, NULL);
4389 else
4390 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4391 &stats_cmd);
4392}
4393EXPORT_SYMBOL(il_send_stats_request);
4394
/*
 * il_hdl_pm_sleep - handle a power-management sleep notification
 *
 * Purely informational: the notification is only logged, and only in
 * debug builds.
 */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *notif = &pkt->u.sleep_notif;

	D_RX("sleep mode: %d, src: %d\n",
	     notif->pm_sleep_mode, notif->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4406
4407void
4408il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4409{
4410 struct il_rx_pkt *pkt = rxb_addr(rxb);
4411 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4412 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4413 il_get_cmd_string(pkt->hdr.cmd));
4414 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4415}
4416EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4417
/* Handle an error-response packet from the uCode: decode and log it. */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4432
/* Zero all accumulated interrupt statistics counters. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4438
4439int
4440il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
4441 const struct ieee80211_tx_queue_params *params)
4442{
4443 struct il_priv *il = hw->priv;
4444 unsigned long flags;
4445 int q;
4446
4447 D_MAC80211("enter\n");
4448
4449 if (!il_is_ready_rf(il)) {
4450 D_MAC80211("leave - RF not ready\n");
4451 return -EIO;
4452 }
4453
4454 if (queue >= AC_NUM) {
4455 D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
4456 return 0;
4457 }
4458
4459 q = AC_NUM - 1 - queue;
4460
4461 spin_lock_irqsave(&il->lock, flags);
4462
4463 il->qos_data.def_qos_parm.ac[q].cw_min =
4464 cpu_to_le16(params->cw_min);
4465 il->qos_data.def_qos_parm.ac[q].cw_max =
4466 cpu_to_le16(params->cw_max);
4467 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
4468 il->qos_data.def_qos_parm.ac[q].edca_txop =
4469 cpu_to_le16((params->txop * 32));
4470
4471 il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
4472
4473 spin_unlock_irqrestore(&il->lock, flags);
4474
4475 D_MAC80211("leave\n");
4476 return 0;
4477}
4478EXPORT_SYMBOL(il_mac_conf_tx);
4479
4480int
4481il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4482{
4483 struct il_priv *il = hw->priv;
4484 int ret;
4485
4486 D_MAC80211("enter\n");
4487
4488 ret = (il->ibss_manager == IL_IBSS_MANAGER);
4489
4490 D_MAC80211("leave ret %d\n", ret);
4491 return ret;
4492}
4493EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4494
4495static int
4496il_set_mode(struct il_priv *il)
4497{
4498 il_connection_init_rx_config(il);
4499
4500 if (il->ops->set_rxon_chain)
4501 il->ops->set_rxon_chain(il);
4502
4503 return il_commit_rxon(il);
4504}
4505
4506int
4507il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4508{
4509 struct il_priv *il = hw->priv;
4510 int err;
4511 bool reset;
4512
4513 mutex_lock(&il->mutex);
4514 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4515
4516 if (!il_is_ready_rf(il)) {
4517 IL_WARN("Try to add interface when device not ready\n");
4518 err = -EINVAL;
4519 goto out;
4520 }
4521
4522
4523
4524
4525
4526 reset = (il->vif == vif);
4527 if (il->vif && !reset) {
4528 err = -EOPNOTSUPP;
4529 goto out;
4530 }
4531
4532 il->vif = vif;
4533 il->iw_mode = vif->type;
4534
4535 err = il_set_mode(il);
4536 if (err) {
4537 IL_WARN("Fail to set mode %d\n", vif->type);
4538 if (!reset) {
4539 il->vif = NULL;
4540 il->iw_mode = NL80211_IFTYPE_STATION;
4541 }
4542 }
4543
4544out:
4545 D_MAC80211("leave err %d\n", err);
4546 mutex_unlock(&il->mutex);
4547
4548 return err;
4549}
4550EXPORT_SYMBOL(il_mac_add_interface);
4551
4552static void
4553il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
4554 bool mode_change)
4555{
4556 lockdep_assert_held(&il->mutex);
4557
4558 if (il->scan_vif == vif) {
4559 il_scan_cancel_timeout(il, 200);
4560 il_force_scan_end(il);
4561 }
4562
4563 if (!mode_change)
4564 il_set_mode(il);
4565
4566}
4567
4568void
4569il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4570{
4571 struct il_priv *il = hw->priv;
4572
4573 mutex_lock(&il->mutex);
4574 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4575
4576 WARN_ON(il->vif != vif);
4577 il->vif = NULL;
4578
4579 il_teardown_interface(il, vif, false);
4580 memset(il->bssid, 0, ETH_ALEN);
4581
4582 D_MAC80211("leave\n");
4583 mutex_unlock(&il->mutex);
4584}
4585EXPORT_SYMBOL(il_mac_remove_interface);
4586
4587int
4588il_alloc_txq_mem(struct il_priv *il)
4589{
4590 if (!il->txq)
4591 il->txq =
4592 kzalloc(sizeof(struct il_tx_queue) *
4593 il->cfg->num_of_queues, GFP_KERNEL);
4594 if (!il->txq) {
4595 IL_ERR("Not enough memory for txq\n");
4596 return -ENOMEM;
4597 }
4598 return 0;
4599}
4600EXPORT_SYMBOL(il_alloc_txq_mem);
4601
4602void
4603il_free_txq_mem(struct il_priv *il)
4604{
4605 kfree(il->txq);
4606 il->txq = NULL;
4607}
4608EXPORT_SYMBOL(il_free_txq_mem);
4609
4610int
4611il_force_reset(struct il_priv *il, bool external)
4612{
4613 struct il_force_reset *force_reset;
4614
4615 if (test_bit(S_EXIT_PENDING, &il->status))
4616 return -EINVAL;
4617
4618 force_reset = &il->force_reset;
4619 force_reset->reset_request_count++;
4620 if (!external) {
4621 if (force_reset->last_force_reset_jiffies &&
4622 time_after(force_reset->last_force_reset_jiffies +
4623 force_reset->reset_duration, jiffies)) {
4624 D_INFO("force reset rejected\n");
4625 force_reset->reset_reject_count++;
4626 return -EAGAIN;
4627 }
4628 }
4629 force_reset->reset_success_count++;
4630 force_reset->last_force_reset_jiffies = jiffies;
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641 if (!external && !il->cfg->mod_params->restart_fw) {
4642 D_INFO("Cancel firmware reload based on "
4643 "module parameter setting\n");
4644 return 0;
4645 }
4646
4647 IL_ERR("On demand firmware reload\n");
4648
4649
4650 set_bit(S_FW_ERROR, &il->status);
4651 wake_up(&il->wait_command_queue);
4652
4653
4654
4655
4656 clear_bit(S_READY, &il->status);
4657 queue_work(il->workqueue, &il->restart);
4658
4659 return 0;
4660}
4661
4662int
4663il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4664 enum nl80211_iftype newtype, bool newp2p)
4665{
4666 struct il_priv *il = hw->priv;
4667 int err;
4668
4669 mutex_lock(&il->mutex);
4670 D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
4671 vif->type, vif->addr, newtype, newp2p);
4672
4673 if (newp2p) {
4674 err = -EOPNOTSUPP;
4675 goto out;
4676 }
4677
4678 if (!il->vif || !il_is_ready_rf(il)) {
4679
4680
4681
4682
4683 err = -EBUSY;
4684 goto out;
4685 }
4686
4687
4688 il_teardown_interface(il, vif, true);
4689 vif->type = newtype;
4690 vif->p2p = false;
4691 err = il_set_mode(il);
4692 WARN_ON(err);
4693
4694
4695
4696
4697
4698
4699
4700 err = 0;
4701
4702out:
4703 D_MAC80211("leave err %d\n", err);
4704 mutex_unlock(&il->mutex);
4705
4706 return err;
4707}
4708EXPORT_SYMBOL(il_mac_change_interface);
4709
4710
4711
4712
4713
4714static int
4715il_check_stuck_queue(struct il_priv *il, int cnt)
4716{
4717 struct il_tx_queue *txq = &il->txq[cnt];
4718 struct il_queue *q = &txq->q;
4719 unsigned long timeout;
4720 int ret;
4721
4722 if (q->read_ptr == q->write_ptr) {
4723 txq->time_stamp = jiffies;
4724 return 0;
4725 }
4726
4727 timeout =
4728 txq->time_stamp +
4729 msecs_to_jiffies(il->cfg->wd_timeout);
4730
4731 if (time_after(jiffies, timeout)) {
4732 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4733 il->cfg->wd_timeout);
4734 ret = il_force_reset(il, false);
4735 return (ret == -EAGAIN) ? 0 : 1;
4736 }
4737
4738 return 0;
4739}
4740
4741
4742
4743
4744
/* Watchdog tick period: poll four times per configured timeout. */
#define IL_WD_TICK(timeout) ((timeout) / 4)
4746
4747
4748
4749
4750
4751void
4752il_bg_watchdog(unsigned long data)
4753{
4754 struct il_priv *il = (struct il_priv *)data;
4755 int cnt;
4756 unsigned long timeout;
4757
4758 if (test_bit(S_EXIT_PENDING, &il->status))
4759 return;
4760
4761 timeout = il->cfg->wd_timeout;
4762 if (timeout == 0)
4763 return;
4764
4765
4766 if (il_check_stuck_queue(il, il->cmd_queue))
4767 return;
4768
4769
4770 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4771
4772 if (cnt == il->cmd_queue)
4773 continue;
4774 if (il_check_stuck_queue(il, cnt))
4775 return;
4776 }
4777
4778 mod_timer(&il->watchdog,
4779 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4780}
4781EXPORT_SYMBOL(il_bg_watchdog);
4782
4783void
4784il_setup_watchdog(struct il_priv *il)
4785{
4786 unsigned int timeout = il->cfg->wd_timeout;
4787
4788 if (timeout)
4789 mod_timer(&il->watchdog,
4790 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4791 else
4792 del_timer(&il->watchdog);
4793}
4794EXPORT_SYMBOL(il_setup_watchdog);
4795
4796
4797
4798
4799
4800
4801
4802u32
4803il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4804{
4805 u32 quot;
4806 u32 rem;
4807 u32 interval = beacon_interval * TIME_UNIT;
4808
4809 if (!interval || !usec)
4810 return 0;
4811
4812 quot =
4813 (usec /
4814 interval) & (il_beacon_time_mask_high(il,
4815 il->hw_params.
4816 beacon_time_tsf_bits) >> il->
4817 hw_params.beacon_time_tsf_bits);
4818 rem =
4819 (usec % interval) & il_beacon_time_mask_low(il,
4820 il->hw_params.
4821 beacon_time_tsf_bits);
4822
4823 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4824}
4825EXPORT_SYMBOL(il_usecs_to_beacons);
4826
4827
4828
4829
4830__le32
4831il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4832 u32 beacon_interval)
4833{
4834 u32 base_low = base & il_beacon_time_mask_low(il,
4835 il->hw_params.
4836 beacon_time_tsf_bits);
4837 u32 addon_low = addon & il_beacon_time_mask_low(il,
4838 il->hw_params.
4839 beacon_time_tsf_bits);
4840 u32 interval = beacon_interval * TIME_UNIT;
4841 u32 res = (base & il_beacon_time_mask_high(il,
4842 il->hw_params.
4843 beacon_time_tsf_bits)) +
4844 (addon & il_beacon_time_mask_high(il,
4845 il->hw_params.
4846 beacon_time_tsf_bits));
4847
4848 if (base_low > addon_low)
4849 res += base_low - addon_low;
4850 else if (base_low < addon_low) {
4851 res += interval + base_low - addon_low;
4852 res += (1 << il->hw_params.beacon_time_tsf_bits);
4853 } else
4854 res += (1 << il->hw_params.beacon_time_tsf_bits);
4855
4856 return cpu_to_le32(res);
4857}
4858EXPORT_SYMBOL(il_add_beacon_time);
4859
4860#ifdef CONFIG_PM
4861
/*
 * il_pci_suspend - PM suspend/freeze/poweroff hook
 *
 * Puts the device into its low-power state before the bus sleeps.
 */
int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	il_apm_stop(il);

	return 0;
}
EXPORT_SYMBOL(il_pci_suspend);
4880
4881int
4882il_pci_resume(struct device *device)
4883{
4884 struct pci_dev *pdev = to_pci_dev(device);
4885 struct il_priv *il = pci_get_drvdata(pdev);
4886 bool hw_rfkill = false;
4887
4888
4889
4890
4891
4892 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4893
4894 il_enable_interrupts(il);
4895
4896 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4897 hw_rfkill = true;
4898
4899 if (hw_rfkill)
4900 set_bit(S_RFKILL, &il->status);
4901 else
4902 clear_bit(S_RFKILL, &il->status);
4903
4904 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
4905
4906 return 0;
4907}
4908EXPORT_SYMBOL(il_pci_resume);
4909
/*
 * PM hooks shared by the iwlegacy PCI drivers: every suspend-type
 * transition (suspend/freeze/poweroff) maps to il_pci_suspend and
 * every resume-type transition (resume/thaw/restore) to il_pci_resume.
 */
const struct dev_pm_ops il_pm_ops = {
	.suspend = il_pci_suspend,
	.resume = il_pci_resume,
	.freeze = il_pci_suspend,
	.thaw = il_pci_resume,
	.poweroff = il_pci_suspend,
	.restore = il_pci_resume,
};
EXPORT_SYMBOL(il_pm_ops);
4919
4920#endif
4921
4922static void
4923il_update_qos(struct il_priv *il)
4924{
4925 if (test_bit(S_EXIT_PENDING, &il->status))
4926 return;
4927
4928 il->qos_data.def_qos_parm.qos_flags = 0;
4929
4930 if (il->qos_data.qos_active)
4931 il->qos_data.def_qos_parm.qos_flags |=
4932 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
4933
4934 if (il->ht.enabled)
4935 il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
4936
4937 D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
4938 il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);
4939
4940 il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
4941 &il->qos_data.def_qos_parm, NULL);
4942}
4943
4944
4945
4946
/*
 * il_mac_config - mac80211 config() callback
 *
 * Applies changes from hw->conf (channel, HT/SMPS, power save, TX
 * power) to the staging RXON, then commits the RXON when it differs
 * from the active configuration.  Channel changes are deferred while
 * a scan is active.
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {

		/* Record the requested SM power-save mode. */
		il->current_ht_config.smps = conf->smps_mode;

		/* Recalculate RX chain usage; the result takes effect
		 * when RXON is committed below. */
		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* A call with changed == 0 still (re)validates the channel. */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		/* Channel changes are skipped entirely while scanning. */
		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Track HT enable and 40 MHz extension-channel state. */
		if (il->ht.enabled != conf_is_ht(conf)) {
			il->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (il->ht.enabled) {
			if (conf_is_ht40_minus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				il->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				il->ht.is_40mhz = true;
			} else {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				il->ht.is_40mhz = false;
			}
		} else
			il->ht.is_40mhz = false;

		/* Default to no protection; il_ht_conf() later derives
		 * the real value from the BSS HT operation mode. */
		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* When the channel actually changes, clear the staging
		 * flags so band-specific flags are rebuilt below. */
		if ((le16_to_cpu(il->staging.channel) != ch))
			il->staging.flags = 0;

		il_set_rxon_channel(il, channel);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, channel->band, il->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->ops->update_bcast_stations)
			ret = il->ops->update_bcast_stations(il);

set_ch_out:
		/* Recompute the supported-rate bitmap for the band. */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	/* Commit only when staging really differs from active. */
	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il);

out:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_config);
5097
5098void
5099il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5100{
5101 struct il_priv *il = hw->priv;
5102 unsigned long flags;
5103
5104 mutex_lock(&il->mutex);
5105 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
5106
5107 spin_lock_irqsave(&il->lock, flags);
5108
5109 memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
5110
5111
5112 if (il->beacon_skb)
5113 dev_kfree_skb(il->beacon_skb);
5114 il->beacon_skb = NULL;
5115 il->timestamp = 0;
5116
5117 spin_unlock_irqrestore(&il->lock, flags);
5118
5119 il_scan_cancel_timeout(il, 100);
5120 if (!il_is_ready_rf(il)) {
5121 D_MAC80211("leave - not ready\n");
5122 mutex_unlock(&il->mutex);
5123 return;
5124 }
5125
5126
5127 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5128 il_commit_rxon(il);
5129
5130 il_set_rate(il);
5131
5132 D_MAC80211("leave\n");
5133 mutex_unlock(&il->mutex);
5134}
5135EXPORT_SYMBOL(il_mac_reset_tsf);
5136
5137static void
5138il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5139{
5140 struct il_ht_config *ht_conf = &il->current_ht_config;
5141 struct ieee80211_sta *sta;
5142 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5143
5144 D_ASSOC("enter:\n");
5145
5146 if (!il->ht.enabled)
5147 return;
5148
5149 il->ht.protection =
5150 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5151 il->ht.non_gf_sta_present =
5152 !!(bss_conf->
5153 ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5154
5155 ht_conf->single_chain_sufficient = false;
5156
5157 switch (vif->type) {
5158 case NL80211_IFTYPE_STATION:
5159 rcu_read_lock();
5160 sta = ieee80211_find_sta(vif, bss_conf->bssid);
5161 if (sta) {
5162 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
5163 int maxstreams;
5164
5165 maxstreams =
5166 (ht_cap->mcs.
5167 tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
5168 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5169 maxstreams += 1;
5170
5171 if (ht_cap->mcs.rx_mask[1] == 0 &&
5172 ht_cap->mcs.rx_mask[2] == 0)
5173 ht_conf->single_chain_sufficient = true;
5174 if (maxstreams <= 1)
5175 ht_conf->single_chain_sufficient = true;
5176 } else {
5177
5178
5179
5180
5181
5182
5183 ht_conf->single_chain_sufficient = true;
5184 }
5185 rcu_read_unlock();
5186 break;
5187 case NL80211_IFTYPE_ADHOC:
5188 ht_conf->single_chain_sufficient = true;
5189 break;
5190 default:
5191 break;
5192 }
5193
5194 D_ASSOC("leave\n");
5195}
5196
5197static inline void
5198il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5199{
5200
5201
5202
5203
5204
5205 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5206 il->staging.assoc_id = 0;
5207 il_commit_rxon(il);
5208}
5209
5210static void
5211il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5212{
5213 struct il_priv *il = hw->priv;
5214 unsigned long flags;
5215 __le64 timestamp;
5216 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
5217
5218 if (!skb)
5219 return;
5220
5221 D_MAC80211("enter\n");
5222
5223 lockdep_assert_held(&il->mutex);
5224
5225 if (!il->beacon_enabled) {
5226 IL_ERR("update beacon with no beaconing enabled\n");
5227 dev_kfree_skb(skb);
5228 return;
5229 }
5230
5231 spin_lock_irqsave(&il->lock, flags);
5232
5233 if (il->beacon_skb)
5234 dev_kfree_skb(il->beacon_skb);
5235
5236 il->beacon_skb = skb;
5237
5238 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
5239 il->timestamp = le64_to_cpu(timestamp);
5240
5241 D_MAC80211("leave\n");
5242 spin_unlock_irqrestore(&il->lock, flags);
5243
5244 if (!il_is_ready_rf(il)) {
5245 D_MAC80211("leave - RF not ready\n");
5246 return;
5247 }
5248
5249 il->ops->post_associate(il);
5250}
5251
/*
 * il_mac_bss_info_changed - mac80211 bss_info_changed() callback
 *
 * Propagates BSS configuration changes (QoS, beaconing, BSSID, ERP
 * preamble/CTS protection, HT, association and IBSS membership) into
 * the staging RXON and device state.  The order of the change-flag
 * branches below is significant.
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: changes 0x%x\n", changes);

	if (!il_is_alive(il)) {
		D_MAC80211("leave - not alive\n");
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		il->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/* Record beaconing state; acted on further below. */
		if (vif->bss_conf.enable_beacon)
			il->beacon_enabled = true;
		else
			il->beacon_enabled = false;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/* Abort any in-progress scan before switching BSSID;
		 * give up entirely if the abort fails. */
		if (il_scan_cancel_timeout(il, 100)) {
			D_MAC80211("leave - scan abort failed\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* Stage the new BSSID in the RXON. */
		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

		/* Keep the driver's own copy in sync. */
		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
	}

	/* IBSS beacon updates are handled here, not in the
	 * BEACON_ENABLED branch above. */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies outside the 5 GHz band. */
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* NOTE(review): intentionally empty in the original
		 * driver -- basic rates appear to be handled via
		 * il_set_rate(); confirm before adding logic here. */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->last_tsf;

			if (!il_is_rfkill(il))
				il->ops->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes && il_is_associated(il) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il);
		if (!ret) {
			/* Sync active with staging after a successful
			 * RXON-assoc update. */
			memcpy((void *)&il->active, &il->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(il->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->ops->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = il->ops->manage_ibss_station(il, vif,
						   bss_conf->ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5402
/*
 * il_isr - top-half PCI interrupt handler
 *
 * Masks device interrupts, reads both interrupt status registers
 * (CSR and flow handler), and defers the real work to the IRQ
 * tasklet.  Returns IRQ_NONE when the interrupt was not ours.
 */
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/* Save the current mask, then mask all interrupts.
	 * NOTE(review): presumably restored when interrupts are
	 * re-enabled via il_enable_interrupts() -- confirm. */
	inta_mask = _il_rd(il, CSR_INT_MASK);
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Read both interrupt status registers. */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* No bits set in either register: shared IRQ that was not
	 * for us, or the cause vanished before we read it. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* All-ones (or the 0xa5a5a5a0 pattern) means the device has
	 * disappeared from the bus. */
	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	/* The scheduler interrupt bit is not handled here. */
	inta &= ~CSR_INT_BIT_SCD;

	/* Hand off the real processing to the tasklet. */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts only if they were enabled before. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);
5463
5464
5465
5466
5467
5468void
5469il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5470 __le16 fc, __le32 *tx_flags)
5471{
5472 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5473 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5474 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5475 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5476
5477 if (!ieee80211_is_mgmt(fc))
5478 return;
5479
5480 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5481 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5482 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5483 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5484 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5485 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5486 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5487 break;
5488 }
5489 } else if (info->control.rates[0].
5490 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5491 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5492 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5493 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5494 }
5495}
5496EXPORT_SYMBOL(il_tx_cmd_protection);
5497