1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/init.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/skbuff.h>
41#include <net/mac80211.h>
42
43#include "common.h"
44
45int
46_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
47{
48 const int interval = 10;
49 int t = 0;
50
51 do {
52 if ((_il_rd(il, addr) & mask) == (bits & mask))
53 return t;
54 udelay(interval);
55 t += interval;
56 } while (t < timeout);
57
58 return -ETIMEDOUT;
59}
60EXPORT_SYMBOL(_il_poll_bit);
61
62void
63il_set_bit(struct il_priv *p, u32 r, u32 m)
64{
65 unsigned long reg_flags;
66
67 spin_lock_irqsave(&p->reg_lock, reg_flags);
68 _il_set_bit(p, r, m);
69 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
70}
71EXPORT_SYMBOL(il_set_bit);
72
73void
74il_clear_bit(struct il_priv *p, u32 r, u32 m)
75{
76 unsigned long reg_flags;
77
78 spin_lock_irqsave(&p->reg_lock, reg_flags);
79 _il_clear_bit(p, r, m);
80 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
81}
82EXPORT_SYMBOL(il_clear_bit);
83
/*
 * Request direct access to the NIC's internal registers/memory.
 *
 * Sets the MAC_ACCESS_REQ flag in CSR_GP_CNTRL and then polls (up to
 * 15000 usec) for MAC_ACCESS_EN, while also requiring the clock-ready
 * bit and tolerating the going-to-sleep bit in the polled mask.
 *
 * Returns true when access was granted.  On timeout it logs the current
 * CSR_GP_CNTRL value once, forces an NMI to the device via CSR_RESET
 * (presumably to trigger firmware error recovery - confirm against the
 * device documentation) and returns false.
 *
 * Caller must hold reg_lock; pair with _il_release_nic_access().
 */
bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* Announce the host's intent to access NIC internals. */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Wait for the device to grant access (clock ready / not asleep). */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			  "(CSR_GP_CNTRL 0x%08x)\n", val);
		/* Kick the device with a forced NMI before giving up. */
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
125
126int
127il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
128{
129 const int interval = 10;
130 int t = 0;
131
132 do {
133 if ((il_rd(il, addr) & mask) == mask)
134 return t;
135 udelay(interval);
136 t += interval;
137 } while (t < timeout);
138
139 return -ETIMEDOUT;
140}
141EXPORT_SYMBOL(il_poll_bit);
142
/*
 * Read a periphery (PRPH) register with reg_lock held and NIC access
 * grabbed for the duration of the read.
 *
 * NOTE(review): the return value of _il_grab_nic_access() is ignored
 * here, unlike in il_wr_prph() which skips the access on failure.  The
 * read is attempted regardless - confirm whether that asymmetry is
 * intentional.
 */
u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
EXPORT_SYMBOL(il_rd_prph);
157
158void
159il_wr_prph(struct il_priv *il, u32 addr, u32 val)
160{
161 unsigned long reg_flags;
162
163 spin_lock_irqsave(&il->reg_lock, reg_flags);
164 if (likely(_il_grab_nic_access(il))) {
165 _il_wr_prph(il, addr, val);
166 _il_release_nic_access(il);
167 }
168 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
169}
170EXPORT_SYMBOL(il_wr_prph);
171
/*
 * Read one word of device (target) memory at @addr via the HBUS
 * indirect-access registers, with reg_lock held.
 *
 * NOTE(review): as in il_rd_prph(), the _il_grab_nic_access() result is
 * ignored and the access proceeds regardless - confirm intent.
 */
u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	/* Latch the address, then read the data register. */
	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
EXPORT_SYMBOL(il_read_targ_mem);
189
190void
191il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
192{
193 unsigned long reg_flags;
194
195 spin_lock_irqsave(&il->reg_lock, reg_flags);
196 if (likely(_il_grab_nic_access(il))) {
197 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
198 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
199 _il_release_nic_access(il);
200 }
201 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
202}
203EXPORT_SYMBOL(il_write_targ_mem);
204
/*
 * Map a host-command / notification opcode to its symbolic name for
 * logging.  Each IL_CMD(x) case expands to a "return the stringified
 * name" for that opcode; unknown opcodes yield "UNKNOWN".
 */
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(il_get_cmd_string);
256
257#define HOST_COMPLETE_TIMEOUT (HZ / 2)
258
259static void
260il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
261 struct il_rx_pkt *pkt)
262{
263 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
264 IL_ERR("Bad return from %s (0x%08X)\n",
265 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
266 return;
267 }
268#ifdef CONFIG_IWLEGACY_DEBUG
269 switch (cmd->hdr.cmd) {
270 case C_TX_LINK_QUALITY_CMD:
271 case C_SENSITIVITY:
272 D_HC_DUMP("back from %s (0x%08X)\n",
273 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
274 break;
275 default:
276 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
277 pkt->hdr.flags);
278 }
279#endif
280}
281
282static int
283il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
284{
285 int ret;
286
287 BUG_ON(!(cmd->flags & CMD_ASYNC));
288
289
290 BUG_ON(cmd->flags & CMD_WANT_SKB);
291
292
293 if (!cmd->callback)
294 cmd->callback = il_generic_cmd_callback;
295
296 if (test_bit(S_EXIT_PENDING, &il->status))
297 return -EBUSY;
298
299 ret = il_enqueue_hcmd(il, cmd);
300 if (ret < 0) {
301 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
302 il_get_cmd_string(cmd->id), ret);
303 return ret;
304 }
305 return 0;
306}
307
/*
 * Send a host command and block until the firmware responds or
 * HOST_COMPLETE_TIMEOUT expires.
 *
 * Must be called with il->mutex held.  CMD_ASYNC and a caller-supplied
 * callback are forbidden here.  Returns 0 on success; -ETIMEDOUT,
 * -ECANCELED (rfkill), -EIO (firmware error / missing reply) or the
 * enqueue error otherwise.  Any reply page is released on the failure
 * paths; on success with CMD_WANT_SKB the caller owns cmd->reply_page.
 */
int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* The sync path waits inline; a callback makes no sense here. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	/* S_HCMD_ACTIVE is cleared by the response handler, which is what
	 * the wait below observes. */
	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		/* Re-check under no lock: the handler may have completed the
		 * command between the timeout and this test. */
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/* Strip CMD_WANT_SKB from the queued meta entry so that a
		 * late response is freed by the handler rather than handed
		 * to a caller that has already given up. */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);
394
395int
396il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
397{
398 if (cmd->flags & CMD_ASYNC)
399 return il_send_cmd_async(il, cmd);
400
401 return il_send_cmd_sync(il, cmd);
402}
403EXPORT_SYMBOL(il_send_cmd);
404
405int
406il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
407{
408 struct il_host_cmd cmd = {
409 .id = id,
410 .len = len,
411 .data = data,
412 };
413
414 return il_send_cmd_sync(il, &cmd);
415}
416EXPORT_SYMBOL(il_send_cmd_pdu);
417
418int
419il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
420 void (*callback) (struct il_priv *il,
421 struct il_device_cmd *cmd,
422 struct il_rx_pkt *pkt))
423{
424 struct il_host_cmd cmd = {
425 .id = id,
426 .len = len,
427 .data = data,
428 };
429
430 cmd.flags |= CMD_ASYNC;
431 cmd.callback = callback;
432
433 return il_send_cmd_async(il, &cmd);
434}
435EXPORT_SYMBOL(il_send_cmd_pdu_async);
436
437
/* Module parameter selecting LED behaviour; 0 defers to the per-device
 * default (il->cfg->led_mode, see il_leds_init()). Read-only via sysfs. */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode,
		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
442
443
444
445
446
447
448
449
450
451
452
453
454
455
/*
 * Throughput-to-blink-period table for the mac80211 throughput LED
 * trigger (registered in il_leds_init() for IL_LED_BLINK mode).
 * Higher throughput selects a shorter blink_time, i.e. faster blinking.
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0, .blink_time = 334},
	{.throughput = 1 * 1024 - 1, .blink_time = 260},
	{.throughput = 5 * 1024 - 1, .blink_time = 220},
	{.throughput = 10 * 1024 - 1, .blink_time = 190},
	{.throughput = 20 * 1024 - 1, .blink_time = 170},
	{.throughput = 50 * 1024 - 1, .blink_time = 150},
	{.throughput = 70 * 1024 - 1, .blink_time = 130},
	{.throughput = 100 * 1024 - 1, .blink_time = 110},
	{.throughput = 200 * 1024 - 1, .blink_time = 80},
	{.throughput = 300 * 1024 - 1, .blink_time = 50},
};
468
469
470
471
472
473
474
475
476
477
478
479
480static inline u8
481il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
482{
483 if (!compensation) {
484 IL_ERR("undefined blink compensation: "
485 "use pre-defined blinking time\n");
486 return time;
487 }
488
489 return (u8) ((time * compensation) >> 6);
490}
491
492
/*
 * Program the link LED with on/off periods (caller units; compensated
 * per-device before being sent to the firmware).
 *
 * An @off of 0 requests solid-on (on is forced to IL_LED_SOLID).  The
 * command is skipped when the requested pattern matches the cached
 * blink_on/blink_off state, and refused (-EBUSY) before S_READY.
 * On success the cache is updated with the (possibly adjusted) values.
 */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	/* No change requested - nothing to send. */
	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n",
	      il->cfg->led_compensation);
	/* NOTE(review): on/off are unsigned long but il_blink_compensation()
	 * takes u8, so large periods are truncated - confirm callers keep
	 * these within range. */
	led_cmd.on =
	    il_blink_compensation(il, on,
				  il->cfg->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off,
				  il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}
529
530static void
531il_led_brightness_set(struct led_classdev *led_cdev,
532 enum led_brightness brightness)
533{
534 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
535 unsigned long on = 0;
536
537 if (brightness > 0)
538 on = IL_LED_SOLID;
539
540 il_led_cmd(il, on, 0);
541}
542
543static int
544il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
545 unsigned long *delay_off)
546{
547 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
548
549 return il_led_cmd(il, *delay_on, *delay_off);
550}
551
552void
553il_leds_init(struct il_priv *il)
554{
555 int mode = led_mode;
556 int ret;
557
558 if (mode == IL_LED_DEFAULT)
559 mode = il->cfg->led_mode;
560
561 il->led.name =
562 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
563 il->led.brightness_set = il_led_brightness_set;
564 il->led.blink_set = il_led_blink_set;
565 il->led.max_brightness = 1;
566
567 switch (mode) {
568 case IL_LED_DEFAULT:
569 WARN_ON(1);
570 break;
571 case IL_LED_BLINK:
572 il->led.default_trigger =
573 ieee80211_create_tpt_led_trigger(il->hw,
574 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
575 il_blink,
576 ARRAY_SIZE(il_blink));
577 break;
578 case IL_LED_RF_STATE:
579 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
580 break;
581 }
582
583 ret = led_classdev_register(&il->pci_dev->dev, &il->led);
584 if (ret) {
585 kfree(il->led.name);
586 return;
587 }
588
589 il->led_registered = true;
590}
591EXPORT_SYMBOL(il_leds_init);
592
593void
594il_leds_exit(struct il_priv *il)
595{
596 if (!il->led_registered)
597 return;
598
599 led_classdev_unregister(&il->led);
600 kfree(il->led.name);
601}
602EXPORT_SYMBOL(il_leds_exit);
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
/*
 * EEPROM channel-number tables, one per EEPROM "band" group.
 * Bands 1-5 enumerate regular channels (band 1 is mapped to 2.4 GHz,
 * bands 2-5 to 5 GHz by il_init_channel_map()); bands 6-7 enumerate
 * the channels that carry HT40 data (6 -> 2.4 GHz, 7 -> 5 GHz).
 */
const u8 il_eeprom_band_1[14] = {	/* band 1: 2.4 GHz channels */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* band 2 */
static const u8 il_eeprom_band_2[] = {
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

/* band 3 */
static const u8 il_eeprom_band_3[] = {
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

/* band 4 */
static const u8 il_eeprom_band_4[] = {
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

/* band 5 */
static const u8 il_eeprom_band_5[] = {
	145, 149, 153, 157, 161, 165
};

/* band 6: channels with HT40 data, 2.4 GHz */
static const u8 il_eeprom_band_6[] = {
	1, 2, 3, 4, 5, 6, 7
};

/* band 7: channels with HT40 data, 5 GHz */
static const u8 il_eeprom_band_7[] = {
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
664
665
666
667
668
669
670
671static int
672il_eeprom_verify_signature(struct il_priv *il)
673{
674 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
675 int ret = 0;
676
677 D_EEPROM("EEPROM signature=0x%08x\n", gp);
678 switch (gp) {
679 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
680 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
681 break;
682 default:
683 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
684 ret = -ENOENT;
685 break;
686 }
687 return ret;
688}
689
690const u8 *
691il_eeprom_query_addr(const struct il_priv *il, size_t offset)
692{
693 BUG_ON(offset >= il->cfg->eeprom_size);
694 return &il->eeprom[offset];
695}
696EXPORT_SYMBOL(il_eeprom_query_addr);
697
698u16
699il_eeprom_query16(const struct il_priv *il, size_t offset)
700{
701 if (!il->eeprom)
702 return 0;
703 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
704}
705EXPORT_SYMBOL(il_eeprom_query16);
706
707
708
709
710
711
712
713
/*
 * Read the whole EEPROM into a kernel buffer (il->eeprom).
 *
 * Verifies the EEPROM signature, takes the EEPROM semaphore, then reads
 * the device 16 bits at a time through CSR_EEPROM_REG, storing the data
 * little-endian.  On any failure the buffer is freed again.  The APM is
 * started for the duration of the read and stopped before returning.
 * Returns 0 on success or a negative errno.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* Allocate the cache sized per device configuration. */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* Read the EEPROM one 16-bit word at a time: write the (byte)
	 * address shifted into the command field, poll for READ_VALID,
	 * then take the data from the top half of the register. */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* The APM was only needed for the EEPROM read itself. */
	il_apm_stop(il);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
786
787void
788il_eeprom_free(struct il_priv *il)
789{
790 kfree(il->eeprom);
791 il->eeprom = NULL;
792}
793EXPORT_SYMBOL(il_eeprom_free);
794
795static void
796il_init_band_reference(const struct il_priv *il, int eep_band,
797 int *eeprom_ch_count,
798 const struct il_eeprom_channel **eeprom_ch_info,
799 const u8 **eeprom_ch_idx)
800{
801 u32 offset = il->cfg->regulatory_bands[eep_band - 1];
802
803 switch (eep_band) {
804 case 1:
805 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
806 *eeprom_ch_info =
807 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
808 offset);
809 *eeprom_ch_idx = il_eeprom_band_1;
810 break;
811 case 2:
812 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
813 *eeprom_ch_info =
814 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
815 offset);
816 *eeprom_ch_idx = il_eeprom_band_2;
817 break;
818 case 3:
819 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
820 *eeprom_ch_info =
821 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
822 offset);
823 *eeprom_ch_idx = il_eeprom_band_3;
824 break;
825 case 4:
826 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
827 *eeprom_ch_info =
828 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
829 offset);
830 *eeprom_ch_idx = il_eeprom_band_4;
831 break;
832 case 5:
833 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
834 *eeprom_ch_info =
835 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
836 offset);
837 *eeprom_ch_idx = il_eeprom_band_5;
838 break;
839 case 6:
840 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
841 *eeprom_ch_info =
842 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
843 offset);
844 *eeprom_ch_idx = il_eeprom_band_6;
845 break;
846 case 7:
847 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
848 *eeprom_ch_info =
849 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
850 offset);
851 *eeprom_ch_idx = il_eeprom_band_7;
852 break;
853 default:
854 BUG();
855 }
856}
857
/* Expands to the flag name followed by a space when the flag is set in
 * eeprom_ch->flags, or to "" otherwise - used only in the debug print
 * below (relies on the local variable name "eeprom_ch"). */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/*
 * Copy HT40 EEPROM data into the driver's channel_info entry for
 * (@band, @channel) and, when the EEPROM marks the channel valid,
 * clear @clear_ht40_extension_channel from its extension-channel
 * restriction mask.  Returns -1 when the channel is unknown/invalid,
 * 0 otherwise.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	/* Cast away const: this function mutates the table entry. */
	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
897
898#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
899 ? # x " " : "")
900
901
902
903
904int
905il_init_channel_map(struct il_priv *il)
906{
907 int eeprom_ch_count = 0;
908 const u8 *eeprom_ch_idx = NULL;
909 const struct il_eeprom_channel *eeprom_ch_info = NULL;
910 int band, ch;
911 struct il_channel_info *ch_info;
912
913 if (il->channel_count) {
914 D_EEPROM("Channel map already initialized.\n");
915 return 0;
916 }
917
918 D_EEPROM("Initializing regulatory info from EEPROM\n");
919
920 il->channel_count =
921 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
922 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
923 ARRAY_SIZE(il_eeprom_band_5);
924
925 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
926
927 il->channel_info =
928 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
929 GFP_KERNEL);
930 if (!il->channel_info) {
931 IL_ERR("Could not allocate channel_info\n");
932 il->channel_count = 0;
933 return -ENOMEM;
934 }
935
936 ch_info = il->channel_info;
937
938
939
940
941 for (band = 1; band <= 5; band++) {
942
943 il_init_band_reference(il, band, &eeprom_ch_count,
944 &eeprom_ch_info, &eeprom_ch_idx);
945
946
947 for (ch = 0; ch < eeprom_ch_count; ch++) {
948 ch_info->channel = eeprom_ch_idx[ch];
949 ch_info->band =
950 (band ==
951 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
952
953
954
955 ch_info->eeprom = eeprom_ch_info[ch];
956
957
958
959 ch_info->flags = eeprom_ch_info[ch].flags;
960
961
962 ch_info->ht40_extension_channel =
963 IEEE80211_CHAN_NO_HT40;
964
965 if (!(il_is_channel_valid(ch_info))) {
966 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
967 "No traffic\n", ch_info->channel,
968 ch_info->flags,
969 il_is_channel_a_band(ch_info) ? "5.2" :
970 "2.4");
971 ch_info++;
972 continue;
973 }
974
975
976 ch_info->max_power_avg = ch_info->curr_txpow =
977 eeprom_ch_info[ch].max_power_avg;
978 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
979 ch_info->min_power = 0;
980
981 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
982 " Ad-Hoc %ssupported\n", ch_info->channel,
983 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
984 CHECK_AND_PRINT_I(VALID),
985 CHECK_AND_PRINT_I(IBSS),
986 CHECK_AND_PRINT_I(ACTIVE),
987 CHECK_AND_PRINT_I(RADAR),
988 CHECK_AND_PRINT_I(WIDE),
989 CHECK_AND_PRINT_I(DFS),
990 eeprom_ch_info[ch].flags,
991 eeprom_ch_info[ch].max_power_avg,
992 ((eeprom_ch_info[ch].
993 flags & EEPROM_CHANNEL_IBSS) &&
994 !(eeprom_ch_info[ch].
995 flags & EEPROM_CHANNEL_RADAR)) ? "" :
996 "not ");
997
998 ch_info++;
999 }
1000 }
1001
1002
1003 if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
1004 il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
1005 return 0;
1006
1007
1008 for (band = 6; band <= 7; band++) {
1009 enum ieee80211_band ieeeband;
1010
1011 il_init_band_reference(il, band, &eeprom_ch_count,
1012 &eeprom_ch_info, &eeprom_ch_idx);
1013
1014
1015 ieeeband =
1016 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1017
1018
1019 for (ch = 0; ch < eeprom_ch_count; ch++) {
1020
1021 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
1022 &eeprom_ch_info[ch],
1023 IEEE80211_CHAN_NO_HT40PLUS);
1024
1025
1026 il_mod_ht40_chan_info(il, ieeeband,
1027 eeprom_ch_idx[ch] + 4,
1028 &eeprom_ch_info[ch],
1029 IEEE80211_CHAN_NO_HT40MINUS);
1030 }
1031 }
1032
1033 return 0;
1034}
1035EXPORT_SYMBOL(il_init_channel_map);
1036
1037
1038
1039
1040void
1041il_free_channel_map(struct il_priv *il)
1042{
1043 kfree(il->channel_info);
1044 il->channel_count = 0;
1045}
1046EXPORT_SYMBOL(il_free_channel_map);
1047
1048
1049
1050
1051
1052
1053const struct il_channel_info *
1054il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
1055 u16 channel)
1056{
1057 int i;
1058
1059 switch (band) {
1060 case IEEE80211_BAND_5GHZ:
1061 for (i = 14; i < il->channel_count; i++) {
1062 if (il->channel_info[i].channel == channel)
1063 return &il->channel_info[i];
1064 }
1065 break;
1066 case IEEE80211_BAND_2GHZ:
1067 if (channel >= 1 && channel <= 14)
1068 return &il->channel_info[channel - 1];
1069 break;
1070 default:
1071 BUG();
1072 }
1073
1074 return NULL;
1075}
1076EXPORT_SYMBOL(il_get_channel_info);
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/* One power-table entry: the sleep command plus a per-entry value.
 * NOTE(review): no_dtim is not used in this file - presumably the
 * number of DTIM periods that may be skipped at this level; confirm
 * against its users. */
struct il_power_vec_entry {
	struct il_powertable_cmd cmd;
	u8 no_dtim;
};
1095
1096static void
1097il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
1098{
1099 memset(cmd, 0, sizeof(*cmd));
1100
1101 if (il->power_data.pci_pm)
1102 cmd->flags |= IL_POWER_PCI_PM_MSK;
1103
1104 D_POWER("Sleep command for CAM\n");
1105}
1106
/*
 * Send the power-table command to the firmware, logging its fields at
 * power-debug level first.  Returns the il_send_cmd_pdu() result.
 */
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}
1124
/*
 * Apply a new power mode unless @cmd equals the currently active sleep
 * command (and @force is false).
 *
 * The command is cached in sleep_cmd_next and deferred while scanning;
 * S_POWER_PMI is set *before* sending a sleep-allowed command and
 * cleared only after a successful send of a no-sleep command, so the
 * flag is conservatively set during the transition.  Chain flags are
 * refreshed only when chain-noise calibration is done or untouched.
 * Must be called with il->mutex held.  Returns 0 on success, -EIO when
 * the RF is not ready, or the il_set_power() error.
 */
static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	/* Identical to what is already in effect - nothing to do. */
	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Remember the request; it is re-applied after a scan finishes. */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		/* Only a successfully sent command becomes "current". */
		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}
1171
1172int
1173il_power_update_mode(struct il_priv *il, bool force)
1174{
1175 struct il_powertable_cmd cmd;
1176
1177 il_power_sleep_cam_cmd(il, &cmd);
1178 return il_power_set_mode(il, &cmd, force);
1179}
1180EXPORT_SYMBOL(il_power_update_mode);
1181
1182
1183void
1184il_power_initialize(struct il_priv *il)
1185{
1186 u16 lctl;
1187
1188 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
1189 il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
1190
1191 il->power_data.debug_sleep_level_override = -1;
1192
1193 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1194}
1195EXPORT_SYMBOL(il_power_initialize);
1196
1197
1198
1199
/* Base active-scan dwell times per band, extended per probe request in
 * il_get_active_dwell_time(). */
#define IL_ACTIVE_DWELL_TIME_24 (30)
#define IL_ACTIVE_DWELL_TIME_52 (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* Passive-scan dwell: a common base plus a per-band increment; capped
 * by the beacon interval in il_get_passive_dwell_time().
 * IL_CHANNEL_TUNE_TIME is the per-channel tuning allowance. */
#define IL_PASSIVE_DWELL_TIME_24 (20)
#define IL_PASSIVE_DWELL_TIME_52 (10)
#define IL_PASSIVE_DWELL_BASE (100)
#define IL_CHANNEL_TUNE_TIME 5
1213
/*
 * Ask the firmware to abort the current scan.
 *
 * Refuses (-EIO) unless the device is ready, configured and actually
 * scanning in hardware, and no firmware error or shutdown is pending.
 * Sends C_SCAN_ABORT synchronously with CMD_WANT_SKB and inspects the
 * reply status; anything other than CAN_ABORT_STATUS is treated as a
 * (soft) failure.  The reply page is always released.
 */
static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when device is not ready to receive
	 * a scan-abort command. */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before we
		 * the microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}
1253
1254static void
1255il_complete_scan(struct il_priv *il, bool aborted)
1256{
1257
1258 if (il->scan_request) {
1259 D_SCAN("Complete scan in mac80211\n");
1260 ieee80211_scan_completed(il->hw, aborted);
1261 }
1262
1263 il->scan_vif = NULL;
1264 il->scan_request = NULL;
1265}
1266
/*
 * Forcefully end the current scan without waiting for firmware: clear
 * all scan-related status bits and complete the scan as aborted.
 * Must be called with il->mutex held; a no-op when not scanning.
 */
void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}
1283
1284static void
1285il_do_scan_abort(struct il_priv *il)
1286{
1287 int ret;
1288
1289 lockdep_assert_held(&il->mutex);
1290
1291 if (!test_bit(S_SCANNING, &il->status)) {
1292 D_SCAN("Not performing scan to abort\n");
1293 return;
1294 }
1295
1296 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1297 D_SCAN("Scan abort in progress\n");
1298 return;
1299 }
1300
1301 ret = il_send_scan_abort(il);
1302 if (ret) {
1303 D_SCAN("Send scan abort failed %d\n", ret);
1304 il_force_scan_end(il);
1305 } else
1306 D_SCAN("Successfully send scan abort\n");
1307}
1308
1309
1310
1311
1312int
1313il_scan_cancel(struct il_priv *il)
1314{
1315 D_SCAN("Queuing abort scan\n");
1316 queue_work(il->workqueue, &il->abort_scan);
1317 return 0;
1318}
1319EXPORT_SYMBOL(il_scan_cancel);
1320
1321
1322
1323
1324
1325
1326int
1327il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1328{
1329 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1330
1331 lockdep_assert_held(&il->mutex);
1332
1333 D_SCAN("Scan cancel timeout\n");
1334
1335 il_do_scan_abort(il);
1336
1337 while (time_before_eq(jiffies, timeout)) {
1338 if (!test_bit(S_SCAN_HW, &il->status))
1339 break;
1340 msleep(20);
1341 }
1342
1343 return test_bit(S_SCAN_HW, &il->status);
1344}
1345EXPORT_SYMBOL(il_scan_cancel_timeout);
1346
1347
/* Handler for C_SCAN responses: debug-log the request status.  The
 * body compiles away entirely without CONFIG_IWLEGACY_DEBUG. */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1359
1360
/* Handler for N_SCAN_START: record the scan start TSF (low word) for
 * the elapsed-time computation in il_hdl_scan_results(), and log the
 * notification. */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;
	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: " "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}
1373
1374
/* Handler for N_SCAN_RESULTS: debug-log per-channel scan results,
 * including time elapsed since the scan start recorded in
 * il_hdl_scan_start().  Compiles away without CONFIG_IWLEGACY_DEBUG. */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1390
1391
/* Handler for N_SCAN_COMPLETE: clear the hardware-scan flag and defer
 * the rest of the completion to the scan_completed work item.  The
 * scan_notif variable exists only under CONFIG_IWLEGACY_DEBUG because
 * the D_SCAN macros are its sole users (they compile away otherwise). */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{

#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}
1414
/*
 * il_setup_rx_scan_handlers - register scan-related RX notification
 * handlers in the driver's dispatch table.
 */
void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1425
1426u16
1427il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1428 u8 n_probes)
1429{
1430 if (band == IEEE80211_BAND_5GHZ)
1431 return IL_ACTIVE_DWELL_TIME_52 +
1432 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1433 else
1434 return IL_ACTIVE_DWELL_TIME_24 +
1435 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1436}
1437EXPORT_SYMBOL(il_get_active_dwell_time);
1438
/*
 * il_get_passive_dwell_time - passive-scan dwell time for @band, in TU.
 * When associated, the dwell is clamped so we don't stay off-channel
 * longer than (most of) a beacon interval.
 */
u16
il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band ==
	     IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * channel dwell time to be 98% of the smallest beacon
		 * interval (minus 2 * channel tune time).
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
1467
/*
 * il_init_scan_params - set default scan TX antenna for each band,
 * if not already chosen, to the highest valid TX antenna.
 */
void
il_init_scan_params(struct il_priv *il)
{
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
	if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
		il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
	if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
		il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);
1478
/*
 * il_scan_initiate - start a HW scan for @vif.
 *
 * Caller must hold il->mutex.  Returns 0 on success, -EIO if the RF is
 * not ready, -EBUSY if a scan or a scan abort is already in flight, or
 * the error from the device-specific request_scan op.
 */
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	/* S_SCANNING must be set before the scan command is sent. */
	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	/* Watchdog: force scan completion if uCode never reports one. */
	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1519
/*
 * il_mac_hw_scan - mac80211 hw_scan callback.
 *
 * Validates the request, records it, and kicks off the HW scan.
 * Returns 0 on success, -EINVAL for an empty channel list, -EAGAIN if a
 * scan is already running, or the error from il_scan_initiate().
 */
int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct cfg80211_scan_request *req)
{
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Can not scan on no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);
1555
/*
 * il_bg_scan_check - scan watchdog worker.
 *
 * Runs when uCode failed to report scan completion within
 * IL_SCAN_CHECK_WATCHDOG; firmware is likely in bad shape, so just
 * force scan completion toward mac80211 instead of sending an abort.
 */
static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}
1571
1572
1573
1574
1575
/*
 * il_fill_probe_req - fill in the probe request frame for a scan.
 *
 * Builds a broadcast probe request from @ta with an empty (wildcard)
 * SSID IE followed by @ie_len bytes of caller-supplied IEs, writing at
 * most @left bytes into @frame.  Returns the total frame length, or 0
 * if @left cannot hold even the header and SSID IE.
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * the 24-byte 802.11 management header comes first. */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* IEs start right after the fixed header */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE: wildcard (zero-length) SSID */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
1620
/*
 * il_bg_abort_scan - worker that asks uCode to abort the current scan,
 * waiting up to 200ms for the abort to take effect.
 */
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}
1634
/*
 * il_bg_scan_completed - worker that finishes a scan: notifies mac80211,
 * then restores deferred power/txpower settings and runs the
 * device-specific post-scan hook.
 */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	/* The scan finished on its own; no need for the watchdog anymore. */
	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * Since setting the power/txpower may have been deferred while
	 * performing the scan, fire one off now.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1675
/* il_setup_scan_deferred_work - initialize all scan-related work items. */
void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);
1684
/*
 * il_cancel_scan_deferred_work - synchronously cancel scan work items.
 * If the watchdog was still pending, the scan never completed, so force
 * scan completion here instead.
 */
void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1698
1699
1700static void
1701il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1702{
1703
1704 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1705 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1706 sta_id, il->stations[sta_id].sta.sta.addr);
1707
1708 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1709 D_ASSOC("STA id %u addr %pM already present"
1710 " in uCode (according to driver)\n", sta_id,
1711 il->stations[sta_id].sta.sta.addr);
1712 } else {
1713 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1714 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1715 il->stations[sta_id].sta.sta.addr);
1716 }
1717}
1718
/*
 * il_process_add_sta_resp - handle the uCode response to C_ADD_STA.
 *
 * On ADD_STA_SUCCESS_MSK, marks the station as uCode-active and returns
 * 0; any other status returns -EIO.  Takes il->sta_lock internally.
 */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * NOTE(review): the MAC address in the command buffer can differ
	 * from the one originally sent to the device; this second debug
	 * print exists to observe that discrepancy — confirm against the
	 * device behavior before relying on addsta->sta.addr here.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1778
/* Async completion callback for C_ADD_STA: parse the response. */
static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);

}
1788
1789int
1790il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1791{
1792 struct il_rx_pkt *pkt = NULL;
1793 int ret = 0;
1794 u8 data[sizeof(*sta)];
1795 struct il_host_cmd cmd = {
1796 .id = C_ADD_STA,
1797 .flags = flags,
1798 .data = data,
1799 };
1800 u8 sta_id __maybe_unused = sta->sta.sta_id;
1801
1802 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1803 flags & CMD_ASYNC ? "a" : "");
1804
1805 if (flags & CMD_ASYNC)
1806 cmd.callback = il_add_sta_callback;
1807 else {
1808 cmd.flags |= CMD_WANT_SKB;
1809 might_sleep();
1810 }
1811
1812 cmd.len = il->ops->build_addsta_hcmd(sta, data);
1813 ret = il_send_cmd(il, &cmd);
1814
1815 if (ret || (flags & CMD_ASYNC))
1816 return ret;
1817
1818 if (ret == 0) {
1819 pkt = (struct il_rx_pkt *)cmd.reply_page;
1820 ret = il_process_add_sta_resp(il, sta, pkt, true);
1821 }
1822 il_free_pages(il, cmd.reply_page);
1823
1824 return ret;
1825}
1826EXPORT_SYMBOL(il_send_add_sta);
1827
1828static void
1829il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
1830{
1831 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1832 __le32 sta_flags;
1833
1834 if (!sta || !sta_ht_inf->ht_supported)
1835 goto done;
1836
1837 D_ASSOC("spatial multiplexing power save mode: %s\n",
1838 (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
1839 (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
1840 "disabled");
1841
1842 sta_flags = il->stations[idx].sta.station_flags;
1843
1844 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1845
1846 switch (sta->smps_mode) {
1847 case IEEE80211_SMPS_STATIC:
1848 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1849 break;
1850 case IEEE80211_SMPS_DYNAMIC:
1851 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1852 break;
1853 case IEEE80211_SMPS_OFF:
1854 break;
1855 default:
1856 IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
1857 break;
1858 }
1859
1860 sta_flags |=
1861 cpu_to_le32((u32) sta_ht_inf->
1862 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1863
1864 sta_flags |=
1865 cpu_to_le32((u32) sta_ht_inf->
1866 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1867
1868 if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
1869 sta_flags |= STA_FLG_HT40_EN_MSK;
1870 else
1871 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1872
1873 il->stations[idx].sta.station_flags = sta_flags;
1874done:
1875 return;
1876}
1877
1878
1879
1880
1881
1882
/*
 * il_prep_station - prepare a station entry in the driver's table.
 *
 * Finds (or allocates) a slot for @addr, marks it driver-active and
 * fills in the C_ADD_STA command template.  Returns the station id,
 * or IL_INVALID_STATION if the table is full.
 * Should be called with il->sta_lock held.
 */
u8
il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
		struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = IL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		sta_id = il->hw_params.bcast_id;
	else
		/* Reuse an existing slot for this addr, else take the
		 * first free one found during the walk. */
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/* Table full: caller must handle IL_INVALID_STATION. */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station.  Keep track if one is in progress so that we do not
	 * send another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = 0;

	/*
	 * OK to call unconditionally, since the HT setup is a no-op for
	 * a NULL or non-HT @sta.
	 */
	il_set_ht_add_station(il, sta_id, sta);

	/* Lowest basic rate for the band; used until rate scaling runs. */
	rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;

}
EXPORT_SYMBOL_GPL(il_prep_station);
1962
1963#define STA_WAIT_TIMEOUT (HZ/2)
1964
1965
1966
1967
/*
 * il_add_station_common - prepare a station entry and add it to uCode.
 *
 * Synchronous; sleeps while C_ADD_STA is in flight.  On success stores
 * the station id in *sta_id_r.  Returns 0, -EINVAL if no slot could be
 * prepared, -EEXIST if the station is already (being) added, or the
 * error from il_send_add_sta().
 */
int
il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
		      struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station.  Keep track if one is in progress so that we do not
	 * send another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	/* Copy the command under the lock; it is sent unlocked below. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);
2024
2025
2026
2027
2028
2029
/*
 * il_sta_ucode_deactivate - mark a station as no longer active in uCode
 * and wipe its driver table entry.  Caller must hold il->sta_lock.
 */
static void
il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
{
	/* Expect the entry to be uCode-active but not driver-active. */
	if ((il->stations[sta_id].
	     used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
	    IL_STA_UCODE_ACTIVE)
		IL_ERR("removed non active STA %u\n", sta_id);

	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;

	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
	D_ASSOC("Removed STA %u\n", sta_id);
}
2044
/*
 * il_send_remove_station - send C_REM_STA synchronously.
 *
 * On success, unless @temporary is set, the driver's entry for @sta_id
 * is deactivated/cleared as well.  Returns 0 or -EIO.
 */
static int
il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
		       bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;

	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;

	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	/* We need the response packet to inspect the removal status. */
	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);

	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
2100
2101
2102
2103
/*
 * il_remove_station - remove the driver's knowledge of a station and
 * tell uCode to forget it too.
 */
int
il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
{
	unsigned long flags;

	if (!il_is_ready(il)) {
		D_INFO("Unable to remove station %pM, device not ready.\n",
		       addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down.  Return success since device will be down
		 * soon anyway.
		 */
		return 0;
	}

	D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);

	if (WARN_ON(sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags);

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		D_INFO("Removing %pM but non DRIVER active\n", addr);
		goto out_err;
	}

	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_INFO("Removing %pM but non UCODE active\n", addr);
		goto out_err;
	}

	/* Local stations own their link-quality command; free it. */
	if (il->stations[sta_id].used & IL_STA_LOCAL) {
		kfree(il->stations[sta_id].lq);
		il->stations[sta_id].lq = NULL;
	}

	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;

	il->num_stations--;

	BUG_ON(il->num_stations < 0);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_remove_station(il, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(il_remove_station);
2156
2157
2158
2159
2160
2161
2162
2163
2164
/*
 * il_clear_ucode_stations - clear the uCode-active bit on all stations.
 *
 * Used when the uCode station table is known to be out of sync with the
 * driver's (e.g. after firmware restart); driver-active state is kept.
 */
void
il_clear_ucode_stations(struct il_priv *il)
{
	int i;
	unsigned long flags_spin;
	bool cleared = false;

	D_INFO("Clearing ucode stations in driver\n");

	spin_lock_irqsave(&il->sta_lock, flags_spin);
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
			D_INFO("Clearing ucode active for station %d\n", i);
			il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
			cleared = true;
		}
	}
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	if (!cleared)
		D_INFO("No active stations found to be cleared\n");
}
EXPORT_SYMBOL(il_clear_ucode_stations);
2188
2189
2190
2191
2192
2193
2194
2195
2196
/*
 * il_restore_stations - re-add to uCode every station the driver
 * considers active but uCode does not (e.g. after firmware restart).
 *
 * Function sleeps.  The sta_lock is dropped around each synchronous
 * command and re-taken afterwards.
 */
void
il_restore_stations(struct il_priv *il)
{
	struct il_addsta_cmd sta_cmd;
	struct il_link_quality_cmd lq;
	unsigned long flags_spin;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!il_is_ready(il)) {
		D_INFO("Not ready yet, not restoring any stations.\n");
		return;
	}

	D_ASSOC("Restoring all known stations ... start.\n");
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	/* First pass: mark every station that needs restoring. */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
			D_ASSOC("Restoring sta %pM\n",
				il->stations[i].sta.sta.addr);
			il->stations[i].sta.mode = 0;
			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/* Second pass: send the commands, copying them out of the table
	 * under the lock before dropping it to sleep. */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
			memcpy(&sta_cmd, &il->stations[i].sta,
			       sizeof(struct il_addsta_cmd));
			send_lq = false;
			if (il->stations[i].lq) {
				memcpy(&lq, il->stations[i].lq,
				       sizeof(struct il_link_quality_cmd));
				send_lq = true;
			}
			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
			if (ret) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				IL_ERR("Adding station %pM failed.\n",
				       il->stations[i].sta.sta.addr);
				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
				il->stations[i].used &=
				    ~IL_STA_UCODE_INPROGRESS;
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command.
			 */
			if (send_lq)
				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
			spin_lock_irqsave(&il->sta_lock, flags_spin);
			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	if (!found)
		D_INFO("Restoring all known stations"
		       " .... no stations to be restored.\n");
	else
		D_INFO("Restoring all known stations" " .... complete.\n");
}
EXPORT_SYMBOL(il_restore_stations);
2267
2268int
2269il_get_free_ucode_key_idx(struct il_priv *il)
2270{
2271 int i;
2272
2273 for (i = 0; i < il->sta_key_max_num; i++)
2274 if (!test_and_set_bit(i, &il->ucode_key_table))
2275 return i;
2276
2277 return WEP_INVALID_OFFSET;
2278}
2279EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2280
/*
 * il_dealloc_bcast_stations - tear down all broadcast station entries,
 * freeing their link-quality commands and updating the station count.
 */
void
il_dealloc_bcast_stations(struct il_priv *il)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if (!(il->stations[i].used & IL_STA_BCAST))
			continue;

		il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
		il->num_stations--;
		BUG_ON(il->num_stations < 0);
		kfree(il->stations[i].lq);
		il->stations[i].lq = NULL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
}
EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2301
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump a link-quality command's rate table to the debug log. */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
}
#else
/* No-op stub when debugging is compiled out. */
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
/*
 * il_is_lq_table_valid - sanity-check a link-quality command.
 *
 * On a non-HT channel the LQ rate table must not contain HT rates;
 * sending such a table confuses the firmware.  Returns true if the
 * table is acceptable for the current channel.
 */
static bool
il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;

	if (il->ht.enabled)
		return true;

	D_INFO("Channel %u is not an HT channel\n", il->active.channel);
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
			D_INFO("idx %d of LQ expects HT channel\n", i);
			return false;
		}
	}
	return true;
}
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
/*
 * il_send_lq_cmd - send a link-quality command for a station.
 *
 * @init: set when this command is part of station initialization; in
 *        that case, after a synchronous send, the "station addition in
 *        progress" flag is cleared here.
 *
 * Returns 0 on success, -EINVAL for an invalid/inactive station or an
 * LQ table that doesn't match the channel, or the send error.
 */
int
il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
	       u8 flags, bool init)
{
	int ret = 0;
	unsigned long flags_spin;

	struct il_host_cmd cmd = {
		.id = C_TX_LINK_QUALITY_CMD,
		.len = sizeof(struct il_link_quality_cmd),
		.flags = flags,
		.data = lq,
	};

	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
		return -EINVAL;

	/* Only send LQ for stations the driver still considers active. */
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	il_dump_lq_cmd(il, lq);
	/* init implies synchronous completion handling below. */
	BUG_ON(init && (cmd.flags & CMD_ASYNC));

	if (il_is_lq_table_valid(il, lq))
		ret = il_send_cmd(il, &cmd);
	else
		ret = -EINVAL;

	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		D_INFO("init LQ command complete,"
		       " clearing sta addition status for sta %d\n",
		       lq->sta_id);
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	return ret;
}
EXPORT_SYMBOL(il_send_lq_cmd);
2406
/* mac80211 sta_remove callback: drop the station from driver and uCode. */
int
il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter station %pM\n", sta->addr);

	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
	if (ret)
		IL_ERR("Error removing station %pM\n", sta->addr);

	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_sta_remove);
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499int
2500il_rx_queue_space(const struct il_rx_queue *q)
2501{
2502 int s = q->read - q->write;
2503 if (s <= 0)
2504 s += RX_QUEUE_SIZE;
2505
2506 s -= 2;
2507 if (s < 0)
2508 s = 0;
2509 return s;
2510}
2511EXPORT_SYMBOL(il_rx_queue_space);
2512
2513
2514
2515
/*
 * il_rx_queue_update_write_ptr - tell the device about the new RX
 * write pointer (rounded down to a multiple of 8, as the device
 * expects).  Handles waking the NIC first when in power-save.
 */
void
il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(S_POWER_PMI, &il->status)) {
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			/* Device asleep: request wakeup and retry later
			 * (need_update stays set). */
			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
			       reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);

		/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2556
/*
 * il_rx_queue_alloc - allocate the RX queue's DMA-coherent buffers and
 * initialize its bookkeeping.  Returns 0 or -ENOMEM.
 */
int
il_rx_queue_alloc(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct device *dev = &il->pci_dev->dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Status area the device writes its read index into. */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;

err_rb:
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
err_bd:
	return -ENOMEM;
}
EXPORT_SYMBOL(il_rx_queue_alloc);
2598
/*
 * il_hdl_spectrum_measurement - handle a spectrum measurement
 * notification; store the finished report and mark it ready.
 */
void
il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (!report->state) {
		D_11H("Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&il->measure_report, report, sizeof(*report));
	il->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2614
2615
2616
2617
/*
 * il_set_decrypted_flag - interpret the HW decryption status of an RX
 * frame.  Sets RX_FLAG_DECRYPTED in @stats when HW decrypted the frame;
 * returns -1 if the frame was destroyed by an in-place decryption
 * failure (caller should drop it), 0 otherwise.
 */
int
il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
		      u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All frames are unencrypted when uCode decryption is disabled;
	 * mac80211 will do SW decryption.
	 */
	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	D_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 key; frame is passed up
		 * and decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* Bad ICV/MIC: decryption is done in place, so the
			 * packet contents are destroyed — drop it. */
			D_RX("Packet destroyed\n");
			return -1;
		}
		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			D_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(il_set_decrypted_flag);
2665
2666
2667
2668
/*
 * il_txq_update_write_ptr - tell the device about the new TX queue
 * write pointer.  In power-save mode the NIC may be asleep and must be
 * asked to wake first.
 */
void
il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* wake up nic if it's powered down;
		 * uCode will wake up and interrupt us again, so next
		 * time we'll skip this part */
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
			       txq_id, reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx, so use the direct write.
		 */
	} else
		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
2705
2706
2707
2708
/*
 * il_tx_queue_unmap - free all pending TFDs of a TX queue, advancing
 * the read pointer until it catches up with the write pointer.
 */
void
il_tx_queue_unmap(struct il_priv *il, int txq_id)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;

	/* n_bd == 0 means the queue was never allocated */
	if (q->n_bd == 0)
		return;

	while (q->write_ptr != q->read_ptr) {
		il->ops->txq_free_tfd(il, txq);
		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}
EXPORT_SYMBOL(il_tx_queue_unmap);
2724
2725
2726
2727
2728
2729
2730
2731
2732
/*
 * il_tx_queue_free - deallocate a TX DMA queue.
 *
 * Empties the queue, frees all buffers and arrays, then 0-fills (but
 * does not free) the txq descriptor structure itself.
 */
void
il_tx_queue_free(struct il_priv *il, int txq_id)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_tx_queue_unmap(il, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_tx_queue_free);
2765
2766
2767
2768
/*
 * il_cmd_queue_unmap - unmap any in-flight host commands on the command
 * queue, including the dedicated "huge" command slot at index n_win.
 */
void
il_cmd_queue_unmap(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = il_get_cmd_idx(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_MAPPED) {
			pci_unmap_single(il->pci_dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 PCI_DMA_BIDIRECTIONAL);
			txq->meta[i].flags = 0;
		}

		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	/* Slot n_win is reserved for "huge" commands; check it too. */
	i = q->n_win;
	if (txq->meta[i].flags & CMD_MAPPED) {
		pci_unmap_single(il->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
		txq->meta[i].flags = 0;
	}
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
2803
2804
2805
2806
2807
2808
2809
2810
2811
/**
 * il_cmd_queue_free - Deallocate the host command queue.
 *
 * Unmap in-flight commands, free all command buffers (including the
 * extra oversized slot), the TFD circular buffer and the cmd/meta
 * arrays, then zero the embedded descriptor structure.
 */
void
il_cmd_queue_free(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_cmd_queue_unmap(il);

	/* De-alloc array of command buffers; the command queue has
	 * TFD_CMD_SLOTS + 1 entries (the "+1" is the huge command slot),
	 * hence the inclusive <= bound. */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs (shared with device) */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc the command and meta arrays themselves */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_cmd_queue_free);
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864int
2865il_queue_space(const struct il_queue *q)
2866{
2867 int s = q->read_ptr - q->write_ptr;
2868
2869 if (q->read_ptr > q->write_ptr)
2870 s -= q->n_bd;
2871
2872 if (s <= 0)
2873 s += q->n_win;
2874
2875 s -= 2;
2876 if (s < 0)
2877 s = 0;
2878 return s;
2879}
2880EXPORT_SYMBOL(il_queue_space);
2881
2882
2883
2884
2885
/**
 * il_queue_init - Initialize queue's high/low-water marks and read/write idxes
 */
static int
il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
{
	/* The circular buffer size must be a power of two so the
	 * inc/dec wrap helpers can rely on masking arithmetic. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	q->n_bd = TFD_QUEUE_SIZE_MAX;

	q->n_win = slots;
	q->id = id;

	/* The usable window must be a power of two as well. */
	BUG_ON(!is_power_of_2(slots));

	/* Low mark: wake the feeding stack when this many entries free. */
	q->low_mark = q->n_win / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	/* High mark: throttle the feeding stack below this free count. */
	q->high_mark = q->n_win / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
2916
2917
2918
2919
2920static int
2921il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2922{
2923 struct device *dev = &il->pci_dev->dev;
2924 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2925
2926
2927
2928 if (id != il->cmd_queue) {
2929 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(struct skb *),
2930 GFP_KERNEL);
2931 if (!txq->skbs) {
2932 IL_ERR("Fail to alloc skbs\n");
2933 goto error;
2934 }
2935 } else
2936 txq->skbs = NULL;
2937
2938
2939
2940 txq->tfds =
2941 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2942 if (!txq->tfds)
2943 goto error;
2944
2945 txq->q.id = id;
2946
2947 return 0;
2948
2949error:
2950 kfree(txq->skbs);
2951 txq->skbs = NULL;
2952
2953 return -ENOMEM;
2954}
2955
2956
2957
2958
2959int
2960il_tx_queue_init(struct il_priv *il, u32 txq_id)
2961{
2962 int i, len, ret;
2963 int slots, actual_slots;
2964 struct il_tx_queue *txq = &il->txq[txq_id];
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974 if (txq_id == il->cmd_queue) {
2975 slots = TFD_CMD_SLOTS;
2976 actual_slots = slots + 1;
2977 } else {
2978 slots = TFD_TX_CMD_SLOTS;
2979 actual_slots = slots;
2980 }
2981
2982 txq->meta =
2983 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
2984 txq->cmd =
2985 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
2986
2987 if (!txq->meta || !txq->cmd)
2988 goto out_free_arrays;
2989
2990 len = sizeof(struct il_device_cmd);
2991 for (i = 0; i < actual_slots; i++) {
2992
2993 if (i == slots)
2994 len = IL_MAX_CMD_SIZE;
2995
2996 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
2997 if (!txq->cmd[i])
2998 goto err;
2999 }
3000
3001
3002 ret = il_tx_queue_alloc(il, txq, txq_id);
3003 if (ret)
3004 goto err;
3005
3006 txq->need_update = 0;
3007
3008
3009
3010
3011
3012
3013 if (txq_id < 4)
3014 il_set_swq_id(txq, txq_id, txq_id);
3015
3016
3017 il_queue_init(il, &txq->q, slots, txq_id);
3018
3019
3020 il->ops->txq_init(il, txq);
3021
3022 return 0;
3023err:
3024 for (i = 0; i < actual_slots; i++)
3025 kfree(txq->cmd[i]);
3026out_free_arrays:
3027 kfree(txq->meta);
3028 kfree(txq->cmd);
3029
3030 return -ENOMEM;
3031}
3032EXPORT_SYMBOL(il_tx_queue_init);
3033
3034void
3035il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3036{
3037 int slots, actual_slots;
3038 struct il_tx_queue *txq = &il->txq[txq_id];
3039
3040 if (txq_id == il->cmd_queue) {
3041 slots = TFD_CMD_SLOTS;
3042 actual_slots = TFD_CMD_SLOTS + 1;
3043 } else {
3044 slots = TFD_TX_CMD_SLOTS;
3045 actual_slots = TFD_TX_CMD_SLOTS;
3046 }
3047
3048 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3049 txq->need_update = 0;
3050
3051
3052 il_queue_init(il, &txq->q, slots, txq_id);
3053
3054
3055 il->ops->txq_init(il, txq);
3056}
3057EXPORT_SYMBOL(il_tx_queue_reset);
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070int
3071il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
3072{
3073 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3074 struct il_queue *q = &txq->q;
3075 struct il_device_cmd *out_cmd;
3076 struct il_cmd_meta *out_meta;
3077 dma_addr_t phys_addr;
3078 unsigned long flags;
3079 int len;
3080 u32 idx;
3081 u16 fix_size;
3082
3083 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
3084 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
3085
3086
3087
3088
3089
3090
3091 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
3092 !(cmd->flags & CMD_SIZE_HUGE));
3093 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
3094
3095 if (il_is_rfkill(il) || il_is_ctkill(il)) {
3096 IL_WARN("Not sending command - %s KILL\n",
3097 il_is_rfkill(il) ? "RF" : "CT");
3098 return -EIO;
3099 }
3100
3101 spin_lock_irqsave(&il->hcmd_lock, flags);
3102
3103 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
3104 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3105
3106 IL_ERR("Restarting adapter due to command queue full\n");
3107 queue_work(il->workqueue, &il->restart);
3108 return -ENOSPC;
3109 }
3110
3111 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3112 out_cmd = txq->cmd[idx];
3113 out_meta = &txq->meta[idx];
3114
3115 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
3116 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3117 return -ENOSPC;
3118 }
3119
3120 memset(out_meta, 0, sizeof(*out_meta));
3121 out_meta->flags = cmd->flags | CMD_MAPPED;
3122 if (cmd->flags & CMD_WANT_SKB)
3123 out_meta->source = cmd;
3124 if (cmd->flags & CMD_ASYNC)
3125 out_meta->callback = cmd->callback;
3126
3127 out_cmd->hdr.cmd = cmd->id;
3128 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
3129
3130
3131
3132
3133 out_cmd->hdr.flags = 0;
3134 out_cmd->hdr.sequence =
3135 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
3136 if (cmd->flags & CMD_SIZE_HUGE)
3137 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
3138 len = sizeof(struct il_device_cmd);
3139 if (idx == TFD_CMD_SLOTS)
3140 len = IL_MAX_CMD_SIZE;
3141
3142#ifdef CONFIG_IWLEGACY_DEBUG
3143 switch (out_cmd->hdr.cmd) {
3144 case C_TX_LINK_QUALITY_CMD:
3145 case C_SENSITIVITY:
3146 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3147 "%d bytes at %d[%d]:%d\n",
3148 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3149 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3150 q->write_ptr, idx, il->cmd_queue);
3151 break;
3152 default:
3153 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3154 "%d bytes at %d[%d]:%d\n",
3155 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3156 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3157 idx, il->cmd_queue);
3158 }
3159#endif
3160
3161 phys_addr =
3162 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
3163 PCI_DMA_BIDIRECTIONAL);
3164 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
3165 idx = -ENOMEM;
3166 goto out;
3167 }
3168 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3169 dma_unmap_len_set(out_meta, len, fix_size);
3170
3171 txq->need_update = 1;
3172
3173 if (il->ops->txq_update_byte_cnt_tbl)
3174
3175 il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
3176
3177 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
3178 U32_PAD(cmd->len));
3179
3180
3181 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3182 il_txq_update_write_ptr(il, txq);
3183
3184out:
3185 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3186 return idx;
3187}
3188
3189
3190
3191
3192
3193
3194
3195
/**
 * il_hcmd_queue_reclaim - Reclaim command queue entries already Tx'd
 *
 * When the firmware advances its read idx, every entry between the old
 * and new read idx is reclaimed by walking q->read_ptr forward.  The
 * command queue normally completes one command at a time, so reclaiming
 * more than one entry indicates the firmware skipped commands - the
 * adapter is then scheduled for restart.
 */
static void
il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;

	/* Sanity: the completed idx must lie inside the used region. */
	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		/* More than one freed entry means commands were skipped. */
		if (nfreed++ > 0) {
			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
			       q->write_ptr, q->read_ptr);
			queue_work(il->workqueue, &il->restart);
		}

	}
}
3221
3222
3223
3224
3225
3226
3227
3228
3229
/**
 * il_tx_cmd_complete - handle completion of a host command
 * @il: device private data
 * @rxb: Rx buffer holding the command response
 *
 * Unmaps the completed command, hands the response to the waiter
 * (CMD_WANT_SKB) or the async callback, reclaims the queue entry and
 * wakes any synchronous caller blocked on the command.
 */
void
il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	int cmd_idx;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct il_device_cmd *cmd;
	struct il_cmd_meta *meta;
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command-routing bug has been introduced
	 * in the queue management code. */
	if (WARN
	    (txq_id != il->cmd_queue,
	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
	     il->txq[il->cmd_queue].q.write_ptr)) {
		il_print_hex_error(il, pkt, 32);
		return;
	}

	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
	cmd = txq->cmd[cmd_idx];
	meta = &txq->meta[cmd_idx];

	txq->time_stamp = jiffies;

	pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);

	/* Hand the response page to the synchronous waiter, or invoke the
	 * async callback if one was registered. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(il, cmd, pkt);

	spin_lock_irqsave(&il->hcmd_lock, flags);

	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(S_HCMD_ACTIVE, &il->status);
		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
		       il_get_cmd_string(cmd->hdr.cmd));
		wake_up(&il->wait_command_queue);
	}

	/* Mark the slot as unmapped so it can be reused. */
	meta->flags = 0;

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
}
EXPORT_SYMBOL(il_tx_cmd_complete);
3289
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/* Module parameter: when true, the uCode honors the BT priority line
 * for wifi/bluetooth coexistence; read-only via sysfs (S_IRUGO). */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug-level bitmask shared by the 3945 and 4965 drivers. */
u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

/* Broadcast MAC address (all ones). */
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3320
#define MAX_BIT_RATE_40_MHZ 150	/* Mbps */
#define MAX_BIT_RATE_20_MHZ 72	/* Mbps */
/* Fill in mac80211 HT capabilities for one band from hardware params. */
static void
il_init_ht_hw_capab(const struct il_priv *il,
		    struct ieee80211_sta_ht_cap *ht_info,
		    enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = il->hw_params.rx_chains_num;
	u8 tx_chains_num = il->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (il->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* rx_mask[4] bit 0 advertises MCS 32 (40 MHz duplicate) */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (il->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* One Rx MCS octet (MCS 0-7 per stream) per available Rx chain. */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate scales with Rx chain count. */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS params; advertise a stream-count difference if the Tx
	 * chain count differs from Rx. */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |=
		    ((tx_chains_num -
		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
3372
3373
3374
3375
/**
 * il_init_geos - Initialize mac80211's geo/channel info from channel_info
 *
 * Builds the per-band channel and bitrate tables that mac80211 uses,
 * from the driver's EEPROM-derived channel_info array.  Ownership of
 * the two allocations is kept in il->ieee_channels / il->ieee_rates
 * and released by il_free_geos().
 */
int
il_init_geos(struct il_priv *il)
{
	struct il_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = 0;

	/* Already configured? Just (re)set the status bit and bail. */
	if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		D_INFO("Geography modes already initialized.\n");
		set_bit(S_GEO_CONFIGURED, &il->status);
		return 0;
	}

	channels =
	    kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
		    GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates =
	    kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
		    GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels in the array */
	sband = &il->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
	/* 5 GHz: OFDM rates only */
	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);

	sband = &il->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* 2.4 GHz: CCK and OFDM rates */
	sband->bitrates = rates;
	sband->n_bitrates = RATE_COUNT_LEGACY;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);

	il->ieee_channels = channels;
	il->ieee_rates = rates;

	/* Populate each band's channel array from channel_info. */
	for (i = 0; i < il->channel_count; i++) {
		ch = &il->channel_info[i];

		if (!il_is_channel_valid(ch))
			continue;

		sband = &il->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
		    ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (il_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* Track the largest regulatory Tx power seen. */
			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
		       geo_ch->center_freq,
		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
		       geo_ch->
		       flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
		       geo_ch->flags);
	}

	il->tx_power_device_lmt = max_tx_power;
	il->tx_power_user_lmt = max_tx_power;
	il->tx_power_next = max_tx_power;

	/* SKU claims 5 GHz support but no 5 GHz channels were found. */
	if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
	    (il->cfg->sku & IL_SKU_A)) {
		IL_INFO("Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			il->pci_dev->device, il->pci_dev->subsystem_device);
		il->cfg->sku &= ~IL_SKU_A;
	}

	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		il->bands[IEEE80211_BAND_2GHZ].n_channels,
		il->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(S_GEO_CONFIGURED, &il->status);

	return 0;
}
EXPORT_SYMBOL(il_init_geos);
3493
3494
3495
3496
3497void
3498il_free_geos(struct il_priv *il)
3499{
3500 kfree(il->ieee_channels);
3501 kfree(il->ieee_rates);
3502 clear_bit(S_GEO_CONFIGURED, &il->status);
3503}
3504EXPORT_SYMBOL(il_free_geos);
3505
3506static bool
3507il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3508 u16 channel, u8 extension_chan_offset)
3509{
3510 const struct il_channel_info *ch_info;
3511
3512 ch_info = il_get_channel_info(il, band, channel);
3513 if (!il_is_channel_valid(ch_info))
3514 return false;
3515
3516 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3517 return !(ch_info->
3518 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3519 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3520 return !(ch_info->
3521 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3522
3523 return false;
3524}
3525
/* Decide whether HT40 transmission is currently permitted, combining
 * driver HT state, the peer's capabilities and the channel's HT40 rules. */
bool
il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!il->ht.enabled || !il->ht.is_40mhz)
		return false;

	/* A NULL ht_cap means "don't check the peer" (e.g. when setting up
	 * our own RXON); a non-NULL ht_cap must report HT support. */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	/* debugfs override to force 20 MHz operation */
	if (il->disable_ht40)
		return false;
#endif

	return il_is_channel_extension(il, il->band,
				       le16_to_cpu(il->staging.channel),
				       il->ht.extension_chan_offset);
}
EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3549
3550static u16
3551il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3552{
3553 u16 new_val;
3554 u16 beacon_factor;
3555
3556
3557
3558
3559
3560 if (!beacon_val)
3561 return DEFAULT_BEACON_INTERVAL;
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3576 new_val = beacon_val / beacon_factor;
3577
3578 if (!new_val)
3579 new_val = max_beacon_val;
3580
3581 return new_val;
3582}
3583
/**
 * il_send_rxon_timing - send C_RXON_TIMING to the uCode
 *
 * Fills il->timing (beacon interval, time-to-next-beacon, DTIM period,
 * listen interval) from the current vif and mac80211 configuration and
 * issues the command.  Caller must hold il->mutex.
 */
int
il_send_rxon_timing(struct il_priv *il)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = il->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));

	il->timing.timestamp = cpu_to_le64(il->timestamp);
	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/* ATIM window is always 0 here; IBSS ATIM support would need the
	 * value from mac80211. */
	il->timing.atim_win = 0;

	/* Clamp the beacon interval to what the hardware supports. */
	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	il->timing.beacon_interval = cpu_to_le16(beacon_int);

	/* Compute time remaining until the next beacon, relative to the
	 * current TSF (do_div modifies tsf in place - use a copy). */
	tsf = il->timestamp;
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period of 0 is invalid - substitute 1. */
	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(il->timing.beacon_interval),
		le32_to_cpu(il->timing.beacon_init_val),
		le16_to_cpu(il->timing.atim_win));

	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
			       &il->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3632
3633void
3634il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3635{
3636 struct il_rxon_cmd *rxon = &il->staging;
3637
3638 if (hw_decrypt)
3639 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3640 else
3641 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3642
3643}
3644EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3645
3646
3647int
3648il_check_rxon_cmd(struct il_priv *il)
3649{
3650 struct il_rxon_cmd *rxon = &il->staging;
3651 bool error = false;
3652
3653 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3654 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3655 IL_WARN("check 2.4G: wrong narrow\n");
3656 error = true;
3657 }
3658 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3659 IL_WARN("check 2.4G: wrong radar\n");
3660 error = true;
3661 }
3662 } else {
3663 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3664 IL_WARN("check 5.2G: not short slot!\n");
3665 error = true;
3666 }
3667 if (rxon->flags & RXON_FLG_CCK_MSK) {
3668 IL_WARN("check 5.2G: CCK!\n");
3669 error = true;
3670 }
3671 }
3672 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3673 IL_WARN("mac/bssid mcast!\n");
3674 error = true;
3675 }
3676
3677
3678 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3679 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3680 IL_WARN("neither 1 nor 6 are basic\n");
3681 error = true;
3682 }
3683
3684 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3685 IL_WARN("aid > 2007\n");
3686 error = true;
3687 }
3688
3689 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3690 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3691 IL_WARN("CCK and short slot\n");
3692 error = true;
3693 }
3694
3695 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3696 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3697 IL_WARN("CCK and auto detect");
3698 error = true;
3699 }
3700
3701 if ((rxon->
3702 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3703 RXON_FLG_TGG_PROTECT_MSK) {
3704 IL_WARN("TGg but no auto-detect\n");
3705 error = true;
3706 }
3707
3708 if (error)
3709 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3710
3711 if (error) {
3712 IL_ERR("Invalid RXON\n");
3713 return -EINVAL;
3714 }
3715 return 0;
3716}
3717EXPORT_SYMBOL(il_check_rxon_cmd);
3718
3719
3720
3721
3722
3723
3724
3725
3726
/**
 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @il: staging RXON is compared against the active RXON
 *
 * If the RXON structure is changing enough to require a new tune, or is
 * clearing the RXON_FILTER_ASSOC_MSK, return 1 to indicate that a full
 * RXON command (rather than an RXON_ASSOC command) is required.
 */
int
il_full_rxon_required(struct il_priv *il)
{
	const struct il_rxon_cmd *staging = &il->staging;
	const struct il_rxon_cmd *active = &il->active;

#define CHK(cond) \
	if ((cond)) { \
		D_INFO("need full RXON - " #cond "\n"); \
		return 1; \
	}

#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		D_INFO("need full RXON - " \
		       #c1 " != " #c2 " - %d != %d\n", \
		       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!il_is_associated(il));
	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
			      active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* The remaining fields can be updated with RXON_ASSOC, but only
	 * some flag transitions are allowed that way. */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching the association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(il_full_rxon_required);
3780
3781u8
3782il_get_lowest_plcp(struct il_priv *il)
3783{
3784
3785
3786
3787
3788 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3789 return RATE_1M_PLCP;
3790 else
3791 return RATE_6M_PLCP;
3792}
3793EXPORT_SYMBOL(il_get_lowest_plcp);
3794
/* Translate the driver's HT state into staging RXON flag bits:
 * protection mode, channel width, and control-channel location. */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	struct il_rxon_cmd *rxon = &il->staging;

	if (!il->ht.enabled) {
		/* HT disabled: clear all HT-related RXON flags. */
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |=
	    cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth: 20 MHz only, 20/40 mixed, or pure 40
	 * if HT40 is allowed.  Clear the mode bits before setting them. */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, NULL)) {
		/* pure HT40 */
		if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* control channel is on the opposite side of the
			 * extension channel */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* mixed 20/40 mode */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* extension channel location only valid in
				 * mixed mode */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		il->ht.protection, il->ht.extension_chan_offset);
}
3860
/* Public wrapper around _il_set_rxon_ht(). */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3867
3868
/* Return a valid channel number on @band different from the currently
 * staged channel (0 if none was found). */
u8
il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
{
	const struct il_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;

	/* NOTE(review): min/max are IDXES into channel_info[], not channel
	 * numbers - this assumes entries 0..13 of the table are the 2.4 GHz
	 * channels; confirm against the EEPROM channel table layout. */
	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = il->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		channel = il->channel_info[i].channel;
		/* skip the channel we are already on */
		if (channel == le16_to_cpu(il->staging.channel))
			continue;

		ch_info = il_get_channel_info(il, band, channel);
		if (il_is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(il_get_single_channel_number);
3898
3899
3900
3901
3902
3903
3904
3905
3906int
3907il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3908{
3909 enum ieee80211_band band = ch->band;
3910 u16 channel = ch->hw_value;
3911
3912 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3913 return 0;
3914
3915 il->staging.channel = cpu_to_le16(channel);
3916 if (band == IEEE80211_BAND_5GHZ)
3917 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3918 else
3919 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3920
3921 il->band = band;
3922
3923 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3924
3925 return 0;
3926}
3927EXPORT_SYMBOL(il_set_rxon_channel);
3928
3929void
3930il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
3931 struct ieee80211_vif *vif)
3932{
3933 if (band == IEEE80211_BAND_5GHZ) {
3934 il->staging.flags &=
3935 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3936 RXON_FLG_CCK_MSK);
3937 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3938 } else {
3939
3940 if (vif && vif->bss_conf.use_short_slot)
3941 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3942 else
3943 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3944
3945 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3946 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3947 il->staging.flags &= ~RXON_FLG_CCK_MSK;
3948 }
3949}
3950EXPORT_SYMBOL(il_set_flags_for_band);
3951
3952
3953
3954
/**
 * il_connection_init_rx_config - Build a fresh staging RXON for il->iw_mode
 */
void
il_connection_init_rx_config(struct il_priv *il)
{
	const struct il_channel_info *ch_info;

	memset(&il->staging, 0, sizeof(il->staging));

	/* Device type and filter flags depend on the interface mode. */
	switch (il->iw_mode) {
	case NL80211_IFTYPE_UNSPECIFIED:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		break;
	case NL80211_IFTYPE_STATION:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	case NL80211_IFTYPE_ADHOC:
		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		il->staging.filter_flags =
		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	default:
		IL_ERR("Unsupported interface type %d\n", il->vif->type);
		return;
	}

#if 0
	/* dead code kept for reference: mirror mac80211's short-preamble
	 * setting into the staging RXON */
	if (!hw_to_local(il->hw)->short_preamble)
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* Start from the currently active channel; fall back to the first
	 * known channel if it is not in the table. */
	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));

	if (!ch_info)
		ch_info = &il->channel_info[0];

	il->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, il->band, il->vif);

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	il->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIXED and PURE40 channel mode flags */
	il->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (il->vif)
		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);

	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
4016
4017void
4018il_set_rate(struct il_priv *il)
4019{
4020 const struct ieee80211_supported_band *hw = NULL;
4021 struct ieee80211_rate *rate;
4022 int i;
4023
4024 hw = il_get_hw_mode(il, il->band);
4025 if (!hw) {
4026 IL_ERR("Failed to set rate: unable to get hw mode\n");
4027 return;
4028 }
4029
4030 il->active_rate = 0;
4031
4032 for (i = 0; i < hw->n_bitrates; i++) {
4033 rate = &(hw->bitrates[i]);
4034 if (rate->hw_value < RATE_COUNT_LEGACY)
4035 il->active_rate |= (1 << rate->hw_value);
4036 }
4037
4038 D_RATE("Set active_rate = %0x\n", il->active_rate);
4039
4040 il->staging.cck_basic_rates =
4041 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4042
4043 il->staging.ofdm_basic_rates =
4044 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4045}
4046EXPORT_SYMBOL(il_set_rate);
4047
4048void
4049il_chswitch_done(struct il_priv *il, bool is_success)
4050{
4051 if (test_bit(S_EXIT_PENDING, &il->status))
4052 return;
4053
4054 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4055 ieee80211_chswitch_done(il->vif, is_success);
4056}
4057EXPORT_SYMBOL(il_chswitch_done);
4058
/* Handle a channel-switch-announcement notification from the uCode. */
void
il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);
	struct il_rxon_cmd *rxon = (void *)&il->active;

	/* Ignore stray notifications when no switch is pending. */
	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	/* status == 0 means the uCode switched successfully.  The channel
	 * comparison is done on the raw (presumably little-endian) values
	 * on both sides, so no byte-swap is needed here. */
	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		rxon->channel = csa->channel;
		il->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);
4081
4082#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il_print_rx_config_cmd - dump the staging RXON command for debugging
 *
 * Emits a raw hex dump of the whole command followed by the individual
 * fields, converted from little-endian where applicable. Compiled only
 * under CONFIG_IWLEGACY_DEBUG (see surrounding #ifdef).
 */
void
il_print_rx_config_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
4101#endif
4102
4103
4104
/*
 * il_irq_handle_error - handle a firmware (uCode) error interrupt
 *
 * Marks the firmware as failed, releases any waiter stuck on a host
 * command, dumps diagnostic state, and optionally schedules a restart
 * of the adapter (gated by the restart_fw module parameter).
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Set the FW error flag -- cleared only on il_down */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel currently queued command so the waiter below is released. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	/* Dump device-specific error log; FH dump is optional per-ops. */
	il->ops->dump_nic_error_log(il);
	if (il->ops->dump_fh)
		il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il);
#endif

	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit. */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);
4139
4140static int
4141_il_apm_stop_master(struct il_priv *il)
4142{
4143 int ret = 0;
4144
4145
4146 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4147
4148 ret =
4149 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4150 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4151 if (ret < 0)
4152 IL_WARN("Master Disable Timed Out, 100 usec\n");
4153
4154 D_INFO("stop master\n");
4155
4156 return ret;
4157}
4158
/*
 * _il_apm_stop - put the card into a low-power state
 *
 * Caller must hold il->reg_lock (asserted below). Stops bus mastering,
 * issues a software reset, and clears INIT_DONE so the device drops out
 * of the initialized state. The statement order mirrors the hardware's
 * required shutdown sequence and must not be rearranged.
 */
void
_il_apm_stop(struct il_priv *il)
{
	lockdep_assert_held(&il->reg_lock);

	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	_il_apm_stop_master(il);

	/* Reset the entire device */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* Brief settle time after reset -- required by the hardware. */
	udelay(10);

	/* Clear "initialization complete" so the device goes to sleep. */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(_il_apm_stop);
4181
4182void
4183il_apm_stop(struct il_priv *il)
4184{
4185 unsigned long flags;
4186
4187 spin_lock_irqsave(&il->reg_lock, flags);
4188 _il_apm_stop(il);
4189 spin_unlock_irqrestore(&il->reg_lock, flags);
4190}
4191EXPORT_SYMBOL(il_apm_stop);
4192
4193
4194
4195
4196
4197
/*
 * il_apm_init - bring up the card's basic (APM) functionality
 *
 * Performs the one-time power-management/clock bring-up sequence:
 * chicken-bit workarounds, L0S/L1 power-state configuration from the
 * PCIe link-control register, optional PLL configuration, then sets
 * INIT_DONE and waits for the MAC clock to become ready before enabling
 * DMA (and, for BSM-based devices, BSM) clocks.
 *
 * Returns 0 on success or a negative poll-timeout error. The statement
 * order follows the hardware init sequence and must be preserved.
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/* Disable L0S exit timer (chicken-bit workaround; assumed
	 * platform/NIC erratum -- NOTE(review): confirm against HW docs). */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* Disable L0s without affecting L1 (chicken-bit workaround). */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold via the HPET memory debug register. */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/* Allow wakeup from L1 on Host Access Pending. */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * Configure the device to favor whichever ASPM state the PCIe
	 * link actually supports: if L1 is enabled on the link, disable
	 * L0S in the device; otherwise enable L0S.
	 */
	if (il->cfg->set_l0s) {
		pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
		if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
			/* L1-ASPM enabled; disable(!) L0S  */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Device-specific PLL configuration, when the config provides one. */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG,
			   il->cfg->pll_cfg_val);

	/* Signal that init is done so the MAC clock can start. */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Wait (up to 25 ms) for the MAC clock to stabilize. */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock requests; devices that use the bootstrap
	 * state machine (BSM) to load firmware need its clock too.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active in the PCI device status/trigger register. */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);
4304
/*
 * il_set_tx_power - set the user TX power limit
 * @tx_power: requested limit (dBm); must be >= 0 and not exceed the
 *            device limit
 * @force: apply immediately even if scanning or if RXON changes are
 *         pending (otherwise the request is deferred via tx_power_next)
 *
 * Caller must hold il->mutex. Returns 0 on success (including the
 * deferred case), -EINVAL for out-of-range power, -EOPNOTSUPP when the
 * device has no send_tx_power op, -EIO when RF is not ready, or the
 * error from the device op (in which case the old limit is restored).
 */
int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;

	lockdep_assert_held(&il->mutex);

	/* Nothing to do if the limit is unchanged and not forced. */
	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->ops->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm mean 1 milliwatt */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Remember the request so it can be applied later (e.g. after a
	 * scan or once pending RXON changes are committed). */
	il->tx_power_next = tx_power;

	/* Defer while scanning or while staging differs from active RXON. */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&il->active, &il->staging, sizeof(il->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->ops->send_tx_power(il);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);
4360
4361void
4362il_send_bt_config(struct il_priv *il)
4363{
4364 struct il_bt_cmd bt_cmd = {
4365 .lead_time = BT_LEAD_TIME_DEF,
4366 .max_kill = BT_MAX_KILL_DEF,
4367 .kill_ack_mask = 0,
4368 .kill_cts_mask = 0,
4369 };
4370
4371 if (!bt_coex_active)
4372 bt_cmd.flags = BT_COEX_DISABLE;
4373 else
4374 bt_cmd.flags = BT_COEX_ENABLE;
4375
4376 D_INFO("BT coex %s\n",
4377 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4378
4379 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4380 IL_ERR("failed to send BT Coex Config\n");
4381}
4382EXPORT_SYMBOL(il_send_bt_config);
4383
4384int
4385il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4386{
4387 struct il_stats_cmd stats_cmd = {
4388 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4389 };
4390
4391 if (flags & CMD_ASYNC)
4392 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4393 &stats_cmd, NULL);
4394 else
4395 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4396 &stats_cmd);
4397}
4398EXPORT_SYMBOL(il_send_stats_request);
4399
/*
 * il_hdl_pm_sleep - handle the uCode sleep notification
 *
 * Debug-only: logs the reported sleep mode and wakeup source. With
 * CONFIG_IWLEGACY_DEBUG disabled the notification is silently ignored.
 */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4411
4412void
4413il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4414{
4415 struct il_rx_pkt *pkt = rxb_addr(rxb);
4416 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4417 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4418 il_get_cmd_string(pkt->hdr.cmd));
4419 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4420}
4421EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4422
/*
 * il_hdl_error - handle the uCode error-reply notification
 *
 * Logs the error type, the offending command (by name and id), the bad
 * sequence number, and the extra error info word. Purely diagnostic;
 * no recovery is attempted here.
 */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4437
/* il_clear_isr_stats - reset the accumulated interrupt counters. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4443
4444int
4445il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
4446 const struct ieee80211_tx_queue_params *params)
4447{
4448 struct il_priv *il = hw->priv;
4449 unsigned long flags;
4450 int q;
4451
4452 D_MAC80211("enter\n");
4453
4454 if (!il_is_ready_rf(il)) {
4455 D_MAC80211("leave - RF not ready\n");
4456 return -EIO;
4457 }
4458
4459 if (queue >= AC_NUM) {
4460 D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
4461 return 0;
4462 }
4463
4464 q = AC_NUM - 1 - queue;
4465
4466 spin_lock_irqsave(&il->lock, flags);
4467
4468 il->qos_data.def_qos_parm.ac[q].cw_min =
4469 cpu_to_le16(params->cw_min);
4470 il->qos_data.def_qos_parm.ac[q].cw_max =
4471 cpu_to_le16(params->cw_max);
4472 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
4473 il->qos_data.def_qos_parm.ac[q].edca_txop =
4474 cpu_to_le16((params->txop * 32));
4475
4476 il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
4477
4478 spin_unlock_irqrestore(&il->lock, flags);
4479
4480 D_MAC80211("leave\n");
4481 return 0;
4482}
4483EXPORT_SYMBOL(il_mac_conf_tx);
4484
4485int
4486il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4487{
4488 struct il_priv *il = hw->priv;
4489 int ret;
4490
4491 D_MAC80211("enter\n");
4492
4493 ret = (il->ibss_manager == IL_IBSS_MANAGER);
4494
4495 D_MAC80211("leave ret %d\n", ret);
4496 return ret;
4497}
4498EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4499
/*
 * il_set_mode - rebuild and commit the RXON configuration
 *
 * Re-initializes the staging RXON from the current interface mode,
 * lets the device-specific op adjust the RX chain (if provided), then
 * commits staging to the device. Returns il_commit_rxon's result.
 */
static int
il_set_mode(struct il_priv *il)
{
	il_connection_init_rx_config(il);

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	return il_commit_rxon(il);
}
4510
/*
 * il_mac_add_interface - mac80211 add_interface callback
 *
 * This hardware supports a single virtual interface. Adding the same
 * vif again is treated as a "reset" (allowed); a second, different vif
 * is rejected with -EOPNOTSUPP. On mode-set failure for a new vif the
 * driver state is rolled back to no-interface/STATION.
 */
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	int err;
	bool reset;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * We do not support multiple virtual interfaces, but on hardware
	 * reset we have to add the same interface again.
	 */
	reset = (il->vif == vif);
	if (il->vif && !reset) {
		err = -EOPNOTSUPP;
		goto out;
	}

	il->vif = vif;
	il->iw_mode = vif->type;

	err = il_set_mode(il);
	if (err) {
		IL_WARN("Fail to set mode %d\n", vif->type);
		/* Only roll back if this was a fresh add, not a reset. */
		if (!reset) {
			il->vif = NULL;
			il->iw_mode = NL80211_IFTYPE_STATION;
		}
	}

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
4556
/*
 * il_teardown_interface - common teardown when a vif goes away/changes
 *
 * Caller must hold il->mutex. Cancels any scan owned by this vif
 * (waiting up to 200 ms), then re-commits the RXON configuration via
 * il_set_mode().
 */
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	il_set_mode(il);
}
4569
/*
 * il_mac_remove_interface - mac80211 remove_interface callback
 *
 * Clears the single-vif driver state (vif pointer, iw_mode, cached
 * BSSID) and performs the common teardown. The WARN_ON guards against
 * mac80211 removing a vif we never tracked.
 */
void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	WARN_ON(il->vif != vif);
	il->vif = NULL;
	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
	il_teardown_interface(il, vif);
	memset(il->bssid, 0, ETH_ALEN);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_remove_interface);
4588
4589int
4590il_alloc_txq_mem(struct il_priv *il)
4591{
4592 if (!il->txq)
4593 il->txq =
4594 kzalloc(sizeof(struct il_tx_queue) *
4595 il->cfg->num_of_queues, GFP_KERNEL);
4596 if (!il->txq) {
4597 IL_ERR("Not enough memory for txq\n");
4598 return -ENOMEM;
4599 }
4600 return 0;
4601}
4602EXPORT_SYMBOL(il_alloc_txq_mem);
4603
/*
 * il_free_txq_mem - free the TX queue array allocated by
 * il_alloc_txq_mem() and reset the pointer so a later alloc is safe.
 */
void
il_free_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_free_txq_mem);
4611
/*
 * il_force_reset - force a firmware reload / adapter restart
 * @external: true when requested from outside the driver (e.g. debugfs);
 *            external requests bypass the rate-limit window and the
 *            restart_fw module-parameter gate
 *
 * Internal requests arriving within reset_duration of the previous one
 * are rejected with -EAGAIN. Returns 0 when the restart was scheduled
 * (or deliberately skipped because restart_fw is off), -EINVAL during
 * teardown.
 */
int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* Rate-limit internally generated resets. */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * If the user asked not to restart firmware on error, honor it
	 * for internal requests; external requests always proceed.
	 */
	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared only on il_down */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit. */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
EXPORT_SYMBOL(il_force_reset);
4664
/*
 * il_mac_change_interface - mac80211 change_interface callback
 *
 * Switches the single vif to a new interface type in place. P2P is not
 * supported (-EOPNOTSUPP). Requires an existing vif and a ready RF
 * (-EBUSY otherwise). On success the vif type is rewritten and the
 * RXON configuration is rebuilt via il_teardown_interface().
 */
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	int err;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
		   vif->type, vif->addr, newtype, newp2p);

	if (newp2p) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!il->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* success */
	vif->type = newtype;
	vif->p2p = false;
	il->iw_mode = newtype;
	il_teardown_interface(il, vif);
	err = 0;

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);
4704
/*
 * il_mac_flush - mac80211 flush callback
 *
 * Waits (up to ~500 ms total) for the data TX queues to drain, sleeping
 * 20 ms between checks; the command queue is skipped. On timeout the
 * first still-busy queue is logged and the wait is abandoned.
 *
 * NOTE(review): the loop advances to the next queue after each sleep
 * rather than re-checking the same queue until empty -- presumably each
 * queue is revisited only implicitly via the overall deadline; confirm
 * this matches the intended flush semantics.
 */
void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct il_priv *il = hw->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	int i;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (il->txq == NULL)
		goto out;

	for (i = 0; i < il->hw_params.max_txq_num; i++) {
		struct il_queue *q;

		/* The command queue is not flushed here. */
		if (i == il->cmd_queue)
			continue;

		q = &il->txq[i].q;
		if (q->read_ptr == q->write_ptr)
			continue;

		if (time_after(jiffies, timeout)) {
			IL_ERR("Failed to flush queue %d\n", q->id);
			break;
		}

		msleep(20);
	}
out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_flush);
4739
4740
4741
4742
4743
4744static int
4745il_check_stuck_queue(struct il_priv *il, int cnt)
4746{
4747 struct il_tx_queue *txq = &il->txq[cnt];
4748 struct il_queue *q = &txq->q;
4749 unsigned long timeout;
4750 unsigned long now = jiffies;
4751 int ret;
4752
4753 if (q->read_ptr == q->write_ptr) {
4754 txq->time_stamp = now;
4755 return 0;
4756 }
4757
4758 timeout =
4759 txq->time_stamp +
4760 msecs_to_jiffies(il->cfg->wd_timeout);
4761
4762 if (time_after(now, timeout)) {
4763 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4764 jiffies_to_msecs(now - txq->time_stamp));
4765 ret = il_force_reset(il, false);
4766 return (ret == -EAGAIN) ? 0 : 1;
4767 }
4768
4769 return 0;
4770}
4771
4772
4773
4774
4775
/* Watchdog tick interval: check the queues 4 times per wd_timeout. */
#define IL_WD_TICK(timeout) ((timeout) / 4)

/*
 * il_bg_watchdog - periodic timer callback watching for stuck TX queues
 * @data: the il_priv pointer, cast through the (pre-timer_setup-era)
 *        unsigned long timer argument
 *
 * Checks the command queue first, then every data queue. If any check
 * triggers a reset the timer is NOT re-armed (a restart will set it up
 * again); otherwise it re-arms itself one tick into the future.
 */
void
il_bg_watchdog(unsigned long data)
{
	struct il_priv *il = (struct il_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	timeout = il->cfg->wd_timeout;
	if (timeout == 0)		/* watchdog disabled */
		return;

	/* monitor and check for stuck command queue */
	if (il_check_stuck_queue(il, il->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		/* skip as we already checked the command queue */
		if (cnt == il->cmd_queue)
			continue;
		if (il_check_stuck_queue(il, cnt))
			return;
	}

	mod_timer(&il->watchdog,
		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(il_bg_watchdog);
4813
4814void
4815il_setup_watchdog(struct il_priv *il)
4816{
4817 unsigned int timeout = il->cfg->wd_timeout;
4818
4819 if (timeout)
4820 mod_timer(&il->watchdog,
4821 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4822 else
4823 del_timer(&il->watchdog);
4824}
4825EXPORT_SYMBOL(il_setup_watchdog);
4826
4827
4828
4829
4830
4831
4832
/*
 * il_usecs_to_beacons - convert microseconds to the device beacon-time
 * format
 *
 * The device packs beacon time as (beacon count << tsf_bits) | tsf
 * remainder, where tsf_bits comes from hw_params.beacon_time_tsf_bits.
 * The quotient and remainder of usec / beacon interval are each masked
 * to their respective bit fields before being combined. Returns 0 for
 * a zero interval or zero usec.
 */
u32
il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	/* Beacon count, confined to the high field of the packed word. */
	quot =
	    (usec /
	     interval) & (il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits) >> il->
			  hw_params.beacon_time_tsf_bits);
	/* Leftover usecs, confined to the low (TSF) field. */
	rem =
	    (usec % interval) & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);

	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(il_usecs_to_beacons);
4857
4858
4859
4860
/*
 * il_add_beacon_time - add two values in the device beacon-time format
 *
 * Both @base and @addon are packed as (beacon count << tsf_bits) | tsf
 * remainder (see il_usecs_to_beacons). The high (count) fields are
 * summed directly; the low (TSF) fields are combined with an explicit
 * carry into the count field whenever they wrap past the beacon
 * interval (the base_low <= addon_low branches). Returns the packed
 * result as little-endian for direct use in device commands.
 */
__le32
il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
		   u32 beacon_interval)
{
	u32 base_low = base & il_beacon_time_mask_low(il,
						      il->hw_params.
						      beacon_time_tsf_bits);
	u32 addon_low = addon & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits)) +
	    (addon & il_beacon_time_mask_high(il,
					      il->hw_params.
					      beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		/* Low field wrapped: fold in the interval and carry one
		 * beacon into the high field. */
		res += interval + base_low - addon_low;
		res += (1 << il->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << il->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(il_add_beacon_time);
4890
4891#ifdef CONFIG_PM_SLEEP
4892
/*
 * il_pci_suspend - PM suspend hook
 *
 * Puts the card into a low-power state via il_apm_stop(). PCI config
 * save/restore and power-state transitions are handled by the PCI core
 * around this callback, so only the device-specific quiesce is done
 * here.
 */
static int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/*
	 * This function is called when system goes into suspend state.
	 * Stop the APM so the device does not wake the system.
	 */
	il_apm_stop(il);

	return 0;
}
4910
/*
 * il_pci_resume - PM resume hook
 *
 * Clears the PCI retry-timeout config byte (workaround -- presumably to
 * avoid PCIe completion-retry issues after resume; NOTE(review): confirm
 * against the original erratum), re-enables interrupts, then samples
 * the hardware RF-kill line and propagates its state to the status bits
 * and wiphy.
 */
static int
il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	/* The HW_RF_KILL_SW bit is active-low: clear means kill asserted. */
	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}

SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
EXPORT_SYMBOL(il_pm_ops);
4941
4942#endif
4943
/*
 * il_update_qos - rebuild the QoS flags and push them to the uCode
 *
 * Recomputes def_qos_parm.qos_flags from scratch (EDCA-update when QoS
 * is active, TGN when HT is enabled) and sends the QoS parameter block
 * asynchronously. No-op during teardown.
 */
static void
il_update_qos(struct il_priv *il)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il->qos_data.def_qos_parm.qos_flags = 0;

	if (il->qos_data.qos_active)
		il->qos_data.def_qos_parm.qos_flags |=
		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (il->ht.enabled)
		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
			      &il->qos_data.def_qos_parm, NULL);
}
4965
4966
4967
4968
/*
 * il_mac_config - mac80211 config callback
 *
 * Applies channel, SMPS, HT, power-save, and TX-power changes to the
 * staging RXON under il->mutex. Channel changes are skipped while a
 * scan is active. At the end, if the staging RXON differs from the
 * active one it is committed, and a QoS update is sent when the HT
 * enable state changed. The statement order (lock scope, staging vs.
 * commit) is significant; do not reorder.
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		il->current_ht_config.smps = conf->smps_mode;

		/* Let the device-specific op pick the RX chain for the
		 * new SMPS/channel configuration. */
		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* During scanning the channel is not switched here; the scan
	 * machinery owns the channel until it finishes. */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Track HT enable transitions to trigger a QoS update
		 * after the RXON commit below. */
		if (il->ht.enabled != conf_is_ht(conf)) {
			il->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (il->ht.enabled) {
			/* Derive 40 MHz state and the extension-channel
			 * offset from the mac80211 channel definition. */
			if (conf_is_ht40_minus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				il->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				il->ht.is_40mhz = true;
			} else {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				il->ht.is_40mhz = false;
			}
		} else
			il->ht.is_40mhz = false;

		/* Default to no protection; updated later from the BSS
		 * HT operation mode (il_ht_conf). */
		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* If the channel actually changes, reset the staging
		 * flags so band-specific flags are rebuilt below. */
		if ((le16_to_cpu(il->staging.channel) != ch))
			il->staging.flags = 0;

		il_set_rxon_channel(il, channel);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, channel->band, il->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->ops->update_bcast_stations)
			ret = il->ops->update_bcast_stations(il);

set_ch_out:
		/* The list of supported rates and rate mask can be
		 * different for each band; keep them in sync. */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il);

out:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_config);
5119
/*
 * il_mac_reset_tsf - mac80211 reset_tsf callback
 *
 * Drops all cached association state: clears the HT config, frees any
 * saved beacon, zeros the timestamp, cancels scans, then commits a
 * non-associated RXON and refreshes the rate configuration.
 */
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	spin_lock_irqsave(&il->lock, flags);

	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));

	/* new association get rid of ibss beacon skb */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;
	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting association process */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il);

	il_set_rate(il);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);
5158
/*
 * il_ht_conf - refresh HT state from the vif's BSS configuration
 *
 * Updates the protection mode and non-greenfield-STA flag from the BSS
 * HT operation mode, and decides whether a single RX chain suffices:
 * for a station, by inspecting the AP's advertised MCS set (one TX
 * stream or no rates beyond the first RX mask byte); for IBSS, always.
 * No-op when HT is disabled.
 */
static void
il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

	D_ASSOC("enter:\n");

	if (!il->ht.enabled)
		return;

	il->ht.protection =
	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	il->ht.non_gf_sta_present =
	    !!(bss_conf->
	       ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			/* Max TX streams the peer supports (field is
			 * zero-based, hence the +1). */
			maxstreams =
			    (ht_cap->mcs.
			     tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			/* Peer advertises no MCS above the first RX mask
			 * byte: one chain is enough. */
			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnected us while we're still
			 * configuring things; be conservative.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}
5218
/*
 * il_set_no_assoc - commit an RXON that marks us unassociated
 *
 * Clears the association filter flag and assoc_id in the staging RXON
 * and commits it, so frames are no longer filtered as if associated.
 */
static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
	/*
	 * Inform the uCode that there is no longer an association and
	 * that no more packets should be sent.
	 */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il->staging.assoc_id = 0;
	il_commit_rxon(il);
}
5231
/*
 * il_beacon_update - fetch a fresh beacon from mac80211 and store it
 *
 * Caller must hold il->mutex (asserted). Replaces the cached beacon
 * skb, records the beacon timestamp, then runs the device-specific
 * post_associate hook (which is expected to transmit it). Bails out if
 * beaconing is not enabled or RF is not ready.
 */
static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("update beacon with no beaconing enabled\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	/* Drop the previously cached beacon before storing the new one. */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->ops->post_associate(il);
}
5273
/*
 * il_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Applies BSS configuration changes (QoS, beaconing, BSSID, ERP flags,
 * HT, association, IBSS membership) to driver state and the staging
 * RXON, committing or sending rxon-assoc updates as appropriate. The
 * order of the change handlers is significant -- e.g. BSSID must be
 * staged before the assoc/beacon handlers run.
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: changes 0x%x\n", changes);

	if (!il_is_alive(il)) {
		D_MAC80211("leave - not alive\n");
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		il->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/* FIXME: can we remove beacon_enabled if we don't use it? */
		if (vif->bss_conf.enable_beacon)
			il->beacon_enabled = true;
		else
			il->beacon_enabled = false;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * On passive channel we wait with blocked queues to see
		 * if there is traffic on that channel. If no frame will
		 * be received (what is very unlikely since scan detects
		 * AP on that channel, but theoretically possible), then
		 * mac80211 associate procedure will time out and mac80211
		 * will call us with NULL bssid. We have to unblock queues
		 * on such condition.
		 */
		if (is_zero_ether_addr(bss_conf->bssid))
			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);

		/*
		 * If there is currently a HW scan going on in the
		 * background, then we need to cancel it, otherwise
		 * sometimes we are not able to authenticate.
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			D_MAC80211("leave - scan abort failed\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

		/* Keep our own cached copy for the teardown path. */
		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies on the 2.4 GHz band. */
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* Intentionally not handled; basic rates are managed
		 * via il_set_rate()/RXON defaults. */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->sync_tsf;

			if (!il_is_rfkill(il))
				il->ops->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes && il_is_associated(il) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il);
		if (!ret) {
			/* Sync active with staging after the update. */
			memcpy((void *)&il->active, &il->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(il->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->ops->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = il->ops->manage_ibss_station(il, vif,
						   bss_conf->ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5435
/*
 * il_isr - top-half interrupt handler
 *
 * Disables further device interrupts (they are re-enabled by the
 * tasklet, or below if the interrupt turns out not to be ours), reads
 * the CSR and FH interrupt status, filters spurious/hardware-gone
 * values, and schedules the irq tasklet to do the real work.
 * Returns IRQ_NONE for shared-IRQ invocations that are not ours.
 */
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/*
	 * Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs; the tasklet re-enables them when done.
	 */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts here since we don't have anything to
	 * service, but only if the interrupts were really disabled. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);
5496
5497
5498
5499
5500
5501void
5502il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5503 __le16 fc, __le32 *tx_flags)
5504{
5505 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5506 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5507 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5508 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5509
5510 if (!ieee80211_is_mgmt(fc))
5511 return;
5512
5513 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5514 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5515 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5516 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5517 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5518 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5519 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5520 break;
5521 }
5522 } else if (info->control.rates[0].
5523 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5524 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5525 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5526 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5527 }
5528}
5529EXPORT_SYMBOL(il_tx_cmd_protection);
5530