1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/pci.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/skbuff.h>
40#include <net/mac80211.h>
41
42#include "common.h"
43
44int
45_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
46{
47 const int interval = 10;
48 int t = 0;
49
50 do {
51 if ((_il_rd(il, addr) & mask) == (bits & mask))
52 return t;
53 udelay(interval);
54 t += interval;
55 } while (t < timeout);
56
57 return -ETIMEDOUT;
58}
59EXPORT_SYMBOL(_il_poll_bit);
60
61void
62il_set_bit(struct il_priv *p, u32 r, u32 m)
63{
64 unsigned long reg_flags;
65
66 spin_lock_irqsave(&p->reg_lock, reg_flags);
67 _il_set_bit(p, r, m);
68 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
69}
70EXPORT_SYMBOL(il_set_bit);
71
72void
73il_clear_bit(struct il_priv *p, u32 r, u32 m)
74{
75 unsigned long reg_flags;
76
77 spin_lock_irqsave(&p->reg_lock, reg_flags);
78 _il_clear_bit(p, r, m);
79 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
80}
81EXPORT_SYMBOL(il_clear_bit);
82
/*
 * _il_grab_nic_access - wake the MAC so host register access is valid
 *
 * Caller must hold il->reg_lock.  Returns true when access was granted;
 * on timeout it fires an NMI at the firmware and returns false, in which
 * case the caller must not touch prph/targ registers.
 */
bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* Request that the device wake its MAC clock for host access. */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Poll until the MAC clock is ready and the device is not on its
	 * way to sleep; 15000 usec is the wake-up latency budget.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			  "(CSR_GP_CNTRL 0x%08x)\n", val);
		/* Kick the firmware with an NMI so its error path runs. */
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
124
125int
126il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
127{
128 const int interval = 10;
129 int t = 0;
130
131 do {
132 if ((il_rd(il, addr) & mask) == mask)
133 return t;
134 udelay(interval);
135 t += interval;
136 } while (t < timeout);
137
138 return -ETIMEDOUT;
139}
140EXPORT_SYMBOL(il_poll_bit);
141
142u32
143il_rd_prph(struct il_priv *il, u32 reg)
144{
145 unsigned long reg_flags;
146 u32 val;
147
148 spin_lock_irqsave(&il->reg_lock, reg_flags);
149 _il_grab_nic_access(il);
150 val = _il_rd_prph(il, reg);
151 _il_release_nic_access(il);
152 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
153 return val;
154}
155EXPORT_SYMBOL(il_rd_prph);
156
157void
158il_wr_prph(struct il_priv *il, u32 addr, u32 val)
159{
160 unsigned long reg_flags;
161
162 spin_lock_irqsave(&il->reg_lock, reg_flags);
163 if (likely(_il_grab_nic_access(il))) {
164 _il_wr_prph(il, addr, val);
165 _il_release_nic_access(il);
166 }
167 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
168}
169EXPORT_SYMBOL(il_wr_prph);
170
171u32
172il_read_targ_mem(struct il_priv *il, u32 addr)
173{
174 unsigned long reg_flags;
175 u32 value;
176
177 spin_lock_irqsave(&il->reg_lock, reg_flags);
178 _il_grab_nic_access(il);
179
180 _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
181 value = _il_rd(il, HBUS_TARG_MEM_RDAT);
182
183 _il_release_nic_access(il);
184 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
185 return value;
186}
187EXPORT_SYMBOL(il_read_targ_mem);
188
189void
190il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
191{
192 unsigned long reg_flags;
193
194 spin_lock_irqsave(&il->reg_lock, reg_flags);
195 if (likely(_il_grab_nic_access(il))) {
196 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
197 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
198 _il_release_nic_access(il);
199 }
200 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
201}
202EXPORT_SYMBOL(il_write_targ_mem);
203
/*
 * il_get_cmd_string - map a host command / notification ID to its name
 *
 * Each IL_CMD(x) expands to a case returning the stringified ID; any ID
 * not in the table yields "UNKNOWN".  Used for log/debug messages only.
 */
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(il_get_cmd_string);
255
/* Max wait for a synchronous host command to complete: HZ/2 = 500 ms. */
#define HOST_COMPLETE_TIMEOUT (HZ / 2)
257
/*
 * il_generic_cmd_callback - default completion handler for async commands
 *
 * Installed by il_send_cmd_async() when the caller supplies no callback.
 * Logs an error when the firmware flagged the command as failed; in debug
 * builds it also traces successful completions.
 */
static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		/* These fire frequently; route them to the noisier stream. */
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}
280
/*
 * il_send_cmd_async - enqueue a host command without waiting for completion
 *
 * Returns 0 once queued, -EBUSY while shutting down, or the negative
 * error from il_enqueue_hcmd().  Completion is reported via cmd->callback.
 */
static int
il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->flags & CMD_ASYNC));

	/* An asynchronous command cannot expect a reply SKB. */
	BUG_ON(cmd->flags & CMD_WANT_SKB);

	/* Fall back to the generic logger when no callback is provided. */
	if (!cmd->callback)
		cmd->callback = il_generic_cmd_callback;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EBUSY;

	ret = il_enqueue_hcmd(il, cmd);
	if (ret < 0) {
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}
306
/*
 * il_send_cmd_sync - send a host command and wait for its completion
 *
 * Caller must hold il->mutex.  Enqueues @cmd, then sleeps until the IRQ
 * path clears S_HCMD_ACTIVE or HOST_COMPLETE_TIMEOUT expires.  Returns 0
 * on success, -ETIMEDOUT / -ECANCELED (rfkill) / -EIO (fw error or
 * missing reply) otherwise.  Any reply page is freed on the error paths.
 */
int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command cannot have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* Sleep until the IRQ handler clears S_HCMD_ACTIVE on completion. */
	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * The command is still queued; clear CMD_WANT_SKB on the
		 * queue's copy of the metadata so that when the (late)
		 * response eventually arrives, the IRQ path does not try to
		 * hand a reply page to this caller, which has given up.
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);
393
394int
395il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
396{
397 if (cmd->flags & CMD_ASYNC)
398 return il_send_cmd_async(il, cmd);
399
400 return il_send_cmd_sync(il, cmd);
401}
402EXPORT_SYMBOL(il_send_cmd);
403
404int
405il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
406{
407 struct il_host_cmd cmd = {
408 .id = id,
409 .len = len,
410 .data = data,
411 };
412
413 return il_send_cmd_sync(il, &cmd);
414}
415EXPORT_SYMBOL(il_send_cmd_pdu);
416
417int
418il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
419 void (*callback) (struct il_priv *il,
420 struct il_device_cmd *cmd,
421 struct il_rx_pkt *pkt))
422{
423 struct il_host_cmd cmd = {
424 .id = id,
425 .len = len,
426 .data = data,
427 };
428
429 cmd.flags |= CMD_ASYNC;
430 cmd.callback = callback;
431
432 return il_send_cmd_async(il, &cmd);
433}
434EXPORT_SYMBOL(il_send_cmd_pdu_async);
435
436
/* LED behaviour override, settable at module load (read-only in sysfs). */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode,
		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
441
442
443
444
445
446
447
448
449
450
451
452
453
454
/* Throughput-to-blink-time table for the mac80211 throughput LED trigger:
 * higher recent throughput selects a shorter blink period (.blink_time is
 * in msec; .throughput units assumed Kbit/s per mac80211 — TODO confirm). */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0, .blink_time = 334},
	{.throughput = 1 * 1024 - 1, .blink_time = 260},
	{.throughput = 5 * 1024 - 1, .blink_time = 220},
	{.throughput = 10 * 1024 - 1, .blink_time = 190},
	{.throughput = 20 * 1024 - 1, .blink_time = 170},
	{.throughput = 50 * 1024 - 1, .blink_time = 150},
	{.throughput = 70 * 1024 - 1, .blink_time = 130},
	{.throughput = 100 * 1024 - 1, .blink_time = 110},
	{.throughput = 200 * 1024 - 1, .blink_time = 80},
	{.throughput = 300 * 1024 - 1, .blink_time = 50},
};
467
468
469
470
471
472
473
474
475
476
477
478
/*
 * il_blink_compensation - scale a nominal blink time by compensation/64
 *
 * With no compensation configured, complain and return @time unchanged.
 * The result is truncated to u8, so large products wrap — callers pass
 * small blink times.
 */
static inline u8
il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8) ((time * compensation) >> 6);
}
490
491
/*
 * il_led_cmd - program the link LED blink pattern into the firmware
 *
 * @on/@off are the raw blink durations; @off == 0 requests solid-on.
 * Skips the firmware command when the requested pattern is already
 * active.  Returns 0 on success, -EBUSY before S_READY, or the error
 * from ops->send_led_cmd().
 */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	/* Nothing to do if this pattern is already programmed. */
	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* LED is requested solid-on. */
		on = IL_LED_SOLID;
	}

	/* Apply the per-device clock compensation to both durations. */
	D_LED("Led blink time compensation=%u\n",
	      il->cfg->led_compensation);
	led_cmd.on =
	    il_blink_compensation(il, on,
				  il->cfg->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off,
				  il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		/* Cache the pattern so identical requests short-circuit. */
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}
528
529static void
530il_led_brightness_set(struct led_classdev *led_cdev,
531 enum led_brightness brightness)
532{
533 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
534 unsigned long on = 0;
535
536 if (brightness > 0)
537 on = IL_LED_SOLID;
538
539 il_led_cmd(il, on, 0);
540}
541
542static int
543il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
544 unsigned long *delay_off)
545{
546 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
547
548 return il_led_cmd(il, *delay_on, *delay_off);
549}
550
/*
 * il_leds_init - register the link LED with the LED class subsystem
 *
 * Picks the trigger according to the led_mode module parameter (falling
 * back to the per-device cfg default), then registers the classdev.  On
 * registration failure the name is freed and led_registered stays false,
 * which makes il_leds_exit() a no-op.
 */
void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		/* cfg->led_mode should never itself be "default". */
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		/* Blink proportionally to throughput while associated. */
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		/* Follow the radio on/off state. */
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);
591
592void
593il_leds_exit(struct il_priv *il)
594{
595 if (!il->led_registered)
596 return;
597
598 led_classdev_unregister(&il->led);
599 kfree(il->led.name);
600}
601EXPORT_SYMBOL(il_leds_exit);
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
/* EEPROM regulatory band 1: the 14 fixed 2.4 GHz channels. */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* EEPROM regulatory band 2 channel numbers (5 GHz range; the >= 183
 * entries presumably denote 4.9 GHz channels — confirm against HW docs). */
static const u8 il_eeprom_band_2[] = {
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

/* EEPROM regulatory band 3 channel numbers (5 GHz). */
static const u8 il_eeprom_band_3[] = {
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

/* EEPROM regulatory band 4 channel numbers (5 GHz). */
static const u8 il_eeprom_band_4[] = {
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

/* EEPROM regulatory band 5 channel numbers (5 GHz). */
static const u8 il_eeprom_band_5[] = {
	145, 149, 153, 157, 161, 165
};

/* EEPROM band 6: 2.4 GHz HT40 primary channels (see il_init_channel_map). */
static const u8 il_eeprom_band_6[] = {
	1, 2, 3, 4, 5, 6, 7
};

/* EEPROM band 7: 5 GHz HT40 primary channels (see il_init_channel_map). */
static const u8 il_eeprom_band_7[] = {
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
663
664
665
666
667
668
669
670static int
671il_eeprom_verify_signature(struct il_priv *il)
672{
673 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
674 int ret = 0;
675
676 D_EEPROM("EEPROM signature=0x%08x\n", gp);
677 switch (gp) {
678 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
679 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
680 break;
681 default:
682 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
683 ret = -ENOENT;
684 break;
685 }
686 return ret;
687}
688
689const u8 *
690il_eeprom_query_addr(const struct il_priv *il, size_t offset)
691{
692 BUG_ON(offset >= il->cfg->eeprom_size);
693 return &il->eeprom[offset];
694}
695EXPORT_SYMBOL(il_eeprom_query_addr);
696
697u16
698il_eeprom_query16(const struct il_priv *il, size_t offset)
699{
700 if (!il->eeprom)
701 return 0;
702 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
703}
704EXPORT_SYMBOL(il_eeprom_query16);
705
706
707
708
709
710
711
712
/*
 * il_eeprom_init - read the device EEPROM image into il->eeprom
 *
 * Allocates cfg->eeprom_size bytes, verifies the EEPROM signature,
 * acquires the driver/uCode EEPROM semaphore and reads the image one
 * 16-bit word at a time via CSR_EEPROM_REG.  On any failure the buffer
 * is freed again.  Returns 0 on success or a negative errno.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* Allocate the cached image buffer. */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	e = (__le16 *) il->eeprom;

	/* Bring the device clocks up so the EEPROM can be read. */
	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure the driver (not the uCode) owns the EEPROM. */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* The EEPROM is an array of 16-bit words; read them one by one. */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		/* The data word lives in the upper 16 bits of the register. */
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);

	/* Power the chip back down; it is re-initialized on "up". */
	il_apm_stop(il);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
785
/* Free the cached EEPROM image; safe to call when none was loaded, and
 * NULLs the pointer so il_eeprom_query16() sees the image as absent. */
void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);
793
794static void
795il_init_band_reference(const struct il_priv *il, int eep_band,
796 int *eeprom_ch_count,
797 const struct il_eeprom_channel **eeprom_ch_info,
798 const u8 **eeprom_ch_idx)
799{
800 u32 offset = il->cfg->regulatory_bands[eep_band - 1];
801
802 switch (eep_band) {
803 case 1:
804 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
805 *eeprom_ch_info =
806 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
807 offset);
808 *eeprom_ch_idx = il_eeprom_band_1;
809 break;
810 case 2:
811 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
812 *eeprom_ch_info =
813 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
814 offset);
815 *eeprom_ch_idx = il_eeprom_band_2;
816 break;
817 case 3:
818 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
819 *eeprom_ch_info =
820 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
821 offset);
822 *eeprom_ch_idx = il_eeprom_band_3;
823 break;
824 case 4:
825 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
826 *eeprom_ch_info =
827 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
828 offset);
829 *eeprom_ch_idx = il_eeprom_band_4;
830 break;
831 case 5:
832 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
833 *eeprom_ch_info =
834 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
835 offset);
836 *eeprom_ch_idx = il_eeprom_band_5;
837 break;
838 case 6:
839 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
840 *eeprom_ch_info =
841 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
842 offset);
843 *eeprom_ch_idx = il_eeprom_band_6;
844 break;
845 case 7:
846 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
847 *eeprom_ch_info =
848 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
849 offset);
850 *eeprom_ch_idx = il_eeprom_band_7;
851 break;
852 default:
853 BUG();
854 }
855}
856
/* Expand to "<flag> " when EEPROM_CHANNEL_<x> is set in eeprom_ch->flags,
 * else the empty string — used only to build debug messages. */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
859
860
861
862
863
/*
 * il_mod_ht40_chan_info - record HT40 EEPROM data in the channel map
 *
 * Looks up the il_channel_info entry for (@band, @channel) and copies in
 * the HT40 flags and max power from @eeprom_ch.  When the EEPROM marks
 * the channel valid, @clear_ht40_extension_channel is removed from the
 * entry's "HT40 not allowed" mask, enabling that extension direction.
 * Returns 0 on success, -1 when the channel is absent or invalid.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	/* Cast away const: this helper intentionally mutates the entry. */
	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
896
/* As CHECK_AND_PRINT but indexes eeprom_ch_info[ch].flags — for use in
 * the channel-map initialization loop's debug output. */
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			      ? # x " " : "")
899
900
901
902
903int
904il_init_channel_map(struct il_priv *il)
905{
906 int eeprom_ch_count = 0;
907 const u8 *eeprom_ch_idx = NULL;
908 const struct il_eeprom_channel *eeprom_ch_info = NULL;
909 int band, ch;
910 struct il_channel_info *ch_info;
911
912 if (il->channel_count) {
913 D_EEPROM("Channel map already initialized.\n");
914 return 0;
915 }
916
917 D_EEPROM("Initializing regulatory info from EEPROM\n");
918
919 il->channel_count =
920 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
921 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
922 ARRAY_SIZE(il_eeprom_band_5);
923
924 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
925
926 il->channel_info =
927 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
928 GFP_KERNEL);
929 if (!il->channel_info) {
930 IL_ERR("Could not allocate channel_info\n");
931 il->channel_count = 0;
932 return -ENOMEM;
933 }
934
935 ch_info = il->channel_info;
936
937
938
939
940 for (band = 1; band <= 5; band++) {
941
942 il_init_band_reference(il, band, &eeprom_ch_count,
943 &eeprom_ch_info, &eeprom_ch_idx);
944
945
946 for (ch = 0; ch < eeprom_ch_count; ch++) {
947 ch_info->channel = eeprom_ch_idx[ch];
948 ch_info->band =
949 (band ==
950 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
951
952
953
954 ch_info->eeprom = eeprom_ch_info[ch];
955
956
957
958 ch_info->flags = eeprom_ch_info[ch].flags;
959
960
961 ch_info->ht40_extension_channel =
962 IEEE80211_CHAN_NO_HT40;
963
964 if (!(il_is_channel_valid(ch_info))) {
965 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
966 "No traffic\n", ch_info->channel,
967 ch_info->flags,
968 il_is_channel_a_band(ch_info) ? "5.2" :
969 "2.4");
970 ch_info++;
971 continue;
972 }
973
974
975 ch_info->max_power_avg = ch_info->curr_txpow =
976 eeprom_ch_info[ch].max_power_avg;
977 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
978 ch_info->min_power = 0;
979
980 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
981 " Ad-Hoc %ssupported\n", ch_info->channel,
982 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
983 CHECK_AND_PRINT_I(VALID),
984 CHECK_AND_PRINT_I(IBSS),
985 CHECK_AND_PRINT_I(ACTIVE),
986 CHECK_AND_PRINT_I(RADAR),
987 CHECK_AND_PRINT_I(WIDE),
988 CHECK_AND_PRINT_I(DFS),
989 eeprom_ch_info[ch].flags,
990 eeprom_ch_info[ch].max_power_avg,
991 ((eeprom_ch_info[ch].
992 flags & EEPROM_CHANNEL_IBSS) &&
993 !(eeprom_ch_info[ch].
994 flags & EEPROM_CHANNEL_RADAR)) ? "" :
995 "not ");
996
997 ch_info++;
998 }
999 }
1000
1001
1002 if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
1003 il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
1004 return 0;
1005
1006
1007 for (band = 6; band <= 7; band++) {
1008 enum ieee80211_band ieeeband;
1009
1010 il_init_band_reference(il, band, &eeprom_ch_count,
1011 &eeprom_ch_info, &eeprom_ch_idx);
1012
1013
1014 ieeeband =
1015 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1016
1017
1018 for (ch = 0; ch < eeprom_ch_count; ch++) {
1019
1020 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
1021 &eeprom_ch_info[ch],
1022 IEEE80211_CHAN_NO_HT40PLUS);
1023
1024
1025 il_mod_ht40_chan_info(il, ieeeband,
1026 eeprom_ch_idx[ch] + 4,
1027 &eeprom_ch_info[ch],
1028 IEEE80211_CHAN_NO_HT40MINUS);
1029 }
1030 }
1031
1032 return 0;
1033}
1034EXPORT_SYMBOL(il_init_channel_map);
1035
1036
1037
1038
/* Free the channel map built by il_init_channel_map(); resetting the
 * count lets the map be rebuilt later. */
void
il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);
1046
1047
1048
1049
1050
1051
1052const struct il_channel_info *
1053il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
1054 u16 channel)
1055{
1056 int i;
1057
1058 switch (band) {
1059 case IEEE80211_BAND_5GHZ:
1060 for (i = 14; i < il->channel_count; i++) {
1061 if (il->channel_info[i].channel == channel)
1062 return &il->channel_info[i];
1063 }
1064 break;
1065 case IEEE80211_BAND_2GHZ:
1066 if (channel >= 1 && channel <= 14)
1067 return &il->channel_info[channel - 1];
1068 break;
1069 default:
1070 BUG();
1071 }
1072
1073 return NULL;
1074}
1075EXPORT_SYMBOL(il_get_channel_info);
1076
1077
1078
1079
1080
1081
1082
1083
/* Build a 5-entry little-endian sleep-interval vector initializer for
 * il_powertable_cmd.sleep_interval. */
#define SLP_VEC(X0, X1, X2, X3, X4) { \
	cpu_to_le32(X0), \
	cpu_to_le32(X1), \
	cpu_to_le32(X2), \
	cpu_to_le32(X3), \
	cpu_to_le32(X4) \
}
1091
/*
 * il_build_powertable_cmd - derive the sleep command from current state
 *
 * Fills @cmd from power-save settings and the vif's DTIM period: picks a
 * sleep-interval vector by DTIM length, then clamps all entries to a
 * maximum derived from the DTIM period.  When power save is disabled the
 * command is left zeroed (no ALLOW_SLEEP flag).
 */
static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* Power save disabled: leave the command zeroed (no sleep). */
	if (il->power_data.ps_disabled)
		return;

	/* NOTE(review): this plain assignment discards IL_POWER_PCI_PM_MSK
	 * set just above — looks like |= may have been intended; confirm
	 * against firmware expectations before changing. */
	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;

	/* Select the sleep-interval vector by how long the DTIM period is. */
	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	if (dtim_period == 0) {
		/* No DTIM info yet: assume period 1 and never skip DTIMs. */
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		/* 0xFF is a sentinel meaning "no explicit cap". */
		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	/* Clamp every interval entry to the computed maximum. */
	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
1158
/* Log the prepared power table command, then send it synchronously to
 * the firmware.  Returns the il_send_cmd_pdu() result. */
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}
1176
/*
 * il_power_set_mode - apply a power table command to the device
 *
 * Caller must hold il->mutex.  Skips the send when @cmd matches the
 * last applied command (unless @force), defers it while a scan is in
 * progress, and on success caches the command and updates chain flags
 * when chain-noise calibration allows it.
 */
static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Chain flags may only change once calibration has settled. */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Remember the request; scan completion re-applies it later. */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	/* Raise PMI before allowing sleep so register access stays safe. */
	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		/* Cache so identical future requests are skipped. */
		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}
1223
1224int
1225il_power_update_mode(struct il_priv *il, bool force)
1226{
1227 struct il_powertable_cmd cmd;
1228
1229 il_build_powertable_cmd(il, &cmd);
1230
1231 return il_power_set_mode(il, &cmd, force);
1232}
1233EXPORT_SYMBOL(il_power_update_mode);
1234
1235
/*
 * il_power_initialize - one-time power-management state setup
 *
 * Samples the PCIe link control register: the PCI power-management mode
 * is only enabled when ASPM L0s is not active.  Also clears the cached
 * sleep command so the first il_power_set_mode() always sends.
 */
void
il_power_initialize(struct il_priv *il)
{
	u16 lctl;

	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	/* -1 means "no debug override of the sleep level". */
	il->power_data.debug_sleep_level_override = -1;

	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);
1249
1250
1251
1252
/* Active-scan dwell times per band (presumably msec — TODO confirm
 * against the scan command documentation). */
#define IL_ACTIVE_DWELL_TIME_24 (30)
#define IL_ACTIVE_DWELL_TIME_52 (20)

/* Per-band multipliers applied to the active dwell time. */
#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* Passive-scan dwell: a base plus a per-band increment, and the time
 * budgeted for retuning between channels. */
#define IL_PASSIVE_DWELL_TIME_24 (20)
#define IL_PASSIVE_DWELL_TIME_52 (10)
#define IL_PASSIVE_DWELL_BASE (100)
#define IL_CHANNEL_TUNE_TIME 5
1266
/*
 * il_send_scan_abort - synchronously ask the firmware to abort its scan
 *
 * Bails out with -EIO when the device is not in a state where the abort
 * could be processed (not ready, no HW scan running, firmware error,
 * shutting down).  Returns 0 when the firmware accepted the abort,
 * -EIO when it reported it could not abort.
 */
static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with an error when the device cannot service the
	 * abort command in its current state. */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* Firmware declined the abort request; the caller decides
		 * whether to retry or force the scan to end. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}
1306
1307static void
1308il_complete_scan(struct il_priv *il, bool aborted)
1309{
1310
1311 if (il->scan_request) {
1312 D_SCAN("Complete scan in mac80211\n");
1313 ieee80211_scan_completed(il->hw, aborted);
1314 }
1315
1316 il->scan_vif = NULL;
1317 il->scan_request = NULL;
1318}
1319
/*
 * il_force_scan_end - forcibly mark the current scan as finished
 *
 * Caller must hold il->mutex.  Clears all scan state bits and reports
 * an aborted scan to mac80211; no-op while not scanning.
 */
void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}
1336
/*
 * il_do_scan_abort - abort the running scan, falling back to a forced end
 *
 * Caller must hold il->mutex.  No-op when not scanning; at most one
 * abort may be in flight (guarded by S_SCAN_ABORTING).  When the
 * firmware refuses the abort, the scan is terminated driver-side.
 */
static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		/* Firmware did not take the abort; end the scan ourselves. */
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully send scan abort\n");
}
1361
1362
1363
1364
/*
 * il_scan_cancel - request cancellation of the current HW scan
 *
 * Defers the actual abort to the abort_scan work item on il->workqueue
 * rather than acting inline.  Always returns 0.
 */
int
il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);
1373
1374
1375
1376
1377
1378
/*
 * il_scan_cancel_timeout - abort the scan and wait for the HW to stop
 * @ms: how long to wait (in ms) for the hardware scan to end
 *
 * Caller must hold il->mutex.  Issues the abort, then polls S_SCAN_HW
 * every 20 ms until it clears or the deadline passes.  Returns 0 when
 * the hardware stopped scanning, nonzero when it is still scanning at
 * timeout.
 */
int
il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&il->mutex);

	D_SCAN("Scan cancel timeout\n");

	il_do_scan_abort(il);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(S_SCAN_HW, &il->status))
			break;
		msleep(20);
	}

	return test_bit(S_SCAN_HW, &il->status);
}
EXPORT_SYMBOL(il_scan_cancel_timeout);
1399
1400
/* Handle the scan-request notification (registered for C_SCAN below);
 * debug logging only — no state changes. */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1412
1413
/* Handle N_SCAN_START: remember the start TSF so later results can
 * report the elapsed time, and log the details. */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;
	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: " "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}
1426
1427
/* Rx handler for N_SCAN_RESULTS: log per-channel scan results, including
 * time elapsed since the TSF recorded at scan start (debug builds only). */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1443
1444
/* Rx handler for N_SCAN_COMPLETE: clear the HW-scan bit and defer the
 * rest of completion handling to the scan_completed work item. */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{

#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	/* NOTE(review): scan_notif exists only under CONFIG_IWLEGACY_DEBUG;
	 * this relies on D_SCAN compiling to nothing otherwise — confirm. */
	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}
1467
/* Install the Rx dispatch handlers for all scan-related notifications. */
void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1478
1479u16
1480il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1481 u8 n_probes)
1482{
1483 if (band == IEEE80211_BAND_5GHZ)
1484 return IL_ACTIVE_DWELL_TIME_52 +
1485 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1486 else
1487 return IL_ACTIVE_DWELL_TIME_24 +
1488 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1489}
1490EXPORT_SYMBOL(il_get_active_dwell_time);
1491
1492u16
1493il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1494 struct ieee80211_vif *vif)
1495{
1496 u16 value;
1497
1498 u16 passive =
1499 (band ==
1500 IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
1501 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
1502 IL_PASSIVE_DWELL_TIME_52;
1503
1504 if (il_is_any_associated(il)) {
1505
1506
1507
1508
1509
1510 value = il->vif ? il->vif->bss_conf.beacon_int : 0;
1511 if (value > IL_PASSIVE_DWELL_BASE || !value)
1512 value = IL_PASSIVE_DWELL_BASE;
1513 value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
1514 passive = min(value, passive);
1515 }
1516
1517 return passive;
1518}
1519EXPORT_SYMBOL(il_get_passive_dwell_time);
1520
1521void
1522il_init_scan_params(struct il_priv *il)
1523{
1524 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1525 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
1526 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1527 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
1528 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1529}
1530EXPORT_SYMBOL(il_init_scan_params);
1531
/*
 * Start a hardware scan for @vif. Caller must hold il->mutex.
 * Sets S_SCANNING, hands the request to the device-specific op, and
 * arms the scan watchdog. Returns 0 or a negative errno.
 */
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	/* device-specific scan command; on failure undo S_SCANNING */
	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	/* watchdog: forces scan end if no completion arrives in time */
	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1572
/*
 * mac80211 hw_scan callback: validate the request, stash the request,
 * vif and first-channel band, and kick off the hardware scan.
 * Returns 0, -EINVAL (no channels), -EAGAIN (scan running), or the
 * error from il_scan_initiate().
 */
int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Can not scan on no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);
1609
/*
 * Scan watchdog work: runs when no scan completion arrived within
 * IL_SCAN_CHECK_WATCHDOG; forcibly tears down scanning state.
 */
static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}
1625
1626
1627
1628
1629
/*
 * Build a broadcast probe request into @frame.
 * @ta: transmitter (source) address; @ies/@ie_len: optional extra IEs
 * appended after a zero-length (wildcard) SSID element; @left: bytes
 * available in @frame. Returns the frame length, or 0 if the header or
 * SSID element does not fit (WARNs and returns the partial length if
 * only the extra IEs overflow).
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;	/* 802.11 management header */
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE: zero length means wildcard SSID */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
1674
/*
 * Deferred scan-abort work (queued by il_scan_cancel): synchronously
 * cancel the scan with a 200ms timeout under il->mutex.
 */
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}
1688
/*
 * Scan-completed work: clears scanning state bits, notifies mac80211,
 * and (if the RF is still up) restores power/tx-power settings deferred
 * while scanning, then lets the device-specific post_scan hook run.
 */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	/* if S_SCANNING was already clear, someone (e.g. the watchdog)
	 * completed the scan before us; skip straight to settings */
	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1729
/* Initialize the three scan-related work items (completion, abort,
 * and the delayed watchdog check). */
void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);
1738
/*
 * Cancel all pending scan work. If the watchdog was still queued, a
 * scan may be in flight — force scan end under il->mutex so mac80211
 * is not left waiting for a completion that will never arrive.
 */
void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1752
1753
1754static void
1755il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1756{
1757
1758 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1759 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1760 sta_id, il->stations[sta_id].sta.sta.addr);
1761
1762 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1763 D_ASSOC("STA id %u addr %pM already present"
1764 " in uCode (according to driver)\n", sta_id,
1765 il->stations[sta_id].sta.sta.addr);
1766 } else {
1767 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1768 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1769 il->stations[sta_id].sta.sta.addr);
1770 }
1771}
1772
/*
 * Parse the uCode response to a C_ADD_STA command.
 * On ADD_STA_SUCCESS_MSK marks the station uCode-active (under sta_lock).
 * Returns 0 on success, -EIO on any failure status.
 */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/* Log the driver-table copy and the command-buffer copy of the
	 * station address separately; they can be compared when debugging. */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1832
/* Async completion callback for C_ADD_STA: forward the reply packet to
 * the common response parser (sync = false). */
static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);

}
1842
1843int
1844il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1845{
1846 struct il_rx_pkt *pkt = NULL;
1847 int ret = 0;
1848 u8 data[sizeof(*sta)];
1849 struct il_host_cmd cmd = {
1850 .id = C_ADD_STA,
1851 .flags = flags,
1852 .data = data,
1853 };
1854 u8 sta_id __maybe_unused = sta->sta.sta_id;
1855
1856 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1857 flags & CMD_ASYNC ? "a" : "");
1858
1859 if (flags & CMD_ASYNC)
1860 cmd.callback = il_add_sta_callback;
1861 else {
1862 cmd.flags |= CMD_WANT_SKB;
1863 might_sleep();
1864 }
1865
1866 cmd.len = il->ops->build_addsta_hcmd(sta, data);
1867 ret = il_send_cmd(il, &cmd);
1868
1869 if (ret || (flags & CMD_ASYNC))
1870 return ret;
1871
1872 if (ret == 0) {
1873 pkt = (struct il_rx_pkt *)cmd.reply_page;
1874 ret = il_process_add_sta_resp(il, sta, pkt, true);
1875 }
1876 il_free_pages(il, cmd.reply_page);
1877
1878 return ret;
1879}
1880EXPORT_SYMBOL(il_send_add_sta);
1881
1882static void
1883il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
1884{
1885 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1886 __le32 sta_flags;
1887
1888 if (!sta || !sta_ht_inf->ht_supported)
1889 goto done;
1890
1891 D_ASSOC("spatial multiplexing power save mode: %s\n",
1892 (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
1893 (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
1894 "disabled");
1895
1896 sta_flags = il->stations[idx].sta.station_flags;
1897
1898 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1899
1900 switch (sta->smps_mode) {
1901 case IEEE80211_SMPS_STATIC:
1902 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1903 break;
1904 case IEEE80211_SMPS_DYNAMIC:
1905 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1906 break;
1907 case IEEE80211_SMPS_OFF:
1908 break;
1909 default:
1910 IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
1911 break;
1912 }
1913
1914 sta_flags |=
1915 cpu_to_le32((u32) sta_ht_inf->
1916 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1917
1918 sta_flags |=
1919 cpu_to_le32((u32) sta_ht_inf->
1920 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1921
1922 if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
1923 sta_flags |= STA_FLG_HT40_EN_MSK;
1924 else
1925 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1926
1927 il->stations[idx].sta.station_flags = sta_flags;
1928done:
1929 return;
1930}
1931
1932
1933
1934
1935
1936
/*
 * Find or allocate a slot in the driver's station table for @addr and
 * fill in the C_ADD_STA command template (does NOT send it).
 * AP peers get IL_AP_ID, broadcast gets the device's bcast_id; others
 * reuse a matching entry or take the first free slot.
 * Caller must hold il->sta_lock. Returns the station id, or
 * IL_INVALID_STATION when the table is full.
 */
u8
il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
		struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = IL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		sta_id = il->hw_params.bcast_id;
	else
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			/* remember first free slot, but keep scanning for
			 * an exact address match */
			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = 0;

	/*
	 * OK to call unconditionally, since we will not add this
	 * station to the device's station table unless it is an HT
	 * station.
	 */
	il_set_ht_add_station(il, sta_id, sta);

	/* lowest mandatory rate for the band, all antennas */
	rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;

}
EXPORT_SYMBOL_GPL(il_prep_station);
2016
2017#define STA_WAIT_TIMEOUT (HZ/2)
2018
2019
2020
2021
/*
 * Prepare a station entry and synchronously send C_ADD_STA.
 * The station id is returned via @sta_id_r. sta_lock is dropped while
 * the command is in flight; IL_STA_UCODE_INPROGRESS guards against a
 * concurrent second add. Returns 0, -EINVAL, -EEXIST, or the send error.
 */
int
il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
		      struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	/* copy the command out so it can be sent without holding the lock */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);
2078
2079
2080
2081
2082
2083
/*
 * Mark a station as removed from uCode and wipe its driver table entry.
 * Caller must hold il->sta_lock. Logs an error if the entry was not in
 * the expected "uCode-active only" state.
 */
static void
il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
{
	/* expected state: UCODE_ACTIVE set, DRIVER_ACTIVE already cleared */
	if ((il->stations[sta_id].
	     used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
	    IL_STA_UCODE_ACTIVE)
		IL_ERR("removed non active STA %u\n", sta_id);

	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;

	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
	D_ASSOC("Removed STA %u\n", sta_id);
}
2098
/*
 * Synchronously send C_REM_STA for @addr. On success (and unless
 * @temporary), deactivate the driver's table entry for @sta_id.
 * Returns 0 on success, -EIO on a failed/unknown status, or the
 * command-send error.
 */
static int
il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
		       bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;

	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;

	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	/* we need the reply SKB to inspect the removal status */
	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);

	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
2154
2155
2156
2157
/*
 * Remove a station from the driver table and from the device.
 * Returns 0 if the device is not ready (nothing to do: uCode will be
 * reloaded with a clean table), -EINVAL on state errors, or the result
 * of il_send_remove_station().
 */
int
il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
{
	unsigned long flags;

	if (!il_is_ready(il)) {
		D_INFO("Unable to remove station %pM, device not ready.\n",
		       addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);

	if (WARN_ON(sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags);

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		D_INFO("Removing %pM but non DRIVER active\n", addr);
		goto out_err;
	}

	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_INFO("Removing %pM but non UCODE active\n", addr);
		goto out_err;
	}

	/* local stations own their link-quality table */
	if (il->stations[sta_id].used & IL_STA_LOCAL) {
		kfree(il->stations[sta_id].lq);
		il->stations[sta_id].lq = NULL;
	}

	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;

	il->num_stations--;

	BUG_ON(il->num_stations < 0);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_remove_station(il, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(il_remove_station);
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219void
2220il_clear_ucode_stations(struct il_priv *il)
2221{
2222 int i;
2223 unsigned long flags_spin;
2224 bool cleared = false;
2225
2226 D_INFO("Clearing ucode stations in driver\n");
2227
2228 spin_lock_irqsave(&il->sta_lock, flags_spin);
2229 for (i = 0; i < il->hw_params.max_stations; i++) {
2230 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2231 D_INFO("Clearing ucode active for station %d\n", i);
2232 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2233 cleared = true;
2234 }
2235 }
2236 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2237
2238 if (!cleared)
2239 D_INFO("No active stations found to be cleared\n");
2240}
2241EXPORT_SYMBOL(il_clear_ucode_stations);
2242
2243
2244
2245
2246
2247
2248
2249
2250
/*
 * Re-add to uCode every station the driver considers active but uCode
 * does not (e.g. after firmware restart), resending each station's
 * link-quality command when one is cached. sta_lock is dropped around
 * the synchronous command sends; IL_STA_UCODE_INPROGRESS marks entries
 * being restored.
 */
void
il_restore_stations(struct il_priv *il)
{
	struct il_addsta_cmd sta_cmd;
	struct il_link_quality_cmd lq;
	unsigned long flags_spin;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!il_is_ready(il)) {
		D_INFO("Not ready yet, not restoring any stations.\n");
		return;
	}

	D_ASSOC("Restoring all known stations ... start.\n");
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	/* pass 1: mark everything that needs restoring */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
			D_ASSOC("Restoring sta %pM\n",
				il->stations[i].sta.sta.addr);
			il->stations[i].sta.mode = 0;
			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/* pass 2: send the commands, dropping the lock around each send */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
			/* copy command data out while the lock is held */
			memcpy(&sta_cmd, &il->stations[i].sta,
			       sizeof(struct il_addsta_cmd));
			send_lq = false;
			if (il->stations[i].lq) {
				memcpy(&lq, il->stations[i].lq,
				       sizeof(struct il_link_quality_cmd));
				send_lq = true;
			}
			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
			if (ret) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				IL_ERR("Adding station %pM failed.\n",
				       il->stations[i].sta.sta.addr);
				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
				il->stations[i].used &=
				    ~IL_STA_UCODE_INPROGRESS;
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command
			 */
			if (send_lq)
				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
			spin_lock_irqsave(&il->sta_lock, flags_spin);
			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	if (!found)
		D_INFO("Restoring all known stations"
		       " .... no stations to be restored.\n");
	else
		D_INFO("Restoring all known stations" " .... complete.\n");
}
EXPORT_SYMBOL(il_restore_stations);
2321
2322int
2323il_get_free_ucode_key_idx(struct il_priv *il)
2324{
2325 int i;
2326
2327 for (i = 0; i < il->sta_key_max_num; i++)
2328 if (!test_and_set_bit(i, &il->ucode_key_table))
2329 return i;
2330
2331 return WEP_INVALID_OFFSET;
2332}
2333EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2334
/*
 * Tear down all broadcast station entries: clear their uCode-active
 * flag, drop the station count, and free any link-quality table.
 */
void
il_dealloc_bcast_stations(struct il_priv *il)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if (!(il->stations[i].used & IL_STA_BCAST))
			continue;

		il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
		il->num_stations--;
		BUG_ON(il->num_stations < 0);
		kfree(il->stations[i].lq);
		il->stations[i].lq = NULL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
}
EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2355
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump a link-quality command (station id, antenna masks, rate table)
 * to the rate-scaling debug log. */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
}
#else
/* No-op stub when debugging is compiled out. */
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386static bool
2387il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
2388{
2389 int i;
2390
2391 if (il->ht.enabled)
2392 return true;
2393
2394 D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2395 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2396 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2397 D_INFO("idx %d of LQ expects HT channel\n", i);
2398 return false;
2399 }
2400 }
2401 return true;
2402}
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
/*
 * Send a C_TX_LINK_QUALITY_CMD for lq->sta_id after validating that the
 * station is driver-active and the rate table suits the channel.
 * @init: set while restoring stations; on the synchronous path it also
 * clears IL_STA_UCODE_INPROGRESS once the command completes.
 * Returns 0 on success or -EINVAL / the send error.
 */
int
il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
	       u8 flags, bool init)
{
	int ret = 0;
	unsigned long flags_spin;

	struct il_host_cmd cmd = {
		.id = C_TX_LINK_QUALITY_CMD,
		.len = sizeof(struct il_link_quality_cmd),
		.flags = flags,
		.data = lq,
	};

	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags_spin);
	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	il_dump_lq_cmd(il, lq);
	/* init restore must be synchronous so INPROGRESS can be cleared */
	BUG_ON(init && (cmd.flags & CMD_ASYNC));

	if (il_is_lq_table_valid(il, lq))
		ret = il_send_cmd(il, &cmd);
	else
		ret = -EINVAL;

	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		D_INFO("init LQ command complete,"
		       " clearing sta addition status for sta %d\n",
		       lq->sta_id);
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	return ret;
}
EXPORT_SYMBOL(il_send_lq_cmd);
2460
/*
 * mac80211 sta_remove callback: look up the driver station id stored in
 * the station's drv_priv and remove the station under il->mutex.
 */
int
il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter station %pM\n", sta->addr);

	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
	if (ret)
		IL_ERR("Error removing station %pM\n", sta->addr);

	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_sta_remove);
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553int
2554il_rx_queue_space(const struct il_rx_queue *q)
2555{
2556 int s = q->read - q->write;
2557 if (s <= 0)
2558 s += RX_QUEUE_SIZE;
2559
2560 s -= 2;
2561 if (s < 0)
2562 s = 0;
2563 return s;
2564}
2565EXPORT_SYMBOL(il_rx_queue_space);
2566
2567
2568
2569
/*
 * Push the Rx queue write pointer (rounded down to a multiple of 8) to
 * the device, if an update is pending. When the device may be asleep
 * (S_POWER_PMI), first check MAC_SLEEP and, if sleeping, request a
 * wakeup instead of writing — the update stays pending for later.
 */
void
il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(S_POWER_PMI, &il->status)) {
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
			       reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* leave need_update set; retried after wakeup */
			goto exit_unlock;
		}

		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* NOTE(review): both branches perform the same write here;
		 * kept separate to mirror the Tx-side structure. */
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2610
2611int
2612il_rx_queue_alloc(struct il_priv *il)
2613{
2614 struct il_rx_queue *rxq = &il->rxq;
2615 struct device *dev = &il->pci_dev->dev;
2616 int i;
2617
2618 spin_lock_init(&rxq->lock);
2619 INIT_LIST_HEAD(&rxq->rx_free);
2620 INIT_LIST_HEAD(&rxq->rx_used);
2621
2622
2623 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2624 GFP_KERNEL);
2625 if (!rxq->bd)
2626 goto err_bd;
2627
2628 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2629 &rxq->rb_stts_dma, GFP_KERNEL);
2630 if (!rxq->rb_stts)
2631 goto err_rb;
2632
2633
2634 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2635 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2636
2637
2638
2639 rxq->read = rxq->write = 0;
2640 rxq->write_actual = 0;
2641 rxq->free_count = 0;
2642 rxq->need_update = 0;
2643 return 0;
2644
2645err_rb:
2646 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2647 rxq->bd_dma);
2648err_bd:
2649 return -ENOMEM;
2650}
2651EXPORT_SYMBOL(il_rx_queue_alloc);
2652
/*
 * Rx handler for spectrum measurement notifications: ignore the "start"
 * notification (state == 0); otherwise store the report and flag it
 * ready for consumers.
 */
void
il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (!report->state) {
		D_11H("Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&il->measure_report, report, sizeof(*report));
	il->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2668
2669
2670
2671
/*
 * Translate hardware decryption status into mac80211 rx status flags.
 * Returns 0 to keep the frame (possibly with RX_FLAG_DECRYPTED set),
 * -1 to drop it (failed ICV/MIC check).
 */
int
il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
		      u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All frames are handled in software when HW decryption is
	 * disabled in the active RXON filter.
	 */
	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	D_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* a bad TTAK means the TKIP frame could not be checked;
		 * keep it without the decrypted flag */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TKIP shares the ICV/MIC check with WEP */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV/MIC: payload is unusable, drop frame */
			D_RX("Packet destroyed\n");
			return -1;
		}
		/* fall through - shared "decrypt OK" handling */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			D_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(il_set_decrypted_flag);
2719
2720
2721
2722
/*
 * Push a Tx queue's write pointer to the device, if an update is
 * pending. As in the Rx case, when power saving is active and the MAC
 * is asleep, a wakeup is requested instead and the update stays pending.
 */
void
il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
			       txq_id, reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
	} else
		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
2759
2760
2761
2762
2763void
2764il_tx_queue_unmap(struct il_priv *il, int txq_id)
2765{
2766 struct il_tx_queue *txq = &il->txq[txq_id];
2767 struct il_queue *q = &txq->q;
2768
2769 if (q->n_bd == 0)
2770 return;
2771
2772 while (q->write_ptr != q->read_ptr) {
2773 il->ops->txq_free_tfd(il, txq);
2774 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2775 }
2776}
2777EXPORT_SYMBOL(il_tx_queue_unmap);
2778
2779
2780
2781
2782
2783
2784
2785
2786
/**
 * il_tx_queue_free - Deallocate DMA queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void
il_tx_queue_free(struct il_priv *il, int txq_id)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_tx_queue_unmap(il, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs (shared with device) */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD skb pointers */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* De-alloc the slot arrays themselves */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_tx_queue_free);
2819
2820
2821
2822
/*
 * il_cmd_queue_unmap - unmap all DMA-mapped host commands still pending
 *
 * Walks the command queue between read_ptr and write_ptr and unmaps any
 * entry still marked CMD_MAPPED, then handles the dedicated "huge"
 * command slot which lives at index n_win, outside the normal window.
 */
void
il_cmd_queue_unmap(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = il_get_cmd_idx(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_MAPPED) {
			pci_unmap_single(il->pci_dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 PCI_DMA_BIDIRECTIONAL);
			/* clear flag so the slot is not unmapped twice */
			txq->meta[i].flags = 0;
		}

		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	/* the oversized ("huge") command slot sits at idx n_win */
	i = q->n_win;
	if (txq->meta[i].flags & CMD_MAPPED) {
		pci_unmap_single(il->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
		txq->meta[i].flags = 0;
	}
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
2857
2858
2859
2860
2861
2862
2863
2864
2865
/**
 * il_cmd_queue_free - Deallocate DMA queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void
il_cmd_queue_free(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_cmd_queue_unmap(il);

	/* De-alloc array of command/tx buffers. Note the "<=": the
	 * command queue allocates TFD_CMD_SLOTS + 1 buffers, the extra
	 * one being the oversized slot for huge commands (scan). */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs (shared with device) */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc the slot arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_cmd_queue_free);
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918int
2919il_queue_space(const struct il_queue *q)
2920{
2921 int s = q->read_ptr - q->write_ptr;
2922
2923 if (q->read_ptr > q->write_ptr)
2924 s -= q->n_bd;
2925
2926 if (s <= 0)
2927 s += q->n_win;
2928
2929 s -= 2;
2930 if (s < 0)
2931 s = 0;
2932 return s;
2933}
2934EXPORT_SYMBOL(il_queue_space);
2935
2936
2937
2938
2939
/*
 * il_queue_init - Initialize queue's high/low-water marks and r/w idxes
 */
static int
il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
{
	/*
	 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * il_queue_inc_wrap and il_queue_dec_wrap are broken.
	 */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	q->n_bd = TFD_QUEUE_SIZE_MAX;

	/* window of usable slots (smaller than the full circular buffer) */
	q->n_win = slots;
	q->id = id;

	/* slots must be power-of-two size, otherwise
	 * il_get_cmd_idx is broken. */
	BUG_ON(!is_power_of_2(slots));

	q->low_mark = q->n_win / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_win / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
2970
2971
2972
2973
/*
 * il_tx_queue_alloc - Alloc driver data and TFD circular buffer
 *
 * Allocates the skb pointer array (data queues only, not the command
 * queue) and the DMA-coherent TFD ring shared with the device.
 * Returns 0 on success, -ENOMEM on failure (partial allocations freed).
 */
static int
il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
{
	struct device *dev = &il->pci_dev->dev;
	size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != il->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
				    sizeof(struct sk_buff *),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IL_ERR("Fail to alloc skbs\n");
			goto error;
		}
	} else
		txq->skbs = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds =
	    dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	txq->q.id = id;

	return 0;

error:
	kfree(txq->skbs);
	txq->skbs = NULL;

	return -ENOMEM;
}
3010
3011
3012
3013
3014int
3015il_tx_queue_init(struct il_priv *il, u32 txq_id)
3016{
3017 int i, len, ret;
3018 int slots, actual_slots;
3019 struct il_tx_queue *txq = &il->txq[txq_id];
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029 if (txq_id == il->cmd_queue) {
3030 slots = TFD_CMD_SLOTS;
3031 actual_slots = slots + 1;
3032 } else {
3033 slots = TFD_TX_CMD_SLOTS;
3034 actual_slots = slots;
3035 }
3036
3037 txq->meta =
3038 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
3039 txq->cmd =
3040 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
3041
3042 if (!txq->meta || !txq->cmd)
3043 goto out_free_arrays;
3044
3045 len = sizeof(struct il_device_cmd);
3046 for (i = 0; i < actual_slots; i++) {
3047
3048 if (i == slots)
3049 len = IL_MAX_CMD_SIZE;
3050
3051 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
3052 if (!txq->cmd[i])
3053 goto err;
3054 }
3055
3056
3057 ret = il_tx_queue_alloc(il, txq, txq_id);
3058 if (ret)
3059 goto err;
3060
3061 txq->need_update = 0;
3062
3063
3064
3065
3066
3067
3068 if (txq_id < 4)
3069 il_set_swq_id(txq, txq_id, txq_id);
3070
3071
3072 il_queue_init(il, &txq->q, slots, txq_id);
3073
3074
3075 il->ops->txq_init(il, txq);
3076
3077 return 0;
3078err:
3079 for (i = 0; i < actual_slots; i++)
3080 kfree(txq->cmd[i]);
3081out_free_arrays:
3082 kfree(txq->meta);
3083 kfree(txq->cmd);
3084
3085 return -ENOMEM;
3086}
3087EXPORT_SYMBOL(il_tx_queue_init);
3088
3089void
3090il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3091{
3092 int slots, actual_slots;
3093 struct il_tx_queue *txq = &il->txq[txq_id];
3094
3095 if (txq_id == il->cmd_queue) {
3096 slots = TFD_CMD_SLOTS;
3097 actual_slots = TFD_CMD_SLOTS + 1;
3098 } else {
3099 slots = TFD_TX_CMD_SLOTS;
3100 actual_slots = TFD_TX_CMD_SLOTS;
3101 }
3102
3103 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3104 txq->need_update = 0;
3105
3106
3107 il_queue_init(il, &txq->q, slots, txq_id);
3108
3109
3110 il->ops->txq_init(il, txq);
3111}
3112EXPORT_SYMBOL(il_tx_queue_reset);
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125int
3126il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
3127{
3128 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3129 struct il_queue *q = &txq->q;
3130 struct il_device_cmd *out_cmd;
3131 struct il_cmd_meta *out_meta;
3132 dma_addr_t phys_addr;
3133 unsigned long flags;
3134 int len;
3135 u32 idx;
3136 u16 fix_size;
3137
3138 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
3139 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
3140
3141
3142
3143
3144
3145
3146 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
3147 !(cmd->flags & CMD_SIZE_HUGE));
3148 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
3149
3150 if (il_is_rfkill(il) || il_is_ctkill(il)) {
3151 IL_WARN("Not sending command - %s KILL\n",
3152 il_is_rfkill(il) ? "RF" : "CT");
3153 return -EIO;
3154 }
3155
3156 spin_lock_irqsave(&il->hcmd_lock, flags);
3157
3158 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
3159 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3160
3161 IL_ERR("Restarting adapter due to command queue full\n");
3162 queue_work(il->workqueue, &il->restart);
3163 return -ENOSPC;
3164 }
3165
3166 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3167 out_cmd = txq->cmd[idx];
3168 out_meta = &txq->meta[idx];
3169
3170 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
3171 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3172 return -ENOSPC;
3173 }
3174
3175 memset(out_meta, 0, sizeof(*out_meta));
3176 out_meta->flags = cmd->flags | CMD_MAPPED;
3177 if (cmd->flags & CMD_WANT_SKB)
3178 out_meta->source = cmd;
3179 if (cmd->flags & CMD_ASYNC)
3180 out_meta->callback = cmd->callback;
3181
3182 out_cmd->hdr.cmd = cmd->id;
3183 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
3184
3185
3186
3187
3188 out_cmd->hdr.flags = 0;
3189 out_cmd->hdr.sequence =
3190 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
3191 if (cmd->flags & CMD_SIZE_HUGE)
3192 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
3193 len = sizeof(struct il_device_cmd);
3194 if (idx == TFD_CMD_SLOTS)
3195 len = IL_MAX_CMD_SIZE;
3196
3197#ifdef CONFIG_IWLEGACY_DEBUG
3198 switch (out_cmd->hdr.cmd) {
3199 case C_TX_LINK_QUALITY_CMD:
3200 case C_SENSITIVITY:
3201 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3202 "%d bytes at %d[%d]:%d\n",
3203 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3204 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3205 q->write_ptr, idx, il->cmd_queue);
3206 break;
3207 default:
3208 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3209 "%d bytes at %d[%d]:%d\n",
3210 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3211 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3212 idx, il->cmd_queue);
3213 }
3214#endif
3215
3216 phys_addr =
3217 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
3218 PCI_DMA_BIDIRECTIONAL);
3219 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
3220 idx = -ENOMEM;
3221 goto out;
3222 }
3223 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3224 dma_unmap_len_set(out_meta, len, fix_size);
3225
3226 txq->need_update = 1;
3227
3228 if (il->ops->txq_update_byte_cnt_tbl)
3229
3230 il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
3231
3232 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
3233 U32_PAD(cmd->len));
3234
3235
3236 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3237 il_txq_update_write_ptr(il, txq);
3238
3239out:
3240 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3241 return idx;
3242}
3243
3244
3245
3246
3247
3248
3249
3250
/*
 * il_hcmd_queue_reclaim - reclaim command queue entries the uCode consumed
 *
 * Advances read_ptr past @idx. The command buffers themselves are static
 * per-slot allocations, so nothing is freed here; only the idx moves.
 * More than one reclaimed entry per completion means the firmware skipped
 * commands, which triggers an adapter restart.
 */
static void
il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		/* normally exactly one entry is reclaimed per completion */
		if (nfreed++ > 0) {
			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
			       q->write_ptr, q->read_ptr);
			queue_work(il->workqueue, &il->restart);
		}

	}
}
3276
3277
3278
3279
3280
3281
3282
3283
3284
/**
 * il_tx_cmd_complete - handle a command completion notification from uCode
 * @rxb: Rx buffer holding the response packet
 *
 * Unmaps the command's DMA buffer, hands the response to the waiter
 * (CMD_WANT_SKB) or the async callback, reclaims the queue entry and
 * wakes any synchronous caller blocked in the command path.
 */
void
il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	int cmd_idx;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct il_device_cmd *cmd;
	struct il_cmd_meta *meta;
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN
	    (txq_id != il->cmd_queue,
	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
	     il->txq[il->cmd_queue].q.write_ptr)) {
		il_print_hex_error(il, pkt, 32);
		return;
	}

	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
	cmd = txq->cmd[cmd_idx];
	meta = &txq->meta[cmd_idx];

	/* record activity for the stuck-queue watchdog */
	txq->time_stamp = jiffies;

	pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* hand the response page to the synchronous waiter */
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(il, cmd, pkt);

	spin_lock_irqsave(&il->hcmd_lock, flags);

	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(S_HCMD_ACTIVE, &il->status);
		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
		       il_get_cmd_string(cmd->hdr.cmd));
		wake_up(&il->wait_command_queue);
	}

	/* Mark as unmapped so the slot can be reused */
	meta->flags = 0;

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
}
EXPORT_SYMBOL(il_tx_cmd_complete);
3344
/* Module metadata for the shared iwlegacy core used by 3945 and 4965 */
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
/* BT coexistence enabled by default; read-only module parameter */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug level bitmask, exported for the hw-specific modules */
u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

/* Broadcast MAC address, shared by 3945 and 4965 */
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3375
/* Highest single-stream HT bit rates in Mbps, used for mcs.rx_highest */
#define MAX_BIT_RATE_40_MHZ 150
#define MAX_BIT_RATE_20_MHZ 72
/*
 * il_init_ht_hw_capab - fill mac80211 HT capability info from hw params
 */
static void
il_init_ht_hw_capab(const struct il_priv *il,
		    struct ieee80211_sta_ht_cap *ht_info,
		    enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = il->hw_params.rx_chains_num;
	u8 tx_chains_num = il->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (il->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* bit 0 of rx_mask[4] advertises MCS 32 */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (il->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* advertise one MCS set (8 rates) per available RX chain */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |=
		    ((tx_chains_num -
		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
3427
3428
3429
3430
/**
 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom
 */
int
il_init_geos(struct il_priv *il)
{
	struct il_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = 0;

	if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		D_INFO("Geography modes already initialized.\n");
		set_bit(S_GEO_CONFIGURED, &il->status);
		return 0;
	}

	channels =
	    kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
		    GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates =
	    kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
		    GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5 GHz channels start after the 2.4 GHz band-1 channels */
	sband = &il->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
	/* just OFDM on 5 GHz */
	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);

	sband = &il->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK on 2.4 GHz */
	sband->bitrates = rates;
	sband->n_bitrates = RATE_COUNT_LEGACY;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);

	il->ieee_channels = channels;
	il->ieee_rates = rates;

	for (i = 0; i < il->channel_count; i++) {
		ch = &il->channel_info[i];

		if (!il_is_channel_valid(ch))
			continue;

		sband = &il->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
		    ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		/* NOTE(review): always true here -- invalid channels were
		 * skipped by the continue above, so the else branch below
		 * is unreachable */
		if (il_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
		       geo_ch->center_freq,
		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
		       geo_ch->
		       flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
		       geo_ch->flags);
	}

	/* device tx power limits follow the highest per-channel maximum */
	il->tx_power_device_lmt = max_tx_power;
	il->tx_power_user_lmt = max_tx_power;
	il->tx_power_next = max_tx_power;

	if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
	    (il->cfg->sku & IL_SKU_A)) {
		IL_INFO("Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			il->pci_dev->device, il->pci_dev->subsystem_device);
		il->cfg->sku &= ~IL_SKU_A;
	}

	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		il->bands[IEEE80211_BAND_2GHZ].n_channels,
		il->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(S_GEO_CONFIGURED, &il->status);

	return 0;
}
EXPORT_SYMBOL(il_init_geos);
3548
3549
3550
3551
3552void
3553il_free_geos(struct il_priv *il)
3554{
3555 kfree(il->ieee_channels);
3556 kfree(il->ieee_rates);
3557 clear_bit(S_GEO_CONFIGURED, &il->status);
3558}
3559EXPORT_SYMBOL(il_free_geos);
3560
3561static bool
3562il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3563 u16 channel, u8 extension_chan_offset)
3564{
3565 const struct il_channel_info *ch_info;
3566
3567 ch_info = il_get_channel_info(il, band, channel);
3568 if (!il_is_channel_valid(ch_info))
3569 return false;
3570
3571 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3572 return !(ch_info->
3573 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3574 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3575 return !(ch_info->
3576 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3577
3578 return false;
3579}
3580
/*
 * il_is_ht40_tx_allowed - may we transmit in 40 MHz on the staged channel?
 *
 * @ht_cap may be NULL (then only local HT state is checked).
 */
bool
il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!il->ht.enabled || !il->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * the bit will not set if it is pure 40MHz case
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	/* debugfs knob forcing 20 MHz operation */
	if (il->disable_ht40)
		return false;
#endif

	/* finally, regulatory/eeprom data must allow the extension channel */
	return il_is_channel_extension(il, il->band,
				       le16_to_cpu(il->staging.channel),
				       il->ht.extension_chan_offset);
}
EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3604
3605static u16
3606il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3607{
3608 u16 new_val;
3609 u16 beacon_factor;
3610
3611
3612
3613
3614
3615 if (!beacon_val)
3616 return DEFAULT_BEACON_INTERVAL;
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3631 new_val = beacon_val / beacon_factor;
3632
3633 if (!new_val)
3634 new_val = max_beacon_val;
3635
3636 return new_val;
3637}
3638
/*
 * il_send_rxon_timing - send C_RXON_TIMING (beacon timing) to the uCode
 *
 * Derives beacon interval, DTIM period and the initial beacon timer from
 * the current vif/mac80211 configuration. Caller must hold il->mutex.
 * Returns the result of il_send_cmd_pdu().
 */
int
il_send_rxon_timing(struct il_priv *il)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = il->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));

	il->timing.timestamp = cpu_to_le64(il->timestamp);
	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_win from the uCode;
	 * for now just always use 0
	 */
	il->timing.atim_win = 0;

	/* clamp interval into the range the device can program */
	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	il->timing.beacon_interval = cpu_to_le16(beacon_int);

	/* time (usec) from the last beacon boundary, derived from the TSF;
	 * do_div modifies tsf in place, hence the copy */
	tsf = il->timestamp;
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period defaults to 1 when unknown or not associated */
	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(il->timing.beacon_interval),
		le32_to_cpu(il->timing.beacon_init_val),
		le16_to_cpu(il->timing.atim_win));

	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
			       &il->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3687
3688void
3689il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3690{
3691 struct il_rxon_cmd *rxon = &il->staging;
3692
3693 if (hw_decrypt)
3694 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3695 else
3696 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3697
3698}
3699EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3700
3701
3702int
3703il_check_rxon_cmd(struct il_priv *il)
3704{
3705 struct il_rxon_cmd *rxon = &il->staging;
3706 bool error = false;
3707
3708 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3709 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3710 IL_WARN("check 2.4G: wrong narrow\n");
3711 error = true;
3712 }
3713 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3714 IL_WARN("check 2.4G: wrong radar\n");
3715 error = true;
3716 }
3717 } else {
3718 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3719 IL_WARN("check 5.2G: not short slot!\n");
3720 error = true;
3721 }
3722 if (rxon->flags & RXON_FLG_CCK_MSK) {
3723 IL_WARN("check 5.2G: CCK!\n");
3724 error = true;
3725 }
3726 }
3727 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3728 IL_WARN("mac/bssid mcast!\n");
3729 error = true;
3730 }
3731
3732
3733 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3734 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3735 IL_WARN("neither 1 nor 6 are basic\n");
3736 error = true;
3737 }
3738
3739 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3740 IL_WARN("aid > 2007\n");
3741 error = true;
3742 }
3743
3744 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3745 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3746 IL_WARN("CCK and short slot\n");
3747 error = true;
3748 }
3749
3750 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3751 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3752 IL_WARN("CCK and auto detect");
3753 error = true;
3754 }
3755
3756 if ((rxon->
3757 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3758 RXON_FLG_TGG_PROTECT_MSK) {
3759 IL_WARN("TGg but no auto-detect\n");
3760 error = true;
3761 }
3762
3763 if (error)
3764 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3765
3766 if (error) {
3767 IL_ERR("Invalid RXON\n");
3768 return -EINVAL;
3769 }
3770 return 0;
3771}
3772EXPORT_SYMBOL(il_check_rxon_cmd);
3773
3774
3775
3776
3777
3778
3779
3780
3781
/**
 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @il: staging RXON is compared to active RXON
 *
 * If the RXON structure is changing enough to require a new tune, or is
 * clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that a
 * full RXON command (rather than RXON_ASSOC) is required.
 */
int
il_full_rxon_required(struct il_priv *il)
{
	const struct il_rxon_cmd *staging = &il->staging;
	const struct il_rxon_cmd *active = &il->active;

#define CHK(cond) \
	if ((cond)) { \
		D_INFO("need full RXON - " #cond "\n"); \
		return 1; \
	}

#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		D_INFO("need full RXON - " \
		       #c1 " != " #c2 " - %d != %d\n", \
		       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!il_is_associated(il));
	CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
				     active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching the association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(il_full_rxon_required);
3835
3836u8
3837il_get_lowest_plcp(struct il_priv *il)
3838{
3839
3840
3841
3842
3843 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3844 return RATE_1M_PLCP;
3845 else
3846 return RATE_6M_PLCP;
3847}
3848EXPORT_SYMBOL(il_get_lowest_plcp);
3849
/*
 * _il_set_rxon_ht - fold the current HT configuration into staging RXON
 *
 * Sets channel-mode, control-channel-location and protection flags based
 * on il->ht state. @ht_conf is currently unused by this implementation.
 */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	struct il_rxon_cmd *rxon = &il->staging;

	if (!il->ht.enabled) {
		/* HT off: clear every HT-related RXON flag */
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |=
	    cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before setting the mode */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, NULL)) {
		/* pure ht40 */
		if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension
			 * channel */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension
			 * channel */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed
				 * mode */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		il->ht.protection, il->ht.extension_chan_offset);
}
3915
/* Public wrapper: apply the HT configuration to the staging RXON. */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3922
3923
/*
 * il_get_single_channel_number - pick a valid channel other than current
 *
 * Scans channel_info[] for the first valid channel on @band that differs
 * from the staged RXON channel.
 * NOTE(review): assumes channel_info[] holds 2.4 GHz channels at idxes
 * 0..13 and 5 GHz from idx 14 on -- confirm against the eeprom parser.
 * If no other valid channel exists, the last examined channel number is
 * returned.
 */
u8
il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
{
	const struct il_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = il->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		channel = il->channel_info[i].channel;
		/* skip the channel we are already on */
		if (channel == le16_to_cpu(il->staging.channel))
			continue;

		ch_info = il_get_channel_info(il, band, channel);
		if (il_is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(il_get_single_channel_number);
3953
3954
3955
3956
3957
3958
3959
3960
3961int
3962il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3963{
3964 enum ieee80211_band band = ch->band;
3965 u16 channel = ch->hw_value;
3966
3967 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3968 return 0;
3969
3970 il->staging.channel = cpu_to_le16(channel);
3971 if (band == IEEE80211_BAND_5GHZ)
3972 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3973 else
3974 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3975
3976 il->band = band;
3977
3978 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3979
3980 return 0;
3981}
3982EXPORT_SYMBOL(il_set_rxon_channel);
3983
3984void
3985il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
3986 struct ieee80211_vif *vif)
3987{
3988 if (band == IEEE80211_BAND_5GHZ) {
3989 il->staging.flags &=
3990 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3991 RXON_FLG_CCK_MSK);
3992 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3993 } else {
3994
3995 if (vif && vif->bss_conf.use_short_slot)
3996 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3997 else
3998 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3999
4000 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
4001 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
4002 il->staging.flags &= ~RXON_FLG_CCK_MSK;
4003 }
4004}
4005EXPORT_SYMBOL(il_set_flags_for_band);
4006
4007
4008
4009
/*
 * il_connection_init_rx_config - initialize staging RXON for current mode
 *
 * Fills il->staging from scratch: device type and filter flags per
 * interface type, channel/band from the active config (or the first
 * known channel), basic rates and HT defaults.
 */
void
il_connection_init_rx_config(struct il_priv *il)
{
	const struct il_channel_info *ch_info;

	memset(&il->staging, 0, sizeof(il->staging));

	switch (il->iw_mode) {
	case NL80211_IFTYPE_UNSPECIFIED:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		break;
	case NL80211_IFTYPE_STATION:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	case NL80211_IFTYPE_ADHOC:
		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		il->staging.filter_flags =
		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	default:
		IL_ERR("Unsupported interface type %d\n", il->vif->type);
		return;
	}

#if 0
	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(il->hw)->short_preamble)
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* keep the currently active channel if it is still known */
	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));

	if (!ch_info)
		ch_info = &il->channel_info[0];

	il->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, il->band, il->vif);

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	il->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both HT40 channel-mode flags (back to 20 MHz legacy) */
	il->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (il->vif)
		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);

	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
4071
4072void
4073il_set_rate(struct il_priv *il)
4074{
4075 const struct ieee80211_supported_band *hw = NULL;
4076 struct ieee80211_rate *rate;
4077 int i;
4078
4079 hw = il_get_hw_mode(il, il->band);
4080 if (!hw) {
4081 IL_ERR("Failed to set rate: unable to get hw mode\n");
4082 return;
4083 }
4084
4085 il->active_rate = 0;
4086
4087 for (i = 0; i < hw->n_bitrates; i++) {
4088 rate = &(hw->bitrates[i]);
4089 if (rate->hw_value < RATE_COUNT_LEGACY)
4090 il->active_rate |= (1 << rate->hw_value);
4091 }
4092
4093 D_RATE("Set active_rate = %0x\n", il->active_rate);
4094
4095 il->staging.cck_basic_rates =
4096 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4097
4098 il->staging.ofdm_basic_rates =
4099 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4100}
4101EXPORT_SYMBOL(il_set_rate);
4102
4103void
4104il_chswitch_done(struct il_priv *il, bool is_success)
4105{
4106 if (test_bit(S_EXIT_PENDING, &il->status))
4107 return;
4108
4109 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4110 ieee80211_chswitch_done(il->vif, is_success);
4111}
4112EXPORT_SYMBOL(il_chswitch_done);
4113
4114void
4115il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
4116{
4117 struct il_rx_pkt *pkt = rxb_addr(rxb);
4118 struct il_csa_notification *csa = &(pkt->u.csa_notif);
4119 struct il_rxon_cmd *rxon = (void *)&il->active;
4120
4121 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4122 return;
4123
4124 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
4125 rxon->channel = csa->channel;
4126 il->staging.channel = csa->channel;
4127 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
4128 il_chswitch_done(il, true);
4129 } else {
4130 IL_ERR("CSA notif (fail) : channel %d\n",
4131 le16_to_cpu(csa->channel));
4132 il_chswitch_done(il, false);
4133 }
4134}
4135EXPORT_SYMBOL(il_hdl_csa);
4136
4137#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il_print_rx_config_cmd - dump the staging RXON command to the debug log
 *
 * Debug-only helper (compiled under CONFIG_IWLEGACY_DEBUG): hex-dumps
 * the whole staging RXON structure, then prints each field with
 * little-endian values converted to CPU byte order.
 */
void
il_print_rx_config_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
4156#endif
4157
4158
4159
/*
 * il_irq_handle_error - handle a fatal firmware (uCode) error
 *
 * Marks the firmware as failed, aborts any in-flight host command,
 * dumps diagnostic state, and - when permitted by the restart_fw module
 * parameter - schedules a full adapter restart.
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Set the FW error flag -- cleared on driver restart */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel currently queued command so its waiter is released. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	il->ops->dump_nic_error_log(il);
	if (il->ops->dump_fh)
		il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il);
#endif

	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the S_READY bit */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);
4194
4195static int
4196_il_apm_stop_master(struct il_priv *il)
4197{
4198 int ret = 0;
4199
4200
4201 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4202
4203 ret =
4204 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4205 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4206 if (ret < 0)
4207 IL_WARN("Master Disable Timed Out, 100 usec\n");
4208
4209 D_INFO("stop master\n");
4210
4211 return ret;
4212}
4213
/*
 * _il_apm_stop - put the card into a low-power state
 *
 * Caller must hold il->reg_lock (asserted).  Stops busmaster DMA,
 * soft-resets the device, and drops the "init done" flag so the
 * hardware stops requesting clocks.
 */
void
_il_apm_stop(struct il_priv *il)
{
	lockdep_assert_held(&il->reg_lock);

	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	_il_apm_stop_master(il);

	/* Reset the entire device */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* brief pause to let the reset take effect */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) state to D0U* (Uninitialized) state.
	 */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(_il_apm_stop);
4236
4237void
4238il_apm_stop(struct il_priv *il)
4239{
4240 unsigned long flags;
4241
4242 spin_lock_irqsave(&il->reg_lock, flags);
4243 _il_apm_stop(il);
4244 spin_unlock_irqrestore(&il->reg_lock, flags);
4245}
4246EXPORT_SYMBOL(il_apm_stop);
4247
4248
4249
4250
4251
4252
/*
 * il_apm_init - start up the NIC's basic functionality after it has
 * been reset (e.g. after platform boot or shutdown via il_apm_stop()).
 * NOTE:  This does not load/init the uCode.
 *
 * Returns 0 on success, negative poll error if clock stabilization
 * times out.
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any
	 * hardware bits already set by default after reset.
	 */
	/* Disable L0S exit timer (platform NMI workaround) */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug workaround)
	 */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (il->cfg->set_l0s) {
		pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
		if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
			/* L1-ASPM enabled; disable(!) L0S  */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG,
			   il->cfg->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock request; devices using the bootstrap state
	 * machine (use_bsm) also need the BSM clock to load firmware.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);
4359
4360int
4361il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4362{
4363 int ret;
4364 s8 prev_tx_power;
4365 bool defer;
4366
4367 lockdep_assert_held(&il->mutex);
4368
4369 if (il->tx_power_user_lmt == tx_power && !force)
4370 return 0;
4371
4372 if (!il->ops->send_tx_power)
4373 return -EOPNOTSUPP;
4374
4375
4376 if (tx_power < 0) {
4377 IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
4378 return -EINVAL;
4379 }
4380
4381 if (tx_power > il->tx_power_device_lmt) {
4382 IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
4383 tx_power, il->tx_power_device_lmt);
4384 return -EINVAL;
4385 }
4386
4387 if (!il_is_ready_rf(il))
4388 return -EIO;
4389
4390
4391
4392 il->tx_power_next = tx_power;
4393
4394
4395 defer = test_bit(S_SCANNING, &il->status) ||
4396 memcmp(&il->active, &il->staging, sizeof(il->staging));
4397 if (defer && !force) {
4398 D_INFO("Deferring tx power set\n");
4399 return 0;
4400 }
4401
4402 prev_tx_power = il->tx_power_user_lmt;
4403 il->tx_power_user_lmt = tx_power;
4404
4405 ret = il->ops->send_tx_power(il);
4406
4407
4408 if (ret) {
4409 il->tx_power_user_lmt = prev_tx_power;
4410 il->tx_power_next = prev_tx_power;
4411 }
4412 return ret;
4413}
4414EXPORT_SYMBOL(il_set_tx_power);
4415
4416void
4417il_send_bt_config(struct il_priv *il)
4418{
4419 struct il_bt_cmd bt_cmd = {
4420 .lead_time = BT_LEAD_TIME_DEF,
4421 .max_kill = BT_MAX_KILL_DEF,
4422 .kill_ack_mask = 0,
4423 .kill_cts_mask = 0,
4424 };
4425
4426 if (!bt_coex_active)
4427 bt_cmd.flags = BT_COEX_DISABLE;
4428 else
4429 bt_cmd.flags = BT_COEX_ENABLE;
4430
4431 D_INFO("BT coex %s\n",
4432 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4433
4434 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4435 IL_ERR("failed to send BT Coex Config\n");
4436}
4437EXPORT_SYMBOL(il_send_bt_config);
4438
4439int
4440il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4441{
4442 struct il_stats_cmd stats_cmd = {
4443 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4444 };
4445
4446 if (flags & CMD_ASYNC)
4447 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4448 &stats_cmd, NULL);
4449 else
4450 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4451 &stats_cmd);
4452}
4453EXPORT_SYMBOL(il_send_stats_request);
4454
/*
 * il_hdl_pm_sleep - handle a PM sleep notification from the firmware
 *
 * Pure debug handler: logs the reported sleep mode and wakeup source
 * when CONFIG_IWLEGACY_DEBUG is enabled; otherwise a no-op.
 */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4466
/*
 * il_hdl_pm_debug_stats - dump an unhandled PM debug-statistics packet
 *
 * Logs the payload length (masked out of len_n_flags) and hex-dumps the
 * raw packet body at IL_DL_RADIO debug level.
 */
void
il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
	D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
		il_get_cmd_string(pkt->hdr.cmd));
	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4477
/*
 * il_hdl_error - log an error-reply notification from the firmware
 *
 * Prints the error type, the offending command (name and id), the bad
 * sequence number and the extra error info carried in the packet.
 */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4492
/* Reset all interrupt counters kept in il->isr_stats. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4498
4499int
4500il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
4501 const struct ieee80211_tx_queue_params *params)
4502{
4503 struct il_priv *il = hw->priv;
4504 unsigned long flags;
4505 int q;
4506
4507 D_MAC80211("enter\n");
4508
4509 if (!il_is_ready_rf(il)) {
4510 D_MAC80211("leave - RF not ready\n");
4511 return -EIO;
4512 }
4513
4514 if (queue >= AC_NUM) {
4515 D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
4516 return 0;
4517 }
4518
4519 q = AC_NUM - 1 - queue;
4520
4521 spin_lock_irqsave(&il->lock, flags);
4522
4523 il->qos_data.def_qos_parm.ac[q].cw_min =
4524 cpu_to_le16(params->cw_min);
4525 il->qos_data.def_qos_parm.ac[q].cw_max =
4526 cpu_to_le16(params->cw_max);
4527 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
4528 il->qos_data.def_qos_parm.ac[q].edca_txop =
4529 cpu_to_le16((params->txop * 32));
4530
4531 il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
4532
4533 spin_unlock_irqrestore(&il->lock, flags);
4534
4535 D_MAC80211("leave\n");
4536 return 0;
4537}
4538EXPORT_SYMBOL(il_mac_conf_tx);
4539
4540int
4541il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4542{
4543 struct il_priv *il = hw->priv;
4544 int ret;
4545
4546 D_MAC80211("enter\n");
4547
4548 ret = (il->ibss_manager == IL_IBSS_MANAGER);
4549
4550 D_MAC80211("leave ret %d\n", ret);
4551 return ret;
4552}
4553EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4554
/*
 * il_set_mode - program the device for the current interface type
 *
 * Rebuilds the staging RXON from scratch, lets the hardware-specific
 * code pick the RX chain configuration, then commits the RXON.
 */
static int
il_set_mode(struct il_priv *il)
{
	il_connection_init_rx_config(il);

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	return il_commit_rxon(il);
}
4565
/*
 * il_mac_add_interface - mac80211 callback to add a virtual interface
 *
 * Only a single interface is supported.  "Adding" the interface that is
 * already active simply re-applies its configuration (the reset case);
 * adding a second, different interface fails with -EOPNOTSUPP.
 */
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	int err;
	bool reset;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * We do not support multiple virtual interfaces, but on hardware
	 * reset we have to add the same interface again.
	 */
	reset = (il->vif == vif);
	if (il->vif && !reset) {
		err = -EOPNOTSUPP;
		goto out;
	}

	il->vif = vif;
	il->iw_mode = vif->type;

	err = il_set_mode(il);
	if (err) {
		IL_WARN("Fail to set mode %d\n", vif->type);
		/* on a fresh add, roll back to the "no interface" state */
		if (!reset) {
			il->vif = NULL;
			il->iw_mode = NL80211_IFTYPE_STATION;
		}
	}

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
4611
/*
 * il_teardown_interface - stop activity associated with an interface
 *
 * Cancels any scan started on behalf of @vif, then reprograms the
 * device via il_set_mode().  Caller must hold il->mutex (asserted).
 */
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	il_set_mode(il);
}
4624
/*
 * il_mac_remove_interface - mac80211 callback to remove an interface
 *
 * Detaches @vif from the driver, tears down scan/RXON state and forgets
 * the current BSSID.
 */
void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	WARN_ON(il->vif != vif);
	il->vif = NULL;
	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
	il_teardown_interface(il, vif);
	eth_zero_addr(il->bssid);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_remove_interface);
4643
4644int
4645il_alloc_txq_mem(struct il_priv *il)
4646{
4647 if (!il->txq)
4648 il->txq =
4649 kzalloc(sizeof(struct il_tx_queue) *
4650 il->cfg->num_of_queues, GFP_KERNEL);
4651 if (!il->txq) {
4652 IL_ERR("Not enough memory for txq\n");
4653 return -ENOMEM;
4654 }
4655 return 0;
4656}
4657EXPORT_SYMBOL(il_alloc_txq_mem);
4658
/* Free the Tx queue array allocated by il_alloc_txq_mem() and NULL the
 * pointer so a later il_alloc_txq_mem() can safely re-allocate. */
void
il_free_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_free_txq_mem);
4666
/*
 * il_force_reset - request an on-demand firmware reload
 *
 * @external: true when the request comes from outside the driver;
 *	external requests bypass both the rate limiting and the
 *	restart_fw module-parameter check.
 *
 * Returns 0 on success (or when the reload is cancelled by module
 * parameter), -EINVAL while the driver is going down, -EAGAIN when an
 * internal request falls inside the rate-limit window.
 */
int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* rate-limit internal reset requests */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * An internal reset is honored only when firmware reload is
	 * allowed by the restart_fw module parameter; external requests
	 * always proceed.
	 */
	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared on driver restart */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the ready bit.
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
EXPORT_SYMBOL(il_force_reset);
4719
/*
 * il_mac_change_interface - mac80211 callback to switch interface type
 *
 * Rejects P2P, and any request arriving while no interface is bound or
 * the RF is not ready; otherwise re-types the existing vif in place and
 * reprograms the device via il_teardown_interface()/il_set_mode().
 */
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	int err;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
		   vif->type, vif->addr, newtype, newp2p);

	if (newp2p) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!il->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* success */
	vif->type = newtype;
	vif->p2p = false;
	il->iw_mode = newtype;
	il_teardown_interface(il, vif);
	err = 0;

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);
4759
/*
 * il_mac_flush - mac80211 callback to wait for Tx queues to drain
 *
 * Walks the data Tx queues (the command queue is skipped), sleeping
 * 20 ms for each non-empty queue, giving up after roughly 500 ms
 * overall.
 *
 * NOTE(review): each non-empty queue is sampled only once before the
 * loop advances; a still-busy queue is not re-checked after the
 * msleep() -- confirm this matches the intended flush semantics.
 */
void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  u32 queues, bool drop)
{
	struct il_priv *il = hw->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	int i;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (il->txq == NULL)
		goto out;

	for (i = 0; i < il->hw_params.max_txq_num; i++) {
		struct il_queue *q;

		if (i == il->cmd_queue)
			continue;

		q = &il->txq[i].q;
		if (q->read_ptr == q->write_ptr)
			continue;

		if (time_after(jiffies, timeout)) {
			IL_ERR("Failed to flush queue %d\n", q->id);
			break;
		}

		msleep(20);
	}
out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_flush);
4795
4796
4797
4798
4799
4800static int
4801il_check_stuck_queue(struct il_priv *il, int cnt)
4802{
4803 struct il_tx_queue *txq = &il->txq[cnt];
4804 struct il_queue *q = &txq->q;
4805 unsigned long timeout;
4806 unsigned long now = jiffies;
4807 int ret;
4808
4809 if (q->read_ptr == q->write_ptr) {
4810 txq->time_stamp = now;
4811 return 0;
4812 }
4813
4814 timeout =
4815 txq->time_stamp +
4816 msecs_to_jiffies(il->cfg->wd_timeout);
4817
4818 if (time_after(now, timeout)) {
4819 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4820 jiffies_to_msecs(now - txq->time_stamp));
4821 ret = il_force_reset(il, false);
4822 return (ret == -EAGAIN) ? 0 : 1;
4823 }
4824
4825 return 0;
4826}
4827
4828
4829
4830
4831
4832#define IL_WD_TICK(timeout) ((timeout) / 4)
4833
4834
4835
4836
4837
/*
 * il_bg_watchdog - periodic timer callback checking for stuck Tx queues
 *
 * Checks the command queue first, then every data queue, via
 * il_check_stuck_queue().  If a queue is stuck a reset has already been
 * requested, so the timer is simply not re-armed; otherwise it re-arms
 * itself at 1/4 of the configured wd_timeout (IL_WD_TICK).
 */
void
il_bg_watchdog(unsigned long data)
{
	struct il_priv *il = (struct il_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	timeout = il->cfg->wd_timeout;
	if (timeout == 0)	/* watchdog disabled */
		return;

	/* monitor and check for stuck command queue */
	if (il_check_stuck_queue(il, il->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		/* skip as we already checked the command queue */
		if (cnt == il->cmd_queue)
			continue;
		if (il_check_stuck_queue(il, cnt))
			return;
	}

	mod_timer(&il->watchdog,
		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(il_bg_watchdog);
4869
4870void
4871il_setup_watchdog(struct il_priv *il)
4872{
4873 unsigned int timeout = il->cfg->wd_timeout;
4874
4875 if (timeout)
4876 mod_timer(&il->watchdog,
4877 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4878 else
4879 del_timer(&il->watchdog);
4880}
4881EXPORT_SYMBOL(il_setup_watchdog);
4882
4883
4884
4885
4886
4887
4888
/*
 * il_usecs_to_beacons - convert usec into the device's beacon-time format
 *
 * The device packs beacon time as a quotient (number of whole beacon
 * intervals) in the bits above beacon_time_tsf_bits and a remainder
 * (usec into the current interval) in the low bits.  Returns 0 when
 * either @usec or the computed interval is zero.
 */
u32
il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	/* interval count, masked to the bits available above the TSF field */
	quot =
	    (usec /
	     interval) & (il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits) >> il->
			  hw_params.beacon_time_tsf_bits);
	/* leftover usec, masked to the low TSF bits */
	rem =
	    (usec % interval) & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);

	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(il_usecs_to_beacons);
4913
4914
4915
4916
/*
 * il_add_beacon_time - add two packed beacon-time values
 *
 * @base and @addon are in the quotient/remainder format produced by
 * il_usecs_to_beacons().  The high (interval-count) parts are summed
 * directly; the low (remainder) parts are combined modulo the beacon
 * interval, carrying one interval into the high part when they wrap or
 * exactly cancel.  Result is returned little-endian for the device.
 */
__le32
il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
		   u32 beacon_interval)
{
	u32 base_low = base & il_beacon_time_mask_low(il,
						      il->hw_params.
						      beacon_time_tsf_bits);
	u32 addon_low = addon & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits)) +
	    (addon & il_beacon_time_mask_high(il,
					      il->hw_params.
					      beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << il->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << il->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(il_add_beacon_time);
4946
4947#ifdef CONFIG_PM_SLEEP
4948
/*
 * il_pci_suspend - PM callback for system suspend
 *
 * Only the adapter itself is stopped here; saving PCI config space and
 * powering down the slot is handled by the PCI core for dev_pm_ops
 * suspend callbacks.
 */
static int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/* put the device into a low-power state */
	il_apm_stop(il);

	return 0;
}
4966
/*
 * il_pci_resume - PM callback for system resume
 *
 * Re-enables interrupts and re-reads the hardware rfkill switch state,
 * propagating it into the driver status bits and the rfkill subsystem.
 */
static int
il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	/* a clear HW_RF_KILL_SW bit means the switch is asserted (radio off) */
	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}
4994
4995SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4996EXPORT_SYMBOL(il_pm_ops);
4997
4998#endif
4999
/*
 * il_update_qos - send the current default QoS parameters to firmware
 *
 * Rebuilds qos_flags from the active-QoS and HT state, then issues the
 * C_QOS_PARAM command asynchronously.  No-op while shutting down.
 */
static void
il_update_qos(struct il_priv *il)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il->qos_data.def_qos_parm.qos_flags = 0;

	if (il->qos_data.qos_active)
		il->qos_data.def_qos_parm.qos_flags |=
		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (il->ht.enabled)
		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
			      &il->qos_data.def_qos_parm, NULL);
}
5021
5022
5023
5024
/*
 * il_mac_config - mac80211 callback for hardware configuration changes
 *
 * Handles channel/HT40 changes (deferred while scanning), SMPS, power
 * save and TX power updates, and finally commits the staging RXON if it
 * differs from the active one.
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static SMPS for non-HT, which is what we want */
		il->current_ht_config.smps = conf->smps_mode;

		/* let hardware-specific code recompute the RX chain usage */
		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/*
	 * During scanning mac80211 delays channel setting until the scan
	 * is finished, calling back with changed == 0.
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Configure HT40 channels */
		if (il->ht.enabled != conf_is_ht(conf)) {
			il->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (il->ht.enabled) {
			if (conf_is_ht40_minus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				il->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				il->ht.is_40mhz = true;
			} else {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				il->ht.is_40mhz = false;
			}
		} else
			il->ht.is_40mhz = false;

		/*
		 * Default to no protection.  Protection mode will
		 * later be set from BSS config in il_ht_conf.
		 */
		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* if we are switching channel, clear any stale flags
		 * left over from the previous channel's configuration */
		if ((le16_to_cpu(il->staging.channel) != ch))
			il->staging.flags = 0;

		il_set_rxon_channel(il, channel);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, channel->band, il->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->ops->update_bcast_stations)
			ret = il->ops->update_bcast_stations(il);

set_ch_out:
		/* The list of supported rates and rate mask can be
		 * different depending on the band; update the rate mask */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il);

out:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_config);
5176
/*
 * il_mac_reset_tsf - mac80211 reset_tsf hook
 *
 * Clears the cached HT config, drops any queued beacon skb and
 * timestamp, cancels scanning, and - when the RF is ready - leaves the
 * associated state and restores default rates.
 */
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	spin_lock_irqsave(&il->lock, flags);

	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));

	/* new association gets rid of any cached ibss beacon skb */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;
	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting the association process */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il);

	il_set_rate(il);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);
5215
/*
 * il_ht_conf - capture the BSS's HT operation state into the driver
 *
 * Records protection mode and non-greenfield-STA presence from the BSS
 * HT operation mode, and decides whether a single RX chain suffices
 * (peer supports only one spatial stream, IBSS, or no peer found).
 * No-op when HT is disabled.
 */
static void
il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

	D_ASSOC("enter:\n");

	if (!il->ht.enabled)
		return;

	il->ht.protection =
	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	il->ht.non_gf_sta_present =
	    !!(bss_conf->
	       ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams =
			    (ht_cap->mcs.
			     tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			/* one RX chain is enough if the peer only sends
			 * single-stream MCS rates */
			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection; in that case mac80211
			 * will soon tell us about it.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}
5275
/*
 * il_set_no_assoc - drop the device's association state
 *
 * Clears the ASSOC filter flag and association id in the staging RXON
 * and commits it, so the firmware stops sending/accepting frames for
 * the old association.
 */
static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
	/*
	 * inform the ucode that there is no longer an
	 * association and that no more packets should be
	 * sent
	 */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il->staging.assoc_id = 0;
	il_commit_rxon(il);
}
5288
/*
 * il_beacon_update - fetch a fresh beacon from mac80211 and stage it
 *
 * Grabs the next beacon for @vif, replaces the cached beacon skb and
 * timestamp under il->lock, then lets the hardware-specific
 * post_associate hook act on it (when the RF is ready).  Caller must
 * hold il->mutex (asserted).
 */
static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("update beacon with no beaconing enabled\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	/* release the previous beacon before caching the new one */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->ops->post_associate(il);
}
5330
/*
 * il_mac_bss_info_changed - mac80211 callback for BSS configuration changes
 *
 * Dispatches on the @changes bitmap: QoS parameters, beacon enable,
 * BSSID, ERP preamble/CTS protection, HT state, association state and
 * IBSS membership are propagated into the staging RXON and, where
 * needed, the device-specific ops.
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: changes 0x%x\n", changes);

	if (!il_is_alive(il)) {
		D_MAC80211("leave - not alive\n");
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		il->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/* remember whether we should send beacons */
		if (vif->bss_conf.enable_beacon)
			il->beacon_enabled = true;
		else
			il->beacon_enabled = false;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * On passive channels we block queues waiting for traffic.
		 * If mac80211 gives up on the association it calls us with
		 * a zero BSSID; unblock the queues on that condition.
		 */
		if (is_zero_ether_addr(bss_conf->bssid))
			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);

		/*
		 * If there is currently a HW scan going on in the
		 * background, then we need to cancel it, otherwise we
		 * sometimes are not able to authenticate.
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			D_MAC80211("leave - scan abort failed\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* stage the new BSSID for the next RXON commit */
		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

		/* FIXME: currently needed in a few places */
		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies on the 2.4 GHz band */
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from il_set_rate() and put something
		 * like this here:
		 *
		 if (A-band)
		 il->staging.ofdm_basic_rates =
		 bss_conf->basic_rates;
		 else
		 il->staging.ofdm_basic_rates =
		 bss_conf->basic_rates >> 4;
		 il->staging.cck_basic_rates =
		 bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->sync_tsf;

			if (!il_is_rfkill(il))
				il->ops->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes && il_is_associated(il) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&il->active, &il->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(il->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->ops->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = il->ops->manage_ibss_station(il, vif,
						   bss_conf->ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5492
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/*
	 * Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable
	 * interrupts; if not, they are re-enabled on the "none" path below.
	 */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for the debug print */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ being shared with another device,
	 * or due to a sporadic interrupt thrown from our NIC.
	 */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared (all-ones / dead-bus pattern).
		 * It might have already raised an interrupt. */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	/* Mask out the scheduler bit so it alone never schedules the
	 * tasklet; it is not serviced there. */
	inta &= ~CSR_INT_BIT_SCD;

	/* The irq tasklet will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts here since we don't have anything to service.
	 * Only re-enable if they were enabled before we masked them above. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);
5553
5554
5555
5556
5557
5558void
5559il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5560 __le16 fc, __le32 *tx_flags)
5561{
5562 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5563 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5564 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5565 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5566
5567 if (!ieee80211_is_mgmt(fc))
5568 return;
5569
5570 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5571 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5572 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5573 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5574 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5575 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5576 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5577 break;
5578 }
5579 } else if (info->control.rates[0].
5580 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5581 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5582 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5583 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5584 }
5585}
5586EXPORT_SYMBOL(il_tx_cmd_protection);
5587