1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/pci.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/skbuff.h>
40#include <net/mac80211.h>
41
42#include "common.h"
43
44int
45_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
46{
47 const int interval = 10;
48 int t = 0;
49
50 do {
51 if ((_il_rd(il, addr) & mask) == (bits & mask))
52 return t;
53 udelay(interval);
54 t += interval;
55 } while (t < timeout);
56
57 return -ETIMEDOUT;
58}
59EXPORT_SYMBOL(_il_poll_bit);
60
61void
62il_set_bit(struct il_priv *p, u32 r, u32 m)
63{
64 unsigned long reg_flags;
65
66 spin_lock_irqsave(&p->reg_lock, reg_flags);
67 _il_set_bit(p, r, m);
68 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
69}
70EXPORT_SYMBOL(il_set_bit);
71
72void
73il_clear_bit(struct il_priv *p, u32 r, u32 m)
74{
75 unsigned long reg_flags;
76
77 spin_lock_irqsave(&p->reg_lock, reg_flags);
78 _il_clear_bit(p, r, m);
79 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
80}
81EXPORT_SYMBOL(il_clear_bit);
82
/*
 * Request host access to the NIC's registers while it may be asleep.
 * Must be called with reg_lock held; pair with _il_release_nic_access().
 * Returns true when the device acknowledged the wakeup, false (after
 * forcing an NMI) on timeout.
 */
bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* Ask the device's MAC to wake up for host register access. */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Poll CSR_GP_CNTRL for the access-enabled value under a mask of
	 * MAC_CLOCK_READY | GOING_TO_SLEEP, i.e. wait until the clock is
	 * up and the device is not on its way to sleep.  15000 usec is
	 * the upper bound allowed for the handshake.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			  "(CSR_GP_CNTRL 0x%08x)\n", val);
		/* Device appears stuck; force an NMI so the firmware can
		 * report/recover. */
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
124
125int
126il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
127{
128 const int interval = 10;
129 int t = 0;
130
131 do {
132 if ((il_rd(il, addr) & mask) == mask)
133 return t;
134 udelay(interval);
135 t += interval;
136 } while (t < timeout);
137
138 return -ETIMEDOUT;
139}
140EXPORT_SYMBOL(il_poll_bit);
141
/* Read periphery register @reg with locking and NIC wakeup handling. */
u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	/* NOTE(review): the return value of _il_grab_nic_access() is
	 * ignored here (il_wr_prph() checks it); if the wakeup handshake
	 * times out the read below may return junk -- confirm whether
	 * this is intentional. */
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
EXPORT_SYMBOL(il_rd_prph);
156
157void
158il_wr_prph(struct il_priv *il, u32 addr, u32 val)
159{
160 unsigned long reg_flags;
161
162 spin_lock_irqsave(&il->reg_lock, reg_flags);
163 if (likely(_il_grab_nic_access(il))) {
164 _il_wr_prph(il, addr, val);
165 _il_release_nic_access(il);
166 }
167 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
168}
169EXPORT_SYMBOL(il_wr_prph);
170
/* Read one 32-bit word of device (target) memory at @addr via the
 * HBUS indirect-access window. */
u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	/* NOTE(review): grab result unchecked, same caveat as
	 * il_rd_prph() -- read may be garbage on wakeup timeout. */
	_il_grab_nic_access(il);

	/* Set the read address, then fetch the data register. */
	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
EXPORT_SYMBOL(il_read_targ_mem);
188
189void
190il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
191{
192 unsigned long reg_flags;
193
194 spin_lock_irqsave(&il->reg_lock, reg_flags);
195 if (likely(_il_grab_nic_access(il))) {
196 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
197 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
198 _il_release_nic_access(il);
199 }
200 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
201}
202EXPORT_SYMBOL(il_write_targ_mem);
203
/*
 * Map a host command / notification id to its symbolic name for log
 * output.  IL_CMD(x) presumably expands to "case x: return #x;" --
 * see its definition in common.h.  Unknown ids yield "UNKNOWN".
 */
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(il_get_cmd_string);
255
/* Max wait for a synchronous host command to complete: half a second. */
#define HOST_COMPLETE_TIMEOUT (HZ / 2)
257
/*
 * Default completion callback for async host commands that did not
 * install their own: log failure, otherwise debug-trace the reply.
 */
static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		/* High-frequency commands go to the noisier dump channel. */
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}
280
/*
 * Queue an asynchronous host command.  Completion is reported via
 * cmd->callback (the generic logger is installed when none is given).
 * Returns 0 on queueing success, -EBUSY while shutting down, or the
 * enqueue error.
 */
static int
il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->flags & CMD_ASYNC));

	/* An asynchronous command can not expect an SKB to be set. */
	BUG_ON(cmd->flags & CMD_WANT_SKB);

	/* Assign a generic callback if one is not otherwise provided. */
	if (!cmd->callback)
		cmd->callback = il_generic_cmd_callback;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EBUSY;

	ret = il_enqueue_hcmd(il, cmd);
	if (ret < 0) {
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}
306
/*
 * Send a host command and block (up to HOST_COMPLETE_TIMEOUT) until the
 * firmware answers.  Caller must hold il->mutex.  If CMD_WANT_SKB is
 * set, the reply page is returned in cmd->reply_page and the caller
 * must free it.  Returns 0 on success or a negative errno.
 */
int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* The IRQ handler clears S_HCMD_ACTIVE when the reply arrives. */
	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Clear CMD_WANT_SKB on the queued copy of this command,
		 * so that when the (late) response eventually arrives the
		 * IRQ handler does not try to hand us a reply page we are
		 * no longer waiting for.
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	/* Drop any reply page that did arrive on the failure paths. */
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);
393
394int
395il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
396{
397 if (cmd->flags & CMD_ASYNC)
398 return il_send_cmd_async(il, cmd);
399
400 return il_send_cmd_sync(il, cmd);
401}
402EXPORT_SYMBOL(il_send_cmd);
403
404int
405il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
406{
407 struct il_host_cmd cmd = {
408 .id = id,
409 .len = len,
410 .data = data,
411 };
412
413 return il_send_cmd_sync(il, &cmd);
414}
415EXPORT_SYMBOL(il_send_cmd_pdu);
416
417int
418il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
419 void (*callback) (struct il_priv *il,
420 struct il_device_cmd *cmd,
421 struct il_rx_pkt *pkt))
422{
423 struct il_host_cmd cmd = {
424 .id = id,
425 .len = len,
426 .data = data,
427 };
428
429 cmd.flags |= CMD_ASYNC;
430 cmd.callback = callback;
431
432 return il_send_cmd_async(il, &cmd);
433}
434EXPORT_SYMBOL(il_send_cmd_pdu_async);
435
436
/* LED behaviour override, settable at module load time (read-only
 * afterwards); 0 defers to the per-device cfg default. */
static int led_mode;
module_param(led_mode, int, 0444);
MODULE_PARM_DESC(led_mode,
		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
441
442
443
444
445
446
447
448
449
450
451
452
453
454
/*
 * Throughput-to-blink-time table for mac80211's throughput LED trigger:
 * the busier the link, the faster the blink.  Field units are those of
 * struct ieee80211_tpt_blink (see include/net/mac80211.h).
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0, .blink_time = 334},
	{.throughput = 1 * 1024 - 1, .blink_time = 260},
	{.throughput = 5 * 1024 - 1, .blink_time = 220},
	{.throughput = 10 * 1024 - 1, .blink_time = 190},
	{.throughput = 20 * 1024 - 1, .blink_time = 170},
	{.throughput = 50 * 1024 - 1, .blink_time = 150},
	{.throughput = 70 * 1024 - 1, .blink_time = 130},
	{.throughput = 100 * 1024 - 1, .blink_time = 110},
	{.throughput = 200 * 1024 - 1, .blink_time = 80},
	{.throughput = 300 * 1024 - 1, .blink_time = 50},
};
467
468
469
470
471
472
473
474
475
476
477
478
479static inline u8
480il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
481{
482 if (!compensation) {
483 IL_ERR("undefined blink compensation: "
484 "use pre-defined blinking time\n");
485 return time;
486 }
487
488 return (u8) ((time * compensation) >> 6);
489}
490
491
/*
 * Program the link LED with an @on/@off blink pattern (msec values).
 * Skips the firmware command when the pattern is already in effect.
 * Returns 0 on success or a negative errno.
 */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	/* Pattern unchanged -- nothing to send. */
	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n",
	      il->cfg->led_compensation);
	led_cmd.on =
	    il_blink_compensation(il, on,
				  il->cfg->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off,
				  il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		/* Cache the pattern only when the device accepted it. */
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}
528
529static void
530il_led_brightness_set(struct led_classdev *led_cdev,
531 enum led_brightness brightness)
532{
533 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
534 unsigned long on = 0;
535
536 if (brightness > 0)
537 on = IL_LED_SOLID;
538
539 il_led_cmd(il, on, 0);
540}
541
542static int
543il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
544 unsigned long *delay_off)
545{
546 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
547
548 return il_led_cmd(il, *delay_on, *delay_off);
549}
550
/*
 * Register the device LED with the LED class subsystem, choosing the
 * default trigger from the led_mode module parameter (or the per-device
 * cfg default).  Registration failure is non-fatal: the driver simply
 * runs without a LED.
 */
void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	/* Module parameter overrides the per-device default. */
	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		/* Unreachable: resolved to the cfg value above. */
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	/* Remembered so il_leds_exit() knows whether to unregister. */
	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);
591
592void
593il_leds_exit(struct il_priv *il)
594{
595 if (!il->led_registered)
596 return;
597
598 led_classdev_unregister(&il->led);
599 kfree(il->led.name);
600}
601EXPORT_SYMBOL(il_leds_exit);
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
/*
 * EEPROM band channel tables: il_eeprom_band_N lists, in order, the
 * channel numbers whose regulatory records appear in EEPROM band N
 * (see il_init_band_reference()).
 */

/* Band 1: 2.4 GHz channels 1-14.  Non-static: also indexed directly as
 * channel-1 by il_get_channel_info(). */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* Band 2 -- presumably the 4.9 GHz range plus a few 2.4 GHz channels;
 * TODO confirm against the EEPROM layout documentation. */
static const u8 il_eeprom_band_2[] = {
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

/* Band 3: lower 5 GHz channels. */
static const u8 il_eeprom_band_3[] = {
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

/* Band 4: mid 5 GHz channels. */
static const u8 il_eeprom_band_4[] = {
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

/* Band 5: upper 5 GHz channels. */
static const u8 il_eeprom_band_5[] = {
	145, 149, 153, 157, 161, 165
};

/* Band 6: 2.4 GHz HT40 primary channels (see il_init_channel_map()). */
static const u8 il_eeprom_band_6[] = {
	1, 2, 3, 4, 5, 6, 7
};

/* Band 7: 5 GHz HT40 primary channels (see il_init_channel_map()). */
static const u8 il_eeprom_band_7[] = {
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
663
664
665
666
667
668
669
670static int
671il_eeprom_verify_signature(struct il_priv *il)
672{
673 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
674 int ret = 0;
675
676 D_EEPROM("EEPROM signature=0x%08x\n", gp);
677 switch (gp) {
678 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
679 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
680 break;
681 default:
682 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
683 ret = -ENOENT;
684 break;
685 }
686 return ret;
687}
688
689const u8 *
690il_eeprom_query_addr(const struct il_priv *il, size_t offset)
691{
692 BUG_ON(offset >= il->cfg->eeprom_size);
693 return &il->eeprom[offset];
694}
695EXPORT_SYMBOL(il_eeprom_query_addr);
696
697u16
698il_eeprom_query16(const struct il_priv *il, size_t offset)
699{
700 if (!il->eeprom)
701 return 0;
702 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
703}
704EXPORT_SYMBOL(il_eeprom_query16);
705
706
707
708
709
710
711
712
/*
 * Read the device EEPROM into a driver-owned buffer (il->eeprom).
 * Brings the APM up for the duration of the read and stops it again
 * before returning.  On any failure the (possibly partial) image is
 * freed.  Returns 0 on success or a negative errno.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* Allocate the cache for the full EEPROM image. */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom)
		return -ENOMEM;

	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM. */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* The EEPROM is read one 16-bit word at a time. */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		/* Data is delivered in the upper 16 bits of the register. */
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	/* Note: the signature-failure path skips the semaphore release
	 * on purpose -- the semaphore was never acquired there. */
	if (ret)
		il_eeprom_free(il);

	il_apm_stop(il);
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
782EXPORT_SYMBOL(il_eeprom_init);
783
784void
785il_eeprom_free(struct il_priv *il)
786{
787 kfree(il->eeprom);
788 il->eeprom = NULL;
789}
790EXPORT_SYMBOL(il_eeprom_free);
791
792static void
793il_init_band_reference(const struct il_priv *il, int eep_band,
794 int *eeprom_ch_count,
795 const struct il_eeprom_channel **eeprom_ch_info,
796 const u8 **eeprom_ch_idx)
797{
798 u32 offset = il->cfg->regulatory_bands[eep_band - 1];
799
800 switch (eep_band) {
801 case 1:
802 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
803 *eeprom_ch_info =
804 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
805 offset);
806 *eeprom_ch_idx = il_eeprom_band_1;
807 break;
808 case 2:
809 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
810 *eeprom_ch_info =
811 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
812 offset);
813 *eeprom_ch_idx = il_eeprom_band_2;
814 break;
815 case 3:
816 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
817 *eeprom_ch_info =
818 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
819 offset);
820 *eeprom_ch_idx = il_eeprom_band_3;
821 break;
822 case 4:
823 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
824 *eeprom_ch_info =
825 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
826 offset);
827 *eeprom_ch_idx = il_eeprom_band_4;
828 break;
829 case 5:
830 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
831 *eeprom_ch_info =
832 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
833 offset);
834 *eeprom_ch_idx = il_eeprom_band_5;
835 break;
836 case 6:
837 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
838 *eeprom_ch_info =
839 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
840 offset);
841 *eeprom_ch_idx = il_eeprom_band_6;
842 break;
843 case 7:
844 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
845 *eeprom_ch_info =
846 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
847 offset);
848 *eeprom_ch_idx = il_eeprom_band_7;
849 break;
850 default:
851 BUG();
852 }
853}
854
/* Expand to "<flag> " when EEPROM_CHANNEL_<flag> is set in
 * eeprom_ch->flags, "" otherwise -- used to build debug strings. */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
857
858
859
860
861
/*
 * Record the EEPROM HT40 data for @channel in the driver's channel
 * table and, when the EEPROM marks the channel valid, clear the HT40
 * restriction bit given in @clear_ht40_extension_channel.  Returns 0 on
 * success, -1 when the channel is unknown/invalid.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	/* Cast away const: we update this entry's HT40 fields below. */
	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	/* Cache the raw EEPROM HT40 record for this channel. */
	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
894
/* Indexed variant of CHECK_AND_PRINT: tests eeprom_ch_info[ch].flags. */
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			      ? # x " " : "")
897
898
899
900
/*
 * Build the driver's channel table (il->channel_info) from the EEPROM
 * regulatory bands: bands 1-5 define the regular 2.4/5 GHz channels,
 * bands 6-7 add HT40 capability to channels already in the map.
 * Idempotent; returns 0 on success, -ENOMEM on allocation failure.
 */
int
il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	/* Bands 6-7 only annotate existing entries, so they do not add
	 * to the channel count. */
	il->channel_count =
	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
	    ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info =
	    kcalloc(il->channel_count, sizeof(struct il_channel_info),
		    GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/* First pass: populate one entry per channel in bands 1-5. */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			/* Only band 1 is 2.4 GHz. */
			ch_info->band =
			    (band ==
			     1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

			/* Keep a copy of the raw EEPROM record. */
			ch_info->eeprom = eeprom_ch_info[ch];

			ch_info->flags = eeprom_ch_info[ch].flags;

			/* Start with HT40 disallowed; the second pass
			 * below clears these bits where permitted. */
			ch_info->ht40_extension_channel =
			    IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n", ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ? "5.2" :
					 "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data. */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n", ch_info->channel,
				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].
				   flags & EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].
				    flags & EEPROM_CHANNEL_RADAR)) ? "" :
				 "not ");

			ch_info++;
		}
	}

	/* Skip the HT40 pass when the config declares no HT40 bands. */
	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Second pass: bands 6 (2.4 GHz) and 7 (5 GHz) carry HT40 data. */
	for (band = 6; band <= 7; band++) {
		enum nl80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		ieeeband =
		    (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

		for (ch = 0; ch < eeprom_ch_count; ch++) {

			/* The listed channel may pair with the channel
			 * above it (clear the NO_HT40PLUS restriction)... */
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* ...and the channel 4 above may pair downward
			 * (clear its NO_HT40MINUS restriction). */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);
1032EXPORT_SYMBOL(il_init_channel_map);
1033
1034
1035
1036
1037void
1038il_free_channel_map(struct il_priv *il)
1039{
1040 kfree(il->channel_info);
1041 il->channel_count = 0;
1042}
1043EXPORT_SYMBOL(il_free_channel_map);
1044
1045
1046
1047
1048
1049
1050const struct il_channel_info *
1051il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
1052 u16 channel)
1053{
1054 int i;
1055
1056 switch (band) {
1057 case NL80211_BAND_5GHZ:
1058 for (i = 14; i < il->channel_count; i++) {
1059 if (il->channel_info[i].channel == channel)
1060 return &il->channel_info[i];
1061 }
1062 break;
1063 case NL80211_BAND_2GHZ:
1064 if (channel >= 1 && channel <= 14)
1065 return &il->channel_info[channel - 1];
1066 break;
1067 default:
1068 BUG();
1069 }
1070
1071 return NULL;
1072}
1073EXPORT_SYMBOL(il_get_channel_info);
1074
1075
1076
1077
1078
1079
1080
1081
/* Build a 5-entry little-endian sleep-interval vector initializer for
 * struct il_powertable_cmd (see il_build_powertable_cmd()). */
#define SLP_VEC(X0, X1, X2, X3, X4) { \
	cpu_to_le32(X0), \
	cpu_to_le32(X1), \
	cpu_to_le32(X2), \
	cpu_to_le32(X3), \
	cpu_to_le32(X4) \
}
1089
/*
 * Fill @cmd with a power-table (sleep) command appropriate for the
 * current DTIM period and power-save settings.  When power save is
 * disabled the command is left zeroed (sleep not allowed).
 */
static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	/* Candidate sleep-interval vectors, selected by DTIM period
	 * below; 0xFF in the last slot acts as an "unbounded" marker
	 * resolved against the DTIM period further down. */
	const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* If no power save is allowed, we are done. */
	if (il->power_data.ps_disabled)
		return;

	/* NOTE(review): this plain assignment clobbers the
	 * IL_POWER_PCI_PM_MSK bit possibly set just above -- confirm
	 * whether that is intended. */
	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	/* 0 when not associated / no vif. */
	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;

	/* Pick the interval vector by DTIM period length. */
	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	if (dtim_period == 0) {
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		/* Resolve the 0xFF marker, and round other values down
		 * to a multiple of the DTIM period. */
		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	/* Clamp every slot to the computed maximum. */
	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
1156
1157static int
1158il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
1159{
1160 D_POWER("Sending power/sleep command\n");
1161 D_POWER("Flags value = 0x%08X\n", cmd->flags);
1162 D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1163 D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1164 D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1165 le32_to_cpu(cmd->sleep_interval[0]),
1166 le32_to_cpu(cmd->sleep_interval[1]),
1167 le32_to_cpu(cmd->sleep_interval[2]),
1168 le32_to_cpu(cmd->sleep_interval[3]),
1169 le32_to_cpu(cmd->sleep_interval[4]));
1170
1171 return il_send_cmd_pdu(il, C_POWER_TBL,
1172 sizeof(struct il_powertable_cmd), cmd);
1173}
1174
/*
 * Apply a power-table command, skipping the firmware round-trip when
 * the command is unchanged (unless @force).  While scanning, the
 * command is stashed in sleep_cmd_next and applied later.  Caller must
 * hold il->mutex.
 */
static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain while chain-noise calibration runs. */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Scan completion picks up sleep_cmd_next, so always record it. */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	/* Set PMI before sending so the flag is in place when the device
	 * actually starts sleeping. */
	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		/* Remember the command now in effect for the dedup above. */
		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}
1221
1222int
1223il_power_update_mode(struct il_priv *il, bool force)
1224{
1225 struct il_powertable_cmd cmd;
1226
1227 il_build_powertable_cmd(il, &cmd);
1228
1229 return il_power_set_mode(il, &cmd, force);
1230}
1231EXPORT_SYMBOL(il_power_update_mode);
1232
1233
1234void
1235il_power_initialize(struct il_priv *il)
1236{
1237 u16 lctl;
1238
1239 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
1240 il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
1241
1242 il->power_data.debug_sleep_level_override = -1;
1243
1244 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1245}
1246EXPORT_SYMBOL(il_power_initialize);
1247
1248
1249
1250
/* Per-channel dwell times used when building scan commands --
 * presumably in msec; TODO confirm against the scan-command users. */
#define IL_ACTIVE_DWELL_TIME_24 (30)	/* active scan, 2.4 GHz */
#define IL_ACTIVE_DWELL_TIME_52 (20)	/* active scan, 5.2 GHz */

/* Extra active-dwell time added per associated AP. */
#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* Passive scan: per-band increment on top of a common base. */
#define IL_PASSIVE_DWELL_TIME_24 (20)
#define IL_PASSIVE_DWELL_TIME_52 (10)
#define IL_PASSIVE_DWELL_BASE (100)
#define IL_CHANNEL_TUNE_TIME 5
1264
/*
 * Ask the firmware to abort the hardware scan currently in progress.
 * Returns 0 when the firmware accepted the abort, -EIO when the device
 * is not in a state to abort (or the firmware declined), or the
 * send error.
 */
static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when the device is not ready to
	 * receive a scan abort command or no hardware scan is active. */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* NOTE(review): a non-CAN_ABORT_STATUS reply presumably
		 * means no scan was in progress to abort (e.g. it already
		 * completed) -- confirm against firmware docs. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	/* We asked for the reply page (CMD_WANT_SKB); free it. */
	il_free_pages(il, cmd.reply_page);
	return ret;
}
1304
1305static void
1306il_complete_scan(struct il_priv *il, bool aborted)
1307{
1308 struct cfg80211_scan_info info = {
1309 .aborted = aborted,
1310 };
1311
1312
1313 if (il->scan_request) {
1314 D_SCAN("Complete scan in mac80211\n");
1315 ieee80211_scan_completed(il->hw, &info);
1316 }
1317
1318 il->scan_vif = NULL;
1319 il->scan_request = NULL;
1320}
1321
/*
 * Forcibly end a scan on the driver side without waiting for the
 * firmware: clear all scan state bits and report an aborted scan to
 * mac80211.  Caller must hold il->mutex.
 */
void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}
1338
/*
 * Try to abort an in-progress scan via the firmware; when that fails,
 * fall back to forcing the scan to end driver-side.  No-op when no
 * scan is running or an abort is already pending.  Caller must hold
 * il->mutex.
 */
static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	/* test_and_set makes concurrent abort attempts idempotent. */
	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully send scan abort\n");
}
1363
1364
1365
1366
1367int
1368il_scan_cancel(struct il_priv *il)
1369{
1370 D_SCAN("Queuing abort scan\n");
1371 queue_work(il->workqueue, &il->abort_scan);
1372 return 0;
1373}
1374EXPORT_SYMBOL(il_scan_cancel);
1375
1376
1377
1378
1379
1380
1381int
1382il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1383{
1384 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1385
1386 lockdep_assert_held(&il->mutex);
1387
1388 D_SCAN("Scan cancel timeout\n");
1389
1390 il_do_scan_abort(il);
1391
1392 while (time_before_eq(jiffies, timeout)) {
1393 if (!test_bit(S_SCAN_HW, &il->status))
1394 break;
1395 msleep(20);
1396 }
1397
1398 return test_bit(S_SCAN_HW, &il->status);
1399}
1400EXPORT_SYMBOL(il_scan_cancel_timeout);
1401
1402
/* Handler for C_SCAN command responses: nothing to do beyond debug
 * logging of the request status. */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1414
1415
/* Handler for N_SCAN_START notifications: record the scan-start TSF so
 * il_hdl_scan_results() can report elapsed time per channel. */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;
	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: " "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}
1428
1429
/*
 * Rx handler for N_SCAN_RESULTS notifications: logs per-channel scan
 * results, including time elapsed since the recorded scan start TSF.
 * Debug-only; empty without CONFIG_IWLEGACY_DEBUG.
 */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1445
1446
/*
 * Rx handler for N_SCAN_COMPLETE notifications.  Clears S_SCAN_HW and
 * defers the rest of the completion to il_bg_scan_completed() on the
 * workqueue (we are in the Rx path here, holding no mutex).
 */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	/*
	 * scan_notif is only declared under CONFIG_IWLEGACY_DEBUG;
	 * presumably D_SCAN() compiles to a no-op (not evaluating its
	 * arguments) when debugging is disabled — TODO confirm against
	 * the D_SCAN macro definition.
	 */
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}
1469
/*
 * il_setup_rx_scan_handlers - install the scan-related Rx handlers
 *
 * Wires the four scan notification opcodes to their handlers in the
 * il->handlers dispatch table.
 */
void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1480
1481u16
1482il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
1483 u8 n_probes)
1484{
1485 if (band == NL80211_BAND_5GHZ)
1486 return IL_ACTIVE_DWELL_TIME_52 +
1487 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1488 else
1489 return IL_ACTIVE_DWELL_TIME_24 +
1490 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1491}
1492EXPORT_SYMBOL(il_get_active_dwell_time);
1493
/*
 * il_get_passive_dwell_time - passive-scan dwell time for a band
 *
 * Returns the per-channel passive dwell time in TU: a common base plus
 * a per-band addition.  When associated, the dwell is capped so that
 * scanning does not starve the association: at most ~98% of the beacon
 * interval minus twice the channel tune time.
 */
u16
il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band ==
	     NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * Cap by the beacon interval; a missing or oversized
		 * beacon_int falls back to IL_PASSIVE_DWELL_BASE before
		 * the 98%-minus-tune-time reduction is applied.
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
1522
/*
 * il_init_scan_params - seed the per-band scan TX antenna selection
 *
 * Defaults each band's scan TX antenna to the highest valid TX antenna
 * (fls of the valid antenna mask, zero-based), but only if it has not
 * been set already.
 */
void
il_init_scan_params(struct il_priv *il)
{
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
	if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
		il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
	if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
		il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);
1533
/*
 * il_scan_initiate - start a hardware scan
 *
 * Caller must hold il->mutex.  Refuses to start when the radio is not
 * ready (-EIO), when a HW scan is already running, or when an abort is
 * pending (-EBUSY).  On success, arms the scan_check watchdog that
 * will force-end the scan if no completion arrives in time.
 */
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		/* roll back the flag set above on failure */
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1574
/*
 * il_mac_hw_scan - mac80211 hw_scan callback
 *
 * Validates the request (at least one channel), records the scan
 * parameters (request, vif, band of the first channel) and kicks off
 * the scan under il->mutex.  Returns -EAGAIN if a scan is already in
 * progress.
 */
int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Can not scan on no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);
1611
/*
 * il_bg_scan_check - scan watchdog work
 *
 * Runs IL_SCAN_CHECK_WATCHDOG after a scan starts (see
 * il_scan_initiate()); if it fires, the scan never completed, so we
 * forcibly end it on the driver side.
 */
static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/*
	 * Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211.
	 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}
1627
1628
1629
1630
1631
/*
 * il_fill_probe_req - build a probe request frame in a caller buffer
 * @frame: destination buffer, at least @left bytes
 * @ta: transmitter address (copied into SA)
 * @ies: optional extra IEs appended after the broadcast SSID IE
 * @ie_len: length of @ies in bytes
 * @left: total space available in @frame
 *
 * Writes a 24-byte management header (broadcast DA/BSSID) followed by
 * an empty (broadcast) SSID IE and the supplied IEs.  Returns the total
 * frame length in bytes, or 0 if @left is too small for the header or
 * SSID IE.  Note: if @left cannot hold @ies, the frame is returned
 * WITHOUT them (with a WARN), not rejected.
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE (zero-length == broadcast SSID) */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
1676
/*
 * il_bg_abort_scan - deferred scan abort (queued by il_scan_cancel())
 *
 * Cancels the scan under il->mutex, waiting up to 200 ms for the
 * hardware to stop.
 */
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}
1690
/*
 * il_bg_scan_completed - deferred scan completion work
 *
 * Cancels the watchdog, clears the scanning/aborting state bits,
 * notifies mac80211 via il_complete_scan(), and — if the radio is
 * still usable — restores power/TX-power settings that were deferred
 * during the scan and lets the device-specific post_scan hook run.
 */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		/* e.g. il_force_scan_end() already ran */
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1731
/*
 * il_setup_scan_deferred_work - initialize the scan work items
 *
 * Must run before any of the scan work can be queued.
 */
void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);
1740
/*
 * il_cancel_scan_deferred_work - synchronously cancel all scan work
 *
 * If the scan_check watchdog was still pending, the scan never
 * completed, so force-end it here (under il->mutex) on its behalf.
 */
void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1754
1755
1756static void
1757il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1758{
1759
1760 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1761 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1762 sta_id, il->stations[sta_id].sta.sta.addr);
1763
1764 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1765 D_ASSOC("STA id %u addr %pM already present"
1766 " in uCode (according to driver)\n", sta_id,
1767 il->stations[sta_id].sta.sta.addr);
1768 } else {
1769 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1770 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1771 il->stations[sta_id].sta.sta.addr);
1772 }
1773}
1774
/*
 * il_process_add_sta_resp - handle the uCode's reply to C_ADD_STA
 * @addsta: the command that was sent (used for logging)
 * @pkt: the response packet
 * @sync: true when called from the synchronous path (unused here)
 *
 * On ADD_STA_SUCCESS_MSK, marks the station uCode-active under
 * il->sta_lock.  Returns 0 on success, -EIO on any failure status.
 */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1834
/*
 * Async completion callback for C_ADD_STA: forwards the response to
 * the common response processor (sync = false; return value ignored).
 */
static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);

}
1844
/*
 * il_send_add_sta - send a C_ADD_STA command to the uCode
 * @sta: the station command to send (copied into a local buffer by the
 *       device-specific build_addsta_hcmd hook)
 * @flags: CMD_SYNC or CMD_ASYNC
 *
 * Async: the response is handled later by il_add_sta_callback();
 * returns 0 once the command is queued.  Sync: may sleep; waits for
 * the reply, processes it, and returns the processing result.
 */
int
il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
{
	struct il_rx_pkt *pkt = NULL;
	int ret = 0;
	u8 data[sizeof(*sta)];
	struct il_host_cmd cmd = {
		.id = C_ADD_STA,
		.flags = flags,
		.data = data,
	};
	u8 sta_id __maybe_unused = sta->sta.sta_id;

	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
	       flags & CMD_ASYNC ? "a" : "");

	if (flags & CMD_ASYNC)
		cmd.callback = il_add_sta_callback;
	else {
		cmd.flags |= CMD_WANT_SKB;
		might_sleep();
	}

	/* device-specific hook serializes *sta into the wire format */
	cmd.len = il->ops->build_addsta_hcmd(sta, data);
	ret = il_send_cmd(il, &cmd);
	if (ret)
		return ret;
	if (flags & CMD_ASYNC)
		return 0;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	ret = il_process_add_sta_resp(il, sta, pkt, true);

	il_free_pages(il, cmd.reply_page);

	return ret;
}
EXPORT_SYMBOL(il_send_add_sta);
1883
/*
 * il_set_ht_add_station - fill HT-related station flags
 * @idx: station table index to update
 *
 * Derives station_flags (SMPS/MIMO protection, max aggregation size,
 * MPDU density, HT40 enable) from the peer's HT capabilities and SMPS
 * mode.  No-op for non-HT peers.
 *
 * NOTE(review): &sta->ht_cap is taken before the NULL check on sta;
 * that is only address arithmetic, not a dereference, so it is safe,
 * but it reads oddly.
 */
static void
il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
	__le32 sta_flags;

	if (!sta || !sta_ht_inf->ht_supported)
		goto done;

	D_ASSOC("spatial multiplexing power save mode: %s\n",
		(sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
		(sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
		"disabled");

	sta_flags = il->stations[idx].sta.station_flags;

	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_STATIC:
		sta_flags |= STA_FLG_MIMO_DIS_MSK;
		break;
	case IEEE80211_SMPS_DYNAMIC:
		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
		break;
	case IEEE80211_SMPS_OFF:
		break;
	default:
		IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
		break;
	}

	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->
			ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);

	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->
			ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);

	if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
		sta_flags |= STA_FLG_HT40_EN_MSK;
	else
		sta_flags &= ~STA_FLG_HT40_EN_MSK;

	il->stations[idx].sta.station_flags = sta_flags;
done:
	return;
}
1933
1934
1935
1936
1937
1938
/*
 * il_prep_station - prepare (or find) a station table entry
 * @addr: station MAC address
 * @is_ap: true when adding the AP we are associating with
 * @sta: mac80211 station (may be NULL), used for HT parameters
 *
 * Caller must hold il->sta_lock.  Reuses an existing entry when the
 * address matches, or the fixed AP/broadcast slots, otherwise takes
 * the first free slot.  Initializes the ADD_STA command for the entry
 * (address, HT flags, default TX rate for the current band).
 *
 * Returns the station index, or IL_INVALID_STATION if the table is
 * full.
 */
u8
il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
		struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = IL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		sta_id = il->hw_params.bcast_id;
	else
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			/* remember first free slot in case of no match */
			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = 0;

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	il_set_ht_add_station(il, sta_id, sta);

	/* 3945 only: default TX rate for the band (6M OFDM / 1M CCK) */
	rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;

}
EXPORT_SYMBOL_GPL(il_prep_station);
2018
2019#define STA_WAIT_TIMEOUT (HZ/2)
2020
2021
2022
2023
/*
 * il_add_station_common - add a station to driver and uCode
 * @sta_id_r: out parameter, receives the allocated station index
 *
 * Prepares the station entry under il->sta_lock, marks it
 * IL_STA_UCODE_INPROGRESS, then sends the ADD_STA command
 * synchronously with the lock dropped (a local copy of the command is
 * used so the table can change underneath).  On send failure, rolls
 * back the driver-active/in-progress flags.
 *
 * Returns 0 on success, -EINVAL if no slot is available, -EEXIST if
 * the station is already added or being added.
 */
int
il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
		      struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);
2080
2081
2082
2083
2084
2085
/*
 * il_sta_ucode_deactivate - mark a station removed from the uCode
 *
 * Caller must hold il->sta_lock.  Clears IL_STA_UCODE_ACTIVE and wipes
 * the whole entry; logs an error if the entry was not in the
 * "uCode-active but not driver-active" state expected at removal.
 */
static void
il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
{
	/* Ucode must be active and driver must be non active */
	if ((il->stations[sta_id].
	     used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
	    IL_STA_UCODE_ACTIVE)
		IL_ERR("removed non active STA %u\n", sta_id);

	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;

	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
	D_ASSOC("Removed STA %u\n", sta_id);
}
2100
/*
 * il_send_remove_station - send a synchronous C_REM_STA command
 * @temporary: when true, the driver-side entry is kept (the uCode
 *             deactivation step is skipped)
 *
 * Sends the removal command and waits for the reply.  On success (and
 * if not @temporary) deactivates the station entry under il->sta_lock.
 * Returns 0 on success, -EIO on a failed command or unexpected status,
 * or the il_send_cmd() error.
 */
static int
il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
		       bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;

	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;

	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);

	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
2156
2157
2158
2159
/*
 * il_remove_station - remove a station from driver and uCode tables
 *
 * Validates state under il->sta_lock (must be both driver- and
 * uCode-active), frees any link-quality command for local stations,
 * clears the driver-active flag, then sends the removal command with
 * the lock dropped.
 *
 * Returns 0 when the device is not ready (the table will be rebuilt on
 * restart anyway), -EINVAL for bad state, or the command result.
 */
int
il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
{
	unsigned long flags;

	if (!il_is_ready(il)) {
		D_INFO("Unable to remove station %pM, device not ready.\n",
		       addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);

	if (WARN_ON(sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags);

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		D_INFO("Removing %pM but non DRIVER active\n", addr);
		goto out_err;
	}

	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_INFO("Removing %pM but non UCODE active\n", addr);
		goto out_err;
	}

	if (il->stations[sta_id].used & IL_STA_LOCAL) {
		kfree(il->stations[sta_id].lq);
		il->stations[sta_id].lq = NULL;
	}

	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;

	il->num_stations--;

	BUG_ON(il->num_stations < 0);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_remove_station(il, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(il_remove_station);
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221void
2222il_clear_ucode_stations(struct il_priv *il)
2223{
2224 int i;
2225 unsigned long flags_spin;
2226 bool cleared = false;
2227
2228 D_INFO("Clearing ucode stations in driver\n");
2229
2230 spin_lock_irqsave(&il->sta_lock, flags_spin);
2231 for (i = 0; i < il->hw_params.max_stations; i++) {
2232 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2233 D_INFO("Clearing ucode active for station %d\n", i);
2234 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2235 cleared = true;
2236 }
2237 }
2238 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2239
2240 if (!cleared)
2241 D_INFO("No active stations found to be cleared\n");
2242}
2243EXPORT_SYMBOL(il_clear_ucode_stations);
2244
2245
2246
2247
2248
2249
2250
2251
2252
/*
 * il_restore_stations - re-add driver-known stations to the uCode
 *
 * Used after a firmware restart: every entry that is driver-active but
 * no longer uCode-active is re-sent via ADD_STA (and its link-quality
 * command, if any).  The sta_lock is dropped around each synchronous
 * command send; local copies of the commands are taken first so the
 * table may change underneath safely.
 */
void
il_restore_stations(struct il_priv *il)
{
	struct il_addsta_cmd sta_cmd;
	struct il_link_quality_cmd lq;
	unsigned long flags_spin;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!il_is_ready(il)) {
		D_INFO("Not ready yet, not restoring any stations.\n");
		return;
	}

	D_ASSOC("Restoring all known stations ... start.\n");
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	/* pass 1: mark everything that needs restoring as in-progress */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
			D_ASSOC("Restoring sta %pM\n",
				il->stations[i].sta.sta.addr);
			il->stations[i].sta.mode = 0;
			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/* pass 2: send the commands, lock dropped around each send */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
			memcpy(&sta_cmd, &il->stations[i].sta,
			       sizeof(struct il_addsta_cmd));
			send_lq = false;
			if (il->stations[i].lq) {
				memcpy(&lq, il->stations[i].lq,
				       sizeof(struct il_link_quality_cmd));
				send_lq = true;
			}
			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
			if (ret) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				IL_ERR("Adding station %pM failed.\n",
				       il->stations[i].sta.sta.addr);
				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
				il->stations[i].used &=
				    ~IL_STA_UCODE_INPROGRESS;
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command
			 */
			if (send_lq)
				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
			spin_lock_irqsave(&il->sta_lock, flags_spin);
			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	if (!found)
		D_INFO("Restoring all known stations"
		       " .... no stations to be restored.\n");
	else
		D_INFO("Restoring all known stations" " .... complete.\n");
}
EXPORT_SYMBOL(il_restore_stations);
2323
2324int
2325il_get_free_ucode_key_idx(struct il_priv *il)
2326{
2327 int i;
2328
2329 for (i = 0; i < il->sta_key_max_num; i++)
2330 if (!test_and_set_bit(i, &il->ucode_key_table))
2331 return i;
2332
2333 return WEP_INVALID_OFFSET;
2334}
2335EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2336
/*
 * il_dealloc_bcast_stations - tear down all broadcast station entries
 *
 * Clears uCode-active state and frees the link-quality command for
 * every IL_STA_BCAST entry, adjusting il->num_stations accordingly.
 * Used on driver shutdown; no command is sent to the device.
 */
void
il_dealloc_bcast_stations(struct il_priv *il)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if (!(il->stations[i].used & IL_STA_BCAST))
			continue;

		il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
		il->num_stations--;
		BUG_ON(il->num_stations < 0);
		kfree(il->stations[i].lq);
		il->stations[i].lq = NULL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
}
EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2357
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il_dump_lq_cmd - dump a link-quality command to the debug log
 *
 * Logs station id, antenna masks and the full retry rate table.
 * Compiled out (empty inline below) without CONFIG_IWLEGACY_DEBUG.
 */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
}
#else
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
/*
 * il_is_lq_table_valid - sanity-check a link-quality rate table
 *
 * When HT is enabled any table is acceptable.  On a non-HT channel,
 * reject the table if any retry entry uses an HT (MCS) rate, since
 * the uCode cannot transmit HT rates there.
 */
static bool
il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;

	if (il->ht.enabled)
		return true;

	D_INFO("Channel %u is not an HT channel\n", il->active.channel);
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
			D_INFO("idx %d of LQ expects HT channel\n", i);
			return false;
		}
	}
	return true;
}
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
/*
 * il_send_lq_cmd - send a link-quality command for a station
 * @flags: CMD_SYNC or CMD_ASYNC
 * @init: true when this is the initial LQ after adding the station;
 *        in that case the sync path clears IL_STA_UCODE_INPROGRESS
 *        once the command completes
 *
 * Validates the station (must be driver-active) and the rate table
 * (il_is_lq_table_valid()) before sending.  Returns 0 on success,
 * -EINVAL on validation failure, or the il_send_cmd() error.
 */
int
il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
	       u8 flags, bool init)
{
	int ret = 0;
	unsigned long flags_spin;

	struct il_host_cmd cmd = {
		.id = C_TX_LINK_QUALITY_CMD,
		.len = sizeof(struct il_link_quality_cmd),
		.flags = flags,
		.data = lq,
	};

	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags_spin);
	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	il_dump_lq_cmd(il, lq);
	/* the init LQ must be waited for, so async + init is a bug */
	BUG_ON(init && (cmd.flags & CMD_ASYNC));

	if (il_is_lq_table_valid(il, lq))
		ret = il_send_cmd(il, &cmd);
	else
		ret = -EINVAL;

	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		D_INFO("init LQ command complete,"
		       " clearing sta addition status for sta %d\n",
		       lq->sta_id);
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	return ret;
}
EXPORT_SYMBOL(il_send_lq_cmd);
2462
/*
 * il_mac_sta_remove - mac80211 sta_remove callback
 *
 * Looks up the driver station id stashed in the mac80211 station's
 * drv_priv and removes the station under il->mutex.
 */
int
il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter station %pM\n", sta->addr);

	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
	if (ret)
		IL_ERR("Error removing station %pM\n", sta->addr);

	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_sta_remove);
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555int
2556il_rx_queue_space(const struct il_rx_queue *q)
2557{
2558 int s = q->read - q->write;
2559 if (s <= 0)
2560 s += RX_QUEUE_SIZE;
2561
2562 s -= 2;
2563 if (s < 0)
2564 s = 0;
2565 return s;
2566}
2567EXPORT_SYMBOL(il_rx_queue_space);
2568
2569
2570
2571
/*
 * il_rx_queue_update_write_ptr - tell the device about new Rx buffers
 *
 * Writes the Rx write pointer (rounded down to a multiple of 8) to the
 * device, under q->lock.  If the device may be asleep (S_POWER_PMI),
 * first checks CSR_UCODE_DRV_GP1; if the MAC is sleeping, requests a
 * wakeup instead and leaves need_update set so the write is retried.
 */
void
il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(S_POWER_PMI, &il->status)) {
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
			       reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* need_update stays set: retry after wakeup */
			goto exit_unlock;
		}

		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);

		/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2612
/*
 * il_rx_queue_alloc - allocate the Rx queue DMA structures
 *
 * Allocates the circular buffer of receive buffer descriptors (256
 * 4-byte pointers) and the Rx status block, initializes the free/used
 * lists (all pool entries start on rx_used) and resets the queue
 * pointers.  Returns 0 on success, -ENOMEM on allocation failure
 * (earlier allocations are freed).
 */
int
il_rx_queue_alloc(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct device *dev = &il->pci_dev->dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;

err_rb:
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
err_bd:
	return -ENOMEM;
}
EXPORT_SYMBOL(il_rx_queue_alloc);
2654
2655void
2656il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2657{
2658 struct il_rx_pkt *pkt = rxb_addr(rxb);
2659 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2660
2661 if (!report->state) {
2662 D_11H("Spectrum Measure Notification: Start\n");
2663 return;
2664 }
2665
2666 memcpy(&il->measure_report, report, sizeof(*report));
2667 il->measurement_status |= MEASUREMENT_READY;
2668}
2669EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2670
2671
2672
2673
/*
 * il_set_decrypted_flag - translate HW decryption status for mac80211
 *
 * For protected frames (unless HW decryption is disabled via
 * RXON_FILTER_DIS_DECRYPT_MSK), inspects the Rx decryption result:
 * sets RX_FLAG_DECRYPTED in @stats on success, returns -1 when the
 * frame failed ICV/MIC and must be dropped, 0 otherwise.
 */
int
il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
		      u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	D_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * mac80211 will handle the MIC failure itself. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - other TKIP results share the WEP checks */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is in-place, drop it */
			D_RX("Packet destroyed\n");
			return -1;
		}
		/* fall through - check for successful decryption */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			D_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(il_set_decrypted_flag);
2721
2722
2723
2724
/*
 * il_txq_update_write_ptr - tell the device about new Tx descriptors
 *
 * Writes the Tx queue write pointer to HBUS_TARG_WRPTR (queue id in
 * bits 8+).  If the device may be asleep (S_POWER_PMI), checks
 * CSR_UCODE_DRV_GP1 first; if the MAC is sleeping, requests a wakeup
 * and returns with need_update still set so the write is retried.
 */
void
il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
			       txq_id, reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
	} else
		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
2761
2762
2763
2764
2765void
2766il_tx_queue_unmap(struct il_priv *il, int txq_id)
2767{
2768 struct il_tx_queue *txq = &il->txq[txq_id];
2769 struct il_queue *q = &txq->q;
2770
2771 if (q->n_bd == 0)
2772 return;
2773
2774 while (q->write_ptr != q->read_ptr) {
2775 il->ops->txq_free_tfd(il, txq);
2776 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2777 }
2778}
2779EXPORT_SYMBOL(il_tx_queue_unmap);
2780
2781
2782
2783
2784
2785
2786
2787
2788
/*
 * il_tx_queue_free - free all resources owned by one data TX queue
 *
 * Unmaps outstanding TFDs, frees the per-slot command buffers, the
 * coherent TFD ring, the skb pointer array and the meta array, then
 * zeroes the whole structure so a later init starts from a clean state.
 */
void
il_tx_queue_free(struct il_priv *il, int txq_id)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_tx_queue_unmap(il, txq_id);

	/* De-alloc array of command/tx buffers (kfree(NULL) is a no-op). */
	if (txq->cmd) {
		for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
			kfree(txq->cmd[i]);
	}

	/* De-alloc circular buffer of TFDs shared with the device. */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc the driver's skb pointer array. */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* De-alloc the command pointer and meta arrays. */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* Zero out so a future il_tx_queue_init() sees a pristine queue. */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_tx_queue_free);
2823
2824
2825
2826
/*
 * il_cmd_queue_unmap - unmap all DMA-mapped host commands on the cmd queue
 *
 * Walks the ring from read_ptr to write_ptr and unmaps any slot still
 * flagged CMD_MAPPED, then also checks the extra slot at index n_win
 * (presumably the "huge" command slot - confirm against il_get_cmd_idx).
 */
void
il_cmd_queue_unmap(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	int i;

	/* Queue was never allocated. */
	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = il_get_cmd_idx(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_MAPPED) {
			pci_unmap_single(il->pci_dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 PCI_DMA_BIDIRECTIONAL);
			/* Mark slot as available again. */
			txq->meta[i].flags = 0;
		}

		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	/* The extra slot beyond the window is not covered by the loop. */
	i = q->n_win;
	if (txq->meta[i].flags & CMD_MAPPED) {
		pci_unmap_single(il->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
		txq->meta[i].flags = 0;
	}
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
2861
2862
2863
2864
2865
2866
2867
2868
2869
/*
 * il_cmd_queue_free - free all resources owned by the command queue
 *
 * Counterpart of the command-queue path of il_tx_queue_init(): that path
 * allocates slots + 1 command buffers (the extra one for huge commands),
 * hence the inclusive "<= TFD_CMD_SLOTS" loop bound below.
 */
void
il_cmd_queue_free(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct device *dev = &il->pci_dev->dev;
	int i;

	il_cmd_queue_unmap(il);

	/* De-alloc array of command buffers, including the huge slot. */
	if (txq->cmd) {
		for (i = 0; i <= TFD_CMD_SLOTS; i++)
			kfree(txq->cmd[i]);
	}

	/* De-alloc circular buffer of TFDs shared with the device. */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* De-alloc the command pointer and meta arrays. */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* Zero out so a future init sees a pristine queue. */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_cmd_queue_free);
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924int
2925il_queue_space(const struct il_queue *q)
2926{
2927 int s = q->read_ptr - q->write_ptr;
2928
2929 if (q->read_ptr > q->write_ptr)
2930 s -= q->n_bd;
2931
2932 if (s <= 0)
2933 s += q->n_win;
2934
2935 s -= 2;
2936 if (s < 0)
2937 s = 0;
2938 return s;
2939}
2940EXPORT_SYMBOL(il_queue_space);
2941
2942
2943
2944
2945
/*
 * il_queue_init - initialize ring indices and watermarks of a TX queue
 *
 * @slots: usable window size (must be a power of 2)
 * @id:    hardware queue number
 *
 * Always returns 0.
 */
static int
il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
{
	/*
	 * TFD_QUEUE_SIZE_MAX must be a power of 2 so wrap-around index
	 * arithmetic can rely on masking.
	 */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Number of TFD entries in the circular buffer. */
	q->n_bd = TFD_QUEUE_SIZE_MAX;

	q->n_win = slots;
	q->id = id;

	/* The window size must also be a power of 2. */
	BUG_ON(!is_power_of_2(slots));

	/*
	 * NOTE(review): low_mark ends up >= high_mark for large windows;
	 * this reproduces the original computation - confirm the intended
	 * watermark semantics before changing it.
	 */
	q->low_mark = q->n_win / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_win / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	/* Empty queue: both indices at slot 0. */
	q->write_ptr = q->read_ptr = 0;

	return 0;
}
2976
2977
2978
2979
/*
 * il_tx_queue_alloc - allocate the TFD ring and skb array for one queue
 *
 * The command queue (id == il->cmd_queue) never carries skbs, so the skb
 * pointer array is allocated only for data queues.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int
il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
{
	struct device *dev = &il->pci_dev->dev;
	size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver-private skb pointers; not shared with the device. */
	if (id != il->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
				    sizeof(struct sk_buff *),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IL_ERR("Fail to alloc skbs\n");
			goto error;
		}
	} else
		txq->skbs = NULL;

	/* Circular buffer of TFDs, DMA-coherent, shared with the device. */
	txq->tfds =
	    dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	txq->q.id = id;

	return 0;

error:
	/* kfree(NULL) is a no-op when the skb array was never allocated. */
	kfree(txq->skbs);
	txq->skbs = NULL;

	return -ENOMEM;
}
3016
3017
3018
3019
/*
 * il_tx_queue_init - allocate and fully initialize one TX/command queue
 *
 * The command queue gets one slot beyond its window: the extra slot
 * (index == slots) holds "huge" commands and is sized IL_MAX_CMD_SIZE
 * instead of sizeof(struct il_device_cmd).
 *
 * Return: 0 on success, -ENOMEM on allocation failure (all partial
 * allocations are released before returning).
 */
int
il_tx_queue_init(struct il_priv *il, u32 txq_id)
{
	int i, len, ret;
	int slots, actual_slots;
	struct il_tx_queue *txq = &il->txq[txq_id];

	/* Command queue carries one extra "huge" slot. */
	if (txq_id == il->cmd_queue) {
		slots = TFD_CMD_SLOTS;
		actual_slots = slots + 1;
	} else {
		slots = TFD_TX_CMD_SLOTS;
		actual_slots = slots;
	}

	txq->meta =
	    kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL);
	txq->cmd =
	    kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct il_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* Only the command queue reaches i == slots (huge slot). */
		if (i == slots)
			len = IL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Allocate the TFD ring and (for data queues) the skb array. */
	ret = il_tx_queue_alloc(il, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Map the default queues 0-3 to themselves for mac80211; other
	 * (aggregation) queues get their swq_id when they are set up.
	 */
	if (txq_id < 4)
		il_set_swq_id(txq, txq_id, txq_id);

	/* Initialize ring indices and watermarks. */
	il_queue_init(il, &txq->q, slots, txq_id);

	/* Program the queue into the hardware. */
	il->ops->txq_init(il, txq);

	return 0;
err:
	/* Unallocated entries are NULL (kcalloc) and kfree(NULL) is safe. */
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	txq->meta = NULL;
	kfree(txq->cmd);
	txq->cmd = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(il_tx_queue_init);
3096
3097void
3098il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3099{
3100 int slots, actual_slots;
3101 struct il_tx_queue *txq = &il->txq[txq_id];
3102
3103 if (txq_id == il->cmd_queue) {
3104 slots = TFD_CMD_SLOTS;
3105 actual_slots = TFD_CMD_SLOTS + 1;
3106 } else {
3107 slots = TFD_TX_CMD_SLOTS;
3108 actual_slots = TFD_TX_CMD_SLOTS;
3109 }
3110
3111 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3112 txq->need_update = 0;
3113
3114
3115 il_queue_init(il, &txq->q, slots, txq_id);
3116
3117
3118 il->ops->txq_init(il, txq);
3119}
3120EXPORT_SYMBOL(il_tx_queue_reset);
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133int
3134il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
3135{
3136 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3137 struct il_queue *q = &txq->q;
3138 struct il_device_cmd *out_cmd;
3139 struct il_cmd_meta *out_meta;
3140 dma_addr_t phys_addr;
3141 unsigned long flags;
3142 int len;
3143 u32 idx;
3144 u16 fix_size;
3145
3146 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
3147 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
3148
3149
3150
3151
3152
3153
3154 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
3155 !(cmd->flags & CMD_SIZE_HUGE));
3156 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
3157
3158 if (il_is_rfkill(il) || il_is_ctkill(il)) {
3159 IL_WARN("Not sending command - %s KILL\n",
3160 il_is_rfkill(il) ? "RF" : "CT");
3161 return -EIO;
3162 }
3163
3164 spin_lock_irqsave(&il->hcmd_lock, flags);
3165
3166 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
3167 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3168
3169 IL_ERR("Restarting adapter due to command queue full\n");
3170 queue_work(il->workqueue, &il->restart);
3171 return -ENOSPC;
3172 }
3173
3174 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3175 out_cmd = txq->cmd[idx];
3176 out_meta = &txq->meta[idx];
3177
3178 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
3179 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3180 return -ENOSPC;
3181 }
3182
3183 memset(out_meta, 0, sizeof(*out_meta));
3184 out_meta->flags = cmd->flags | CMD_MAPPED;
3185 if (cmd->flags & CMD_WANT_SKB)
3186 out_meta->source = cmd;
3187 if (cmd->flags & CMD_ASYNC)
3188 out_meta->callback = cmd->callback;
3189
3190 out_cmd->hdr.cmd = cmd->id;
3191 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
3192
3193
3194
3195
3196 out_cmd->hdr.flags = 0;
3197 out_cmd->hdr.sequence =
3198 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
3199 if (cmd->flags & CMD_SIZE_HUGE)
3200 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
3201 len = sizeof(struct il_device_cmd);
3202 if (idx == TFD_CMD_SLOTS)
3203 len = IL_MAX_CMD_SIZE;
3204
3205#ifdef CONFIG_IWLEGACY_DEBUG
3206 switch (out_cmd->hdr.cmd) {
3207 case C_TX_LINK_QUALITY_CMD:
3208 case C_SENSITIVITY:
3209 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3210 "%d bytes at %d[%d]:%d\n",
3211 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3212 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3213 q->write_ptr, idx, il->cmd_queue);
3214 break;
3215 default:
3216 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3217 "%d bytes at %d[%d]:%d\n",
3218 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3219 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3220 idx, il->cmd_queue);
3221 }
3222#endif
3223
3224 phys_addr =
3225 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
3226 PCI_DMA_BIDIRECTIONAL);
3227 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
3228 idx = -ENOMEM;
3229 goto out;
3230 }
3231 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3232 dma_unmap_len_set(out_meta, len, fix_size);
3233
3234 txq->need_update = 1;
3235
3236 if (il->ops->txq_update_byte_cnt_tbl)
3237
3238 il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
3239
3240 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
3241 U32_PAD(cmd->len));
3242
3243
3244 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3245 il_txq_update_write_ptr(il, txq);
3246
3247out:
3248 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3249 return idx;
3250}
3251
3252
3253
3254
3255
3256
3257
3258
/*
 * il_hcmd_queue_reclaim - advance the command queue read pointer past @idx
 *
 * Frees ring positions up to and including @idx. A single command should
 * be reclaimed per response; if more than one slot is skipped, something
 * is out of sync and an adapter restart is scheduled.
 */
static void
il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;

	/* @idx must refer to a slot that is currently in use. */
	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		/* More than one slot reclaimed: queue is out of sync. */
		if (nfreed++ > 0) {
			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
			       q->write_ptr, q->read_ptr);
			queue_work(il->workqueue, &il->restart);
		}

	}
}
3284
3285
3286
3287
3288
3289
3290
3291
3292
/*
 * il_tx_cmd_complete - handle a ucode response to a host command
 *
 * Looks up the originating slot from the response sequence number,
 * unmaps the command buffer, hands the response to the requester
 * (CMD_WANT_SKB) or its async callback, reclaims the ring slot and,
 * for synchronous commands, wakes the waiter.
 */
void
il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	int cmd_idx;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct il_device_cmd *cmd;
	struct il_cmd_meta *meta;
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	unsigned long flags;

	/*
	 * Every command response must come back on the command queue;
	 * anything else indicates a corrupted sequence number.
	 */
	if (WARN
	    (txq_id != il->cmd_queue,
	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
	     il->txq[il->cmd_queue].q.write_ptr)) {
		il_print_hex_error(il, pkt, 32);
		return;
	}

	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
	cmd = txq->cmd[cmd_idx];
	meta = &txq->meta[cmd_idx];

	txq->time_stamp = jiffies;

	pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* Hand the response page to the requester. */
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(il, cmd, pkt);

	spin_lock_irqsave(&il->hcmd_lock, flags);

	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(S_HCMD_ACTIVE, &il->status);
		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
		       il_get_cmd_string(cmd->hdr.cmd));
		wake_up(&il->wait_command_queue);
	}

	/* Mark the slot free under the lock. */
	meta->flags = 0;

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
}
EXPORT_SYMBOL(il_tx_cmd_complete);
3352
/* Module metadata for the shared iwlegacy core. */
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
/*
 * WiFi/Bluetooth coexistence, enabled by default; read-only module
 * parameter (0444), so it can only be changed at module load time.
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, 0444);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug level shared by the 3945 and 4965 drivers. */
u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

/* The all-ones broadcast MAC address. */
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3383
#define MAX_BIT_RATE_40_MHZ 150	/* Mbps */
#define MAX_BIT_RATE_20_MHZ 72	/* Mbps */
/*
 * il_init_ht_hw_capab - fill mac80211 HT capabilities from hw parameters
 *
 * Populates @ht_info (capabilities, MCS masks, AMPDU parameters) based on
 * the device's RX/TX chain counts and HT40 support on @band.
 */
static void
il_init_ht_hw_capab(const struct il_priv *il,
		    struct ieee80211_sta_ht_cap *ht_info,
		    enum nl80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = il->hw_params.rx_chains_num;
	u8 tx_chains_num = il->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (il->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* rx_mask[4] bit 0 - presumably MCS 32; confirm. */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (il->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* Enable one MCS group (8 rates) per available RX chain. */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate scales with RX chains. */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities; advertise asymmetric chains if needed. */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |=
		    ((tx_chains_num -
		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
3435
3436
3437
3438
/*
 * il_init_geos - build mac80211 band/channel/rate tables from EEPROM data
 *
 * Allocates the ieee80211_channel and ieee80211_rate arrays, wires them
 * into il->bands[], fills per-channel flags from the EEPROM channel info
 * and records the device TX power limit.
 *
 * Return: 0 on success (also when already configured), -ENOMEM otherwise.
 */
int
il_init_geos(struct il_priv *il)
{
	struct il_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = 0;

	/* Bands already populated - nothing to do. */
	if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
	    il->bands[NL80211_BAND_5GHZ].n_bitrates) {
		D_INFO("Geography modes already initialized.\n");
		set_bit(S_GEO_CONFIGURED, &il->status);
		return 0;
	}

	channels =
	    kcalloc(il->channel_count, sizeof(struct ieee80211_channel),
		    GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates =
	    kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
		    GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after band 1 (2.4 GHz) channels. */
	sband = &il->bands[NL80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
	/* 5 GHz has no CCK rates: skip straight to the OFDM rates. */
	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);

	sband = &il->bands[NL80211_BAND_2GHZ];
	sband->channels = channels;
	/* 2.4 GHz supports both CCK and OFDM rates. */
	sband->bitrates = rates;
	sband->n_bitrates = RATE_COUNT_LEGACY;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);

	il->ieee_channels = channels;
	il->ieee_rates = rates;

	for (i = 0; i < il->channel_count; i++) {
		ch = &il->channel_info[i];

		if (!il_is_channel_valid(ch))
			continue;

		sband = &il->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
		    ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (il_is_channel_valid(ch)) {
			/* No IBSS/active scan unless the EEPROM allows it. */
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* Track the highest allowed TX power of any channel. */
			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
		       geo_ch->center_freq,
		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
		       geo_ch->
		       flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
		       geo_ch->flags);
	}

	il->tx_power_device_lmt = max_tx_power;
	il->tx_power_user_lmt = max_tx_power;
	il->tx_power_next = max_tx_power;

	/* A-band SKU but no 5 GHz channels: EEPROM/SKU mismatch. */
	if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
	    (il->cfg->sku & IL_SKU_A)) {
		IL_INFO("Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			il->pci_dev->device, il->pci_dev->subsystem_device);
		il->cfg->sku &= ~IL_SKU_A;
	}

	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		il->bands[NL80211_BAND_2GHZ].n_channels,
		il->bands[NL80211_BAND_5GHZ].n_channels);

	set_bit(S_GEO_CONFIGURED, &il->status);

	return 0;
}
EXPORT_SYMBOL(il_init_geos);
3556
3557
3558
3559
/* il_free_geos - release the tables allocated by il_init_geos(). */
void
il_free_geos(struct il_priv *il)
{
	kfree(il->ieee_channels);
	kfree(il->ieee_rates);
	clear_bit(S_GEO_CONFIGURED, &il->status);
}
EXPORT_SYMBOL(il_free_geos);
3568
3569static bool
3570il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
3571 u16 channel, u8 extension_chan_offset)
3572{
3573 const struct il_channel_info *ch_info;
3574
3575 ch_info = il_get_channel_info(il, band, channel);
3576 if (!il_is_channel_valid(ch_info))
3577 return false;
3578
3579 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3580 return !(ch_info->
3581 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3582 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3583 return !(ch_info->
3584 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3585
3586 return false;
3587}
3588
/*
 * il_is_ht40_tx_allowed - may we transmit in 40 MHz on the current channel?
 *
 * @ht_cap: peer HT capabilities, or NULL to check only our own state.
 */
bool
il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!il->ht.enabled || !il->ht.is_40mhz)
		return false;

	/*
	 * When a peer capability is supplied, it must itself support HT;
	 * with NULL we only validate the local channel configuration.
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	/* debugfs knob to force 20 MHz operation */
	if (il->disable_ht40)
		return false;
#endif

	return il_is_channel_extension(il, il->band,
				       le16_to_cpu(il->staging.channel),
				       il->ht.extension_chan_offset);
}
EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3612
3613static u16 noinline
3614il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3615{
3616 u16 new_val;
3617 u16 beacon_factor;
3618
3619
3620
3621
3622
3623 if (!beacon_val)
3624 return DEFAULT_BEACON_INTERVAL;
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3639 new_val = beacon_val / beacon_factor;
3640
3641 if (!new_val)
3642 new_val = max_beacon_val;
3643
3644 return new_val;
3645}
3646
/*
 * il_send_rxon_timing - program beacon/listen timing into the ucode
 *
 * Builds a C_RXON_TIMING command from the current mac80211 configuration
 * (beacon interval, DTIM period, listen interval) and the last received
 * timestamp, then sends it synchronously. Caller must hold il->mutex.
 *
 * Return: result of il_send_cmd_pdu() (0 on success, negative errno).
 */
int
il_send_rxon_timing(struct il_priv *il)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = il->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));

	il->timing.timestamp = cpu_to_le64(il->timestamp);
	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	/* No interface yet: fall back handled by il_adjust_beacon_interval */
	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/* ATIM window is not used. */
	il->timing.atim_win = 0;

	/* Clamp the interval to what the hardware supports. */
	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	il->timing.beacon_interval = cpu_to_le16(beacon_int);

	/* Offset of the next beacon relative to the last timestamp. */
	tsf = il->timestamp;
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period defaults to 1 when unknown. */
	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(il->timing.beacon_interval),
		le32_to_cpu(il->timing.beacon_init_val),
		le16_to_cpu(il->timing.atim_win));

	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
			       &il->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3695
3696void
3697il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3698{
3699 struct il_rxon_cmd *rxon = &il->staging;
3700
3701 if (hw_decrypt)
3702 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3703 else
3704 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3705
3706}
3707EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3708
3709
3710int
3711il_check_rxon_cmd(struct il_priv *il)
3712{
3713 struct il_rxon_cmd *rxon = &il->staging;
3714 bool error = false;
3715
3716 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3717 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3718 IL_WARN("check 2.4G: wrong narrow\n");
3719 error = true;
3720 }
3721 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3722 IL_WARN("check 2.4G: wrong radar\n");
3723 error = true;
3724 }
3725 } else {
3726 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3727 IL_WARN("check 5.2G: not short slot!\n");
3728 error = true;
3729 }
3730 if (rxon->flags & RXON_FLG_CCK_MSK) {
3731 IL_WARN("check 5.2G: CCK!\n");
3732 error = true;
3733 }
3734 }
3735 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3736 IL_WARN("mac/bssid mcast!\n");
3737 error = true;
3738 }
3739
3740
3741 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3742 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3743 IL_WARN("neither 1 nor 6 are basic\n");
3744 error = true;
3745 }
3746
3747 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3748 IL_WARN("aid > 2007\n");
3749 error = true;
3750 }
3751
3752 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3753 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3754 IL_WARN("CCK and short slot\n");
3755 error = true;
3756 }
3757
3758 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3759 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3760 IL_WARN("CCK and auto detect");
3761 error = true;
3762 }
3763
3764 if ((rxon->
3765 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3766 RXON_FLG_TGG_PROTECT_MSK) {
3767 IL_WARN("TGg but no auto-detect\n");
3768 error = true;
3769 }
3770
3771 if (error)
3772 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3773
3774 if (error) {
3775 IL_ERR("Invalid RXON\n");
3776 return -EINVAL;
3777 }
3778 return 0;
3779}
3780EXPORT_SYMBOL(il_check_rxon_cmd);
3781
3782
3783
3784
3785
3786
3787
3788
3789
/*
 * il_full_rxon_required - does staging vs. active RXON need a full commit?
 *
 * Compares the staging RXON against the active one. Returns 1 when a
 * full (unassociated) RXON command must be sent, 0 when the differences
 * can be applied with an in-association update.
 */
int
il_full_rxon_required(struct il_priv *il)
{
	const struct il_rxon_cmd *staging = &il->staging;
	const struct il_rxon_cmd *active = &il->active;

/* CHK: any true condition forces a full RXON. */
#define CHK(cond) \
	if ((cond)) { \
		D_INFO("need full RXON - " #cond "\n"); \
		return 1; \
	}

/* CHK_NEQ: any field mismatch forces a full RXON. */
#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		D_INFO("need full RXON - " \
		       #c1 " != " #c2 " - %d != %d\n", \
		       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!il_is_associated(il));
	CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
				     active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/*
	 * Of the remaining flags, only band and association state require
	 * a full RXON; everything else can be updated incrementally.
	 */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(il_full_rxon_required);
3843
3844u8
3845il_get_lowest_plcp(struct il_priv *il)
3846{
3847
3848
3849
3850
3851 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3852 return RATE_1M_PLCP;
3853 else
3854 return RATE_6M_PLCP;
3855}
3856EXPORT_SYMBOL(il_get_lowest_plcp);
3857
/*
 * _il_set_rxon_ht - translate HT state into staging RXON channel-mode flags
 *
 * Clears all HT-related RXON flags when HT is disabled; otherwise sets
 * the protection mode and the 20/40 MHz channel mode plus control-channel
 * location according to the extension channel offset.
 */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	struct il_rxon_cmd *rxon = &il->staging;

	if (!il->ht.enabled) {
		/* HT off: strip every HT flag from the staging RXON. */
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |=
	    cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/*
	 * Recompute channel mode and control-channel location from
	 * scratch below.
	 */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, NULL)) {
		/* pure 40 MHz mode when 20 MHz protection is in force */
		if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Control channel is opposite the extension channel. */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* mixed 20/40 MHz mode */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only is important for HT40 */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		il->ht.protection, il->ht.extension_chan_offset);
}
3923
/* Exported wrapper around _il_set_rxon_ht(). */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3930
3931
/*
 * il_get_single_channel_number - pick a valid channel on @band
 *
 * Scans the driver's channel_info table (by table index, not channel
 * number - presumably the first 14 entries are the 2.4 GHz channels;
 * confirm against the EEPROM parsing code) and returns the first valid
 * channel that differs from the current staging channel, or the last
 * candidate examined when none qualifies.
 */
u8
il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
{
	const struct il_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;

	if (band == NL80211_BAND_5GHZ) {
		min = 14;
		max = il->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		channel = il->channel_info[i].channel;
		/* Skip the channel we are already tuned to. */
		if (channel == le16_to_cpu(il->staging.channel))
			continue;

		ch_info = il_get_channel_info(il, band, channel);
		if (il_is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(il_get_single_channel_number);
3961
3962
3963
3964
3965
3966
3967
3968
3969int
3970il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3971{
3972 enum nl80211_band band = ch->band;
3973 u16 channel = ch->hw_value;
3974
3975 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3976 return 0;
3977
3978 il->staging.channel = cpu_to_le16(channel);
3979 if (band == NL80211_BAND_5GHZ)
3980 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3981 else
3982 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3983
3984 il->band = band;
3985
3986 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3987
3988 return 0;
3989}
3990EXPORT_SYMBOL(il_set_rxon_channel);
3991
3992void
3993il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
3994 struct ieee80211_vif *vif)
3995{
3996 if (band == NL80211_BAND_5GHZ) {
3997 il->staging.flags &=
3998 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3999 RXON_FLG_CCK_MSK);
4000 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
4001 } else {
4002
4003 if (vif && vif->bss_conf.use_short_slot)
4004 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
4005 else
4006 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
4007
4008 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
4009 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
4010 il->staging.flags &= ~RXON_FLG_CCK_MSK;
4011 }
4012}
4013EXPORT_SYMBOL(il_set_flags_for_band);
4014
4015
4016
4017
/*
 * il_connection_init_rx_config - build the initial staging RXON
 *
 * Fills il->staging from scratch based on the current interface mode,
 * the active channel (falling back to the first known channel) and the
 * default basic-rate masks.
 */
void
il_connection_init_rx_config(struct il_priv *il)
{
	const struct il_channel_info *ch_info;

	memset(&il->staging, 0, sizeof(il->staging));

	switch (il->iw_mode) {
	case NL80211_IFTYPE_UNSPECIFIED:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		break;
	case NL80211_IFTYPE_STATION:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	case NL80211_IFTYPE_ADHOC:
		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		il->staging.filter_flags =
		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	default:
		IL_ERR("Unsupported interface type %d\n", il->vif->type);
		return;
	}

#if 0
	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(il->hw)->short_preamble)
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));

	/* Fall back to the first known channel if lookup fails. */
	if (!ch_info)
		ch_info = &il->channel_info[0];

	il->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, il->band, il->vif);

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	il->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	il->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (il->vif)
		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);

	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
4079
4080void
4081il_set_rate(struct il_priv *il)
4082{
4083 const struct ieee80211_supported_band *hw = NULL;
4084 struct ieee80211_rate *rate;
4085 int i;
4086
4087 hw = il_get_hw_mode(il, il->band);
4088 if (!hw) {
4089 IL_ERR("Failed to set rate: unable to get hw mode\n");
4090 return;
4091 }
4092
4093 il->active_rate = 0;
4094
4095 for (i = 0; i < hw->n_bitrates; i++) {
4096 rate = &(hw->bitrates[i]);
4097 if (rate->hw_value < RATE_COUNT_LEGACY)
4098 il->active_rate |= (1 << rate->hw_value);
4099 }
4100
4101 D_RATE("Set active_rate = %0x\n", il->active_rate);
4102
4103 il->staging.cck_basic_rates =
4104 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4105
4106 il->staging.ofdm_basic_rates =
4107 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4108}
4109EXPORT_SYMBOL(il_set_rate);
4110
4111void
4112il_chswitch_done(struct il_priv *il, bool is_success)
4113{
4114 if (test_bit(S_EXIT_PENDING, &il->status))
4115 return;
4116
4117 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4118 ieee80211_chswitch_done(il->vif, is_success);
4119}
4120EXPORT_SYMBOL(il_chswitch_done);
4121
4122void
4123il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
4124{
4125 struct il_rx_pkt *pkt = rxb_addr(rxb);
4126 struct il_csa_notification *csa = &(pkt->u.csa_notif);
4127 struct il_rxon_cmd *rxon = (void *)&il->active;
4128
4129 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4130 return;
4131
4132 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
4133 rxon->channel = csa->channel;
4134 il->staging.channel = csa->channel;
4135 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
4136 il_chswitch_done(il, true);
4137 } else {
4138 IL_ERR("CSA notif (fail) : channel %d\n",
4139 le16_to_cpu(csa->channel));
4140 il_chswitch_done(il, false);
4141 }
4142}
4143EXPORT_SYMBOL(il_hdl_csa);
4144
4145#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il_print_rx_config_cmd - dump the staging RXON command to the debug log
 *
 * Debug-only helper (compiled under CONFIG_IWLEGACY_DEBUG); hex-dumps
 * the whole command, then prints each field individually.
 */
void
il_print_rx_config_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
4164#endif
4165
4166
4167
/*
 * il_irq_handle_error - react to a uCode (firmware) error
 *
 * Marks the firmware as dead, dumps diagnostic state, releases any
 * waiter stuck on a host command, and optionally schedules a restart.
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel currently queued command. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	il->ops->dump_nic_error_log(il);
	if (il->ops->dump_fh)
		il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il);
#endif

	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit. */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		/* Only reload the firmware if the module parameter allows. */
		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);
4202
/*
 * _il_apm_stop_master - stop the device's bus-master DMA activity
 *
 * Requests a master stop and polls (up to 100 usec) for the hardware
 * to acknowledge.  Returns the poll result (>= 0 on success,
 * -ETIMEDOUT otherwise).
 */
static int
_il_apm_stop_master(struct il_priv *il)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	/* wait for the hardware to report the master as disabled */
	ret =
	    _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IL_WARN("Master Disable Timed Out, 100 usec\n");

	D_INFO("stop master\n");

	return ret;
}
4221
/*
 * _il_apm_stop - put the device into a low-power state
 *
 * Stops bus mastering, asserts a software reset, then clears the
 * "init done" bit.  Caller must hold il->reg_lock (asserted below).
 */
void
_il_apm_stop(struct il_priv *il)
{
	lockdep_assert_held(&il->reg_lock);

	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	_il_apm_stop_master(il);

	/* Reset the entire device */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* give the reset time to take effect before touching CSR_GP_CNTRL */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move the adapter out of
	 * its powered-up active state.
	 */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(_il_apm_stop);
4244
4245void
4246il_apm_stop(struct il_priv *il)
4247{
4248 unsigned long flags;
4249
4250 spin_lock_irqsave(&il->reg_lock, flags);
4251 _il_apm_stop(il);
4252 spin_unlock_irqrestore(&il->reg_lock, flags);
4253}
4254EXPORT_SYMBOL(il_apm_stop);
4255
4256
4257
4258
4259
4260
/*
 * il_apm_init - bring up the device's basic power/clock management
 *
 * Programs chicken bits and PCIe power-state workarounds, configures
 * the PLL, sets "init done", waits for the MAC clock to stabilize,
 * and enables the DMA (and, where used, BSM) clocks.
 * Returns 0 on success or the negative poll-timeout result.
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/* Disable the L0S exit timer (chicken bit). */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* Don't wait for L0S on RX (chicken bit). */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Program the HPET memory debug register to its required value. */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/* Enable HAP wake from PCIe L1a. */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * When the config asks for it, mirror the PCIe ASPM state:
	 * if the platform enabled L1-ASPM, disable L0S, and vice versa.
	 */
	if (il->cfg->set_l0s) {
		pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
		if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
			/* L1-ASPM enabled; disable L0S */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure the analog PLL when the device config provides a value. */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG,
			   il->cfg->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move the adapter into its
	 * powered-up active state.
	 */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait (up to 25 ms) for clock stabilization before accessing
	 * device-internal resources.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable the DMA clock; devices that load uCode via the
	 * bootstrap state machine (use_bsm) also need the BSM clock.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active in the periphery. */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);
4367
/*
 * il_set_tx_power - set the user tx power limit
 * @tx_power: requested limit in dBm
 * @force: when true, send even while scanning / with pending RXON changes
 *
 * Validates the request against the device limit, possibly defers it
 * (remembered in il->tx_power_next), and otherwise hands it to the
 * device-specific send_tx_power op, rolling back on failure.
 * Caller must hold il->mutex (asserted below).
 *
 * Returns 0 on success or deferral, -EINVAL/-EIO/-EOPNOTSUPP on
 * rejection, or the op's error code.
 */
int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;

	lockdep_assert_held(&il->mutex);

	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->ops->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm means 1 milliwatt; negative values are rejected */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* always record the newest request so it can be applied later */
	il->tx_power_next = tx_power;

	/* do not set tx power while scanning or with uncommitted RXON */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&il->active, &il->staging, sizeof(il->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->ops->send_tx_power(il);

	/* if the device rejected it, restore the previous limit */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);
4423
4424void
4425il_send_bt_config(struct il_priv *il)
4426{
4427 struct il_bt_cmd bt_cmd = {
4428 .lead_time = BT_LEAD_TIME_DEF,
4429 .max_kill = BT_MAX_KILL_DEF,
4430 .kill_ack_mask = 0,
4431 .kill_cts_mask = 0,
4432 };
4433
4434 if (!bt_coex_active)
4435 bt_cmd.flags = BT_COEX_DISABLE;
4436 else
4437 bt_cmd.flags = BT_COEX_ENABLE;
4438
4439 D_INFO("BT coex %s\n",
4440 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4441
4442 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4443 IL_ERR("failed to send BT Coex Config\n");
4444}
4445EXPORT_SYMBOL(il_send_bt_config);
4446
4447int
4448il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4449{
4450 struct il_stats_cmd stats_cmd = {
4451 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4452 };
4453
4454 if (flags & CMD_ASYNC)
4455 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4456 &stats_cmd, NULL);
4457 else
4458 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4459 &stats_cmd);
4460}
4461EXPORT_SYMBOL(il_send_stats_request);
4462
/*
 * il_hdl_pm_sleep - debug handler for the firmware's sleep notification
 *
 * Only logs the sleep mode and wake-up source; compiled out entirely
 * without CONFIG_IWLEGACY_DEBUG.
 */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *notif = &pkt->u.sleep_notif;

	D_RX("sleep mode: %d, src: %d\n",
	     notif->pm_sleep_mode, notif->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4474
4475void
4476il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4477{
4478 struct il_rx_pkt *pkt = rxb_addr(rxb);
4479 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4480 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4481 il_get_cmd_string(pkt->hdr.cmd));
4482 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4483}
4484EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4485
/*
 * il_hdl_error - handler for the firmware's error-reply notification
 *
 * Logs the error details carried in the reply packet; no state change.
 */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4500
/* Reset the accumulated interrupt-source counters. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4506
4507int
4508il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
4509 const struct ieee80211_tx_queue_params *params)
4510{
4511 struct il_priv *il = hw->priv;
4512 unsigned long flags;
4513 int q;
4514
4515 D_MAC80211("enter\n");
4516
4517 if (!il_is_ready_rf(il)) {
4518 D_MAC80211("leave - RF not ready\n");
4519 return -EIO;
4520 }
4521
4522 if (queue >= AC_NUM) {
4523 D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
4524 return 0;
4525 }
4526
4527 q = AC_NUM - 1 - queue;
4528
4529 spin_lock_irqsave(&il->lock, flags);
4530
4531 il->qos_data.def_qos_parm.ac[q].cw_min =
4532 cpu_to_le16(params->cw_min);
4533 il->qos_data.def_qos_parm.ac[q].cw_max =
4534 cpu_to_le16(params->cw_max);
4535 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
4536 il->qos_data.def_qos_parm.ac[q].edca_txop =
4537 cpu_to_le16((params->txop * 32));
4538
4539 il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
4540
4541 spin_unlock_irqrestore(&il->lock, flags);
4542
4543 D_MAC80211("leave\n");
4544 return 0;
4545}
4546EXPORT_SYMBOL(il_mac_conf_tx);
4547
4548int
4549il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4550{
4551 struct il_priv *il = hw->priv;
4552 int ret;
4553
4554 D_MAC80211("enter\n");
4555
4556 ret = (il->ibss_manager == IL_IBSS_MANAGER);
4557
4558 D_MAC80211("leave ret %d\n", ret);
4559 return ret;
4560}
4561EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4562
/*
 * il_set_mode - program the device for the current interface type
 *
 * Rebuilds the staging RXON, lets the device-specific code pick the
 * RX chain, and commits the result to the hardware.
 */
static int
il_set_mode(struct il_priv *il)
{
	il_connection_init_rx_config(il);

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	return il_commit_rxon(il);
}
4573
/*
 * il_mac_add_interface - mac80211 add_interface callback
 *
 * Only a single vif is supported: adding a second, different vif fails
 * with -EOPNOTSUPP, while re-adding the same vif (a "reset") is
 * allowed.  If programming the mode fails for a newly added vif, the
 * vif pointer is rolled back.
 */
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	int err;
	bool reset;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * We do not support multiple virtual interfaces, but on hardware
	 * reset we have to add the same interface again.
	 */
	reset = (il->vif == vif);
	if (il->vif && !reset) {
		err = -EOPNOTSUPP;
		goto out;
	}

	il->vif = vif;
	il->iw_mode = vif->type;

	err = il_set_mode(il);
	if (err) {
		IL_WARN("Fail to set mode %d\n", vif->type);
		/* only roll back for a genuinely new vif */
		if (!reset) {
			il->vif = NULL;
			il->iw_mode = NL80211_IFTYPE_STATION;
		}
	}

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
4619
/*
 * il_teardown_interface - stop activity associated with a departing vif
 *
 * Cancels any scan owned by the vif, then reprograms the device for
 * the current mode.  Caller must hold il->mutex (asserted below).
 */
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	il_set_mode(il);
}
4632
/*
 * il_mac_remove_interface - mac80211 remove_interface callback
 *
 * Clears the driver's single-vif state, tears down vif-related
 * activity, and forgets the cached BSSID.
 */
void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	WARN_ON(il->vif != vif);
	il->vif = NULL;
	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
	il_teardown_interface(il, vif);
	eth_zero_addr(il->bssid);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_remove_interface);
4651
4652int
4653il_alloc_txq_mem(struct il_priv *il)
4654{
4655 if (!il->txq)
4656 il->txq =
4657 kcalloc(il->cfg->num_of_queues,
4658 sizeof(struct il_tx_queue),
4659 GFP_KERNEL);
4660 if (!il->txq) {
4661 IL_ERR("Not enough memory for txq\n");
4662 return -ENOMEM;
4663 }
4664 return 0;
4665}
4666EXPORT_SYMBOL(il_alloc_txq_mem);
4667
/* Free the Tx-queue array allocated by il_alloc_txq_mem(). */
void
il_free_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_free_txq_mem);
4675
/*
 * il_force_reset - request a firmware reload
 * @external: true when requested from outside the driver; external
 *	requests bypass the rate-limiting window and the restart_fw
 *	module parameter.
 *
 * Returns 0 on success (or when suppressed by restart_fw), -EINVAL
 * while shutting down, or -EAGAIN when rejected by rate limiting.
 */
int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* internal requests are rate-limited to one per window */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * If the request is external (e.g. debugfs), always perform it
	 * regardless of the module parameter setting.  If it is internal
	 * (uCode error or driver-detected failure), honour the
	 * restart_fw module parameter.
	 */
	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the ready bit.
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
EXPORT_SYMBOL(il_force_reset);
4728
/*
 * il_mac_change_interface - mac80211 change_interface callback
 *
 * Switches the single vif to a new interface type in place.  P2P is
 * not supported; the vif must exist and the device must be up.
 */
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	int err;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
		   vif->type, vif->addr, newtype, newp2p);

	if (newp2p) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!il->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* success */
	vif->type = newtype;
	vif->p2p = false;
	il->iw_mode = newtype;
	il_teardown_interface(il, vif);
	err = 0;

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);
4768
4769void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4770 u32 queues, bool drop)
4771{
4772 struct il_priv *il = hw->priv;
4773 unsigned long timeout = jiffies + msecs_to_jiffies(500);
4774 int i;
4775
4776 mutex_lock(&il->mutex);
4777 D_MAC80211("enter\n");
4778
4779 if (il->txq == NULL)
4780 goto out;
4781
4782 for (i = 0; i < il->hw_params.max_txq_num; i++) {
4783 struct il_queue *q;
4784
4785 if (i == il->cmd_queue)
4786 continue;
4787
4788 q = &il->txq[i].q;
4789 if (q->read_ptr == q->write_ptr)
4790 continue;
4791
4792 if (time_after(jiffies, timeout)) {
4793 IL_ERR("Failed to flush queue %d\n", q->id);
4794 break;
4795 }
4796
4797 msleep(20);
4798 }
4799out:
4800 D_MAC80211("leave\n");
4801 mutex_unlock(&il->mutex);
4802}
4803EXPORT_SYMBOL(il_mac_flush);
4804
4805
4806
4807
4808
4809static int
4810il_check_stuck_queue(struct il_priv *il, int cnt)
4811{
4812 struct il_tx_queue *txq = &il->txq[cnt];
4813 struct il_queue *q = &txq->q;
4814 unsigned long timeout;
4815 unsigned long now = jiffies;
4816 int ret;
4817
4818 if (q->read_ptr == q->write_ptr) {
4819 txq->time_stamp = now;
4820 return 0;
4821 }
4822
4823 timeout =
4824 txq->time_stamp +
4825 msecs_to_jiffies(il->cfg->wd_timeout);
4826
4827 if (time_after(now, timeout)) {
4828 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4829 jiffies_to_msecs(now - txq->time_stamp));
4830 ret = il_force_reset(il, false);
4831 return (ret == -EAGAIN) ? 0 : 1;
4832 }
4833
4834 return 0;
4835}
4836
4837
4838
4839
4840
/* Watchdog polling period: a quarter of the configured timeout. */
#define IL_WD_TICK(timeout) ((timeout) / 4)
4842
4843
4844
4845
4846
4847void
4848il_bg_watchdog(struct timer_list *t)
4849{
4850 struct il_priv *il = from_timer(il, t, watchdog);
4851 int cnt;
4852 unsigned long timeout;
4853
4854 if (test_bit(S_EXIT_PENDING, &il->status))
4855 return;
4856
4857 timeout = il->cfg->wd_timeout;
4858 if (timeout == 0)
4859 return;
4860
4861
4862 if (il_check_stuck_queue(il, il->cmd_queue))
4863 return;
4864
4865
4866 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4867
4868 if (cnt == il->cmd_queue)
4869 continue;
4870 if (il_check_stuck_queue(il, cnt))
4871 return;
4872 }
4873
4874 mod_timer(&il->watchdog,
4875 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4876}
4877EXPORT_SYMBOL(il_bg_watchdog);
4878
4879void
4880il_setup_watchdog(struct il_priv *il)
4881{
4882 unsigned int timeout = il->cfg->wd_timeout;
4883
4884 if (timeout)
4885 mod_timer(&il->watchdog,
4886 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4887 else
4888 del_timer(&il->watchdog);
4889}
4890EXPORT_SYMBOL(il_setup_watchdog);
4891
4892
4893
4894
4895
4896
4897
4898u32
4899il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4900{
4901 u32 quot;
4902 u32 rem;
4903 u32 interval = beacon_interval * TIME_UNIT;
4904
4905 if (!interval || !usec)
4906 return 0;
4907
4908 quot =
4909 (usec /
4910 interval) & (il_beacon_time_mask_high(il,
4911 il->hw_params.
4912 beacon_time_tsf_bits) >> il->
4913 hw_params.beacon_time_tsf_bits);
4914 rem =
4915 (usec % interval) & il_beacon_time_mask_low(il,
4916 il->hw_params.
4917 beacon_time_tsf_bits);
4918
4919 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4920}
4921EXPORT_SYMBOL(il_usecs_to_beacons);
4922
4923
4924
4925
4926__le32
4927il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4928 u32 beacon_interval)
4929{
4930 u32 base_low = base & il_beacon_time_mask_low(il,
4931 il->hw_params.
4932 beacon_time_tsf_bits);
4933 u32 addon_low = addon & il_beacon_time_mask_low(il,
4934 il->hw_params.
4935 beacon_time_tsf_bits);
4936 u32 interval = beacon_interval * TIME_UNIT;
4937 u32 res = (base & il_beacon_time_mask_high(il,
4938 il->hw_params.
4939 beacon_time_tsf_bits)) +
4940 (addon & il_beacon_time_mask_high(il,
4941 il->hw_params.
4942 beacon_time_tsf_bits));
4943
4944 if (base_low > addon_low)
4945 res += base_low - addon_low;
4946 else if (base_low < addon_low) {
4947 res += interval + base_low - addon_low;
4948 res += (1 << il->hw_params.beacon_time_tsf_bits);
4949 } else
4950 res += (1 << il->hw_params.beacon_time_tsf_bits);
4951
4952 return cpu_to_le32(res);
4953}
4954EXPORT_SYMBOL(il_add_beacon_time);
4955
4956#ifdef CONFIG_PM_SLEEP
4957
/*
 * il_pci_suspend - PM callback: put the NIC into a low-power state
 *
 * The PCI core handles saving PCI state; here we only need to stop
 * the adapter's power-management/DMA machinery.
 */
static int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/*
	 * mac80211 will call il_mac_stop() from its own suspend path
	 * first, but since il_mac_stop() has no knowledge of who the
	 * caller is, it will not stop the DMA operation.
	 * Call il_apm_stop() here to make sure DMA is stopped.
	 */
	il_apm_stop(il);

	return 0;
}
4975
4976static int
4977il_pci_resume(struct device *device)
4978{
4979 struct pci_dev *pdev = to_pci_dev(device);
4980 struct il_priv *il = pci_get_drvdata(pdev);
4981 bool hw_rfkill = false;
4982
4983
4984
4985
4986
4987 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4988
4989 il_enable_interrupts(il);
4990
4991 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4992 hw_rfkill = true;
4993
4994 if (hw_rfkill)
4995 set_bit(S_RFKILL, &il->status);
4996 else
4997 clear_bit(S_RFKILL, &il->status);
4998
4999 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
5000
5001 return 0;
5002}
5003
/* PM ops used by the PCI core during system suspend/resume. */
SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
EXPORT_SYMBOL(il_pm_ops);
5006
5007#endif
5008
/*
 * il_update_qos - push the default QoS parameters to the firmware
 *
 * Rebuilds qos_flags from the current QoS/HT state and sends the
 * QoS-parameter command asynchronously.  No-op while shutting down.
 */
static void
il_update_qos(struct il_priv *il)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il->qos_data.def_qos_parm.qos_flags = 0;

	if (il->qos_data.qos_active)
		il->qos_data.def_qos_parm.qos_flags |=
		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (il->ht.enabled)
		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
			      &il->qos_data.def_qos_parm, NULL);
}
5030
5031
5032
5033
/*
 * il_mac_config - mac80211 config callback
 *
 * Handles channel, SMPS, HT, power-save and tx-power changes.  Channel
 * programming is skipped while a scan is active; the staging RXON is
 * committed at the end only when it differs from the active one.
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* cache the SM power-save mode mac80211 selected */
		il->current_ht_config.smps = conf->smps_mode;

		/* let device-specific code recalculate RX chain usage */
		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* during scanning mac80211 delays channel setting until the scan
	 * finishes, then calls back with changed == 0 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Configure HT40 channels */
		if (il->ht.enabled != conf_is_ht(conf)) {
			il->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (il->ht.enabled) {
			if (conf_is_ht40_minus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				il->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				il->ht.is_40mhz = true;
			} else {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				il->ht.is_40mhz = false;
			}
		} else
			il->ht.is_40mhz = false;

		/*
		 * Default to no protection.  Protection mode will
		 * later be set from BSS config in il_ht_conf().
		 */
		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* if we are switching channels, reset all RXON flags so no
		 * stale HT-related flags survive the channel change */
		if ((le16_to_cpu(il->staging.channel) != ch))
			il->staging.flags = 0;

		il_set_rxon_channel(il, channel);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, channel->band, il->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->ops->update_bcast_stations)
			ret = il->ops->update_bcast_stations(il);

set_ch_out:
		/* The list of supported rates and the rate mask can differ
		 * per band; since the band may have changed, reset the rate
		 * mask to what mac80211 lists. */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
		if (!il->power_data.ps_disabled)
			IL_WARN_ONCE("Enabling power save might cause firmware crashes\n");
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	/* commit only when the staging RXON actually changed */
	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il);

out:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_config);
5187
/*
 * il_mac_reset_tsf - mac80211 reset_tsf callback
 *
 * Drops the cached HT config, beacon skb and timestamp, cancels any
 * scan, forces an unassociated RXON, and reprograms the rate masks.
 */
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	spin_lock_irqsave(&il->lock, flags);

	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));

	/* new association: get rid of the cached ibss beacon skb */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;
	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting the association process */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il);

	il_set_rate(il);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);
5226
5227static void
5228il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5229{
5230 struct il_ht_config *ht_conf = &il->current_ht_config;
5231 struct ieee80211_sta *sta;
5232 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5233
5234 D_ASSOC("enter:\n");
5235
5236 if (!il->ht.enabled)
5237 return;
5238
5239 il->ht.protection =
5240 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5241 il->ht.non_gf_sta_present =
5242 !!(bss_conf->
5243 ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5244
5245 ht_conf->single_chain_sufficient = false;
5246
5247 switch (vif->type) {
5248 case NL80211_IFTYPE_STATION:
5249 rcu_read_lock();
5250 sta = ieee80211_find_sta(vif, bss_conf->bssid);
5251 if (sta) {
5252 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
5253 int maxstreams;
5254
5255 maxstreams =
5256 (ht_cap->mcs.
5257 tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
5258 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5259 maxstreams += 1;
5260
5261 if (ht_cap->mcs.rx_mask[1] == 0 &&
5262 ht_cap->mcs.rx_mask[2] == 0)
5263 ht_conf->single_chain_sufficient = true;
5264 if (maxstreams <= 1)
5265 ht_conf->single_chain_sufficient = true;
5266 } else {
5267
5268
5269
5270
5271
5272
5273 ht_conf->single_chain_sufficient = true;
5274 }
5275 rcu_read_unlock();
5276 break;
5277 case NL80211_IFTYPE_ADHOC:
5278 ht_conf->single_chain_sufficient = true;
5279 break;
5280 default:
5281 break;
5282 }
5283
5284 D_ASSOC("leave\n");
5285}
5286
/*
 * il_set_no_assoc - drop association state in the hardware
 *
 * Clears the associated filter flag and association ID in the staging
 * RXON and commits it, so the uCode stops acting as associated and
 * sends no more packets.
 */
static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il->staging.assoc_id = 0;
	il_commit_rxon(il);
}
5299
/*
 * il_beacon_update - fetch a fresh beacon from mac80211 and cache it
 *
 * Grabs a new beacon skb for the vif, swaps it in under il->lock,
 * records its timestamp, and (if the radio is ready) runs the
 * device-specific post_associate hook.  Caller must hold il->mutex
 * (asserted below).
 */
static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("update beacon with no beaconing enabled\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	/* release the previously cached beacon, if any */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->ops->post_associate(il);
}
5341
/*
 * il_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Applies BSS configuration updates (QoS, beaconing, BSSID, ERP flags,
 * HT, association state, IBSS stations) to the staging RXON and to the
 * firmware.
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: changes 0x%x\n", changes);

	if (!il_is_alive(il)) {
		D_MAC80211("leave - not alive\n");
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		il->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/* cache beacon state; used by il_beacon_update() */
		if (vif->bss_conf.enable_beacon)
			il->beacon_enabled = true;
		else
			il->beacon_enabled = false;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * A zero BSSID means mac80211 gave up on this BSS (e.g. the
		 * association attempt timed out); unblock any queues that
		 * were stopped while waiting on a passive channel.
		 */
		if (is_zero_ether_addr(bss_conf->bssid))
			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);

		/*
		 * Cancel any background HW scan before reprogramming the
		 * BSSID; otherwise authentication can fail.
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			D_MAC80211("leave - scan abort failed\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* program the new BSSID into the staging RXON */
		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

		/* cached copy, currently needed in a few places */
		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection applies only outside the 5 GHz band */
		if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/*
		 * Intentionally not handled: basic rates are currently
		 * derived in il_set_rate() instead of taken from
		 * bss_conf->basic_rates.
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->sync_tsf;

			if (!il_is_rfkill(il))
				il->ops->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes && il_is_associated(il) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il);
		if (!ret) {
			/* sync active with the latest staging change */
			memcpy((void *)&il->active, &il->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(il->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->ops->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = il->ops->manage_ibss_station(il, vif,
						   bss_conf->ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5503
/*
 * il_isr - hardware interrupt handler (top half)
 *
 * Disables device interrupts, checks whether this device actually has
 * anything pending (the IRQ line may be shared), and if so defers the
 * real servicing to il->irq_tasklet.  Returns IRQ_NONE when the
 * interrupt was not ours, IRQ_HANDLED otherwise.
 */
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/*
	 * Disable (but don't clear!) interrupts here to avoid back-to-back
	 * ISRs and sporadic interrupts from our NIC.  If we have something
	 * to service, the tasklet will re-enable interrupts; if not, we
	 * re-enable them below before leaving.  The old mask is kept only
	 * for the debug print further down.
	 */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/*
	 * Ignore interrupt if there's nothing in NIC to service.  This may
	 * be due to the IRQ being shared with another device, or due to a
	 * sporadic interrupt thrown from our NIC.
	 */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/*
		 * All-ones / 0xa5a5a5a* reads mean the hardware is gone
		 * (e.g. card pulled).  Claim the interrupt but leave device
		 * interrupts disabled.
		 */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	/* Mask off the scheduler bit before deciding whether to defer work */
	inta &= ~CSR_INT_BIT_SCD;

	/* The tasklet will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/*
	 * Nothing to service: re-enable interrupts here, but only if they
	 * were logically enabled before (i.e. only disabled by this IRQ).
	 */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);
5564
5565
5566
5567
5568
5569void
5570il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5571 __le16 fc, __le32 *tx_flags)
5572{
5573 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5574 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5575 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5576 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5577
5578 if (!ieee80211_is_mgmt(fc))
5579 return;
5580
5581 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5582 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5583 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5584 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5585 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5586 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5587 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5588 break;
5589 }
5590 } else if (info->control.rates[0].
5591 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5592 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5593 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5594 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5595 }
5596}
5597EXPORT_SYMBOL(il_tx_cmd_protection);
5598