1
2
3
4
5
6
7
8
9
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/etherdevice.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/types.h>
17#include <linux/lockdep.h>
18#include <linux/pci.h>
19#include <linux/dma-mapping.h>
20#include <linux/delay.h>
21#include <linux/skbuff.h>
22#include <net/mac80211.h>
23
24#include "common.h"
25
26int
27_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
28{
29 const int interval = 10;
30 int t = 0;
31
32 do {
33 if ((_il_rd(il, addr) & mask) == (bits & mask))
34 return t;
35 udelay(interval);
36 t += interval;
37 } while (t < timeout);
38
39 return -ETIMEDOUT;
40}
41EXPORT_SYMBOL(_il_poll_bit);
42
43void
44il_set_bit(struct il_priv *p, u32 r, u32 m)
45{
46 unsigned long reg_flags;
47
48 spin_lock_irqsave(&p->reg_lock, reg_flags);
49 _il_set_bit(p, r, m);
50 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
51}
52EXPORT_SYMBOL(il_set_bit);
53
54void
55il_clear_bit(struct il_priv *p, u32 r, u32 m)
56{
57 unsigned long reg_flags;
58
59 spin_lock_irqsave(&p->reg_lock, reg_flags);
60 _il_clear_bit(p, r, m);
61 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
62}
63EXPORT_SYMBOL(il_clear_bit);
64
/*
 * Request host access to the NIC registers while the MAC may be asleep.
 * Returns true on success, leaving MAC_ACCESS_REQ set; the caller must
 * balance with _il_release_nic_access().  Callers in this file invoke it
 * with il->reg_lock held.
 */
bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* Ask the device to wake up (exit power-save / sleep state). */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Poll (up to 15 ms) until the hardware signals that register
	 * access is enabled: the MAC clock must be ready and the device
	 * must not be on its way to sleep.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			  "(CSR_GP_CNTRL 0x%08x)\n", val);
		/* Kick the firmware with an NMI so it can report the hang. */
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
106
107int
108il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
109{
110 const int interval = 10;
111 int t = 0;
112
113 do {
114 if ((il_rd(il, addr) & mask) == mask)
115 return t;
116 udelay(interval);
117 t += interval;
118 } while (t < timeout);
119
120 return -ETIMEDOUT;
121}
122EXPORT_SYMBOL(il_poll_bit);
123
124u32
125il_rd_prph(struct il_priv *il, u32 reg)
126{
127 unsigned long reg_flags;
128 u32 val;
129
130 spin_lock_irqsave(&il->reg_lock, reg_flags);
131 _il_grab_nic_access(il);
132 val = _il_rd_prph(il, reg);
133 _il_release_nic_access(il);
134 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
135 return val;
136}
137EXPORT_SYMBOL(il_rd_prph);
138
139void
140il_wr_prph(struct il_priv *il, u32 addr, u32 val)
141{
142 unsigned long reg_flags;
143
144 spin_lock_irqsave(&il->reg_lock, reg_flags);
145 if (likely(_il_grab_nic_access(il))) {
146 _il_wr_prph(il, addr, val);
147 _il_release_nic_access(il);
148 }
149 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
150}
151EXPORT_SYMBOL(il_wr_prph);
152
153u32
154il_read_targ_mem(struct il_priv *il, u32 addr)
155{
156 unsigned long reg_flags;
157 u32 value;
158
159 spin_lock_irqsave(&il->reg_lock, reg_flags);
160 _il_grab_nic_access(il);
161
162 _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
163 value = _il_rd(il, HBUS_TARG_MEM_RDAT);
164
165 _il_release_nic_access(il);
166 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
167 return value;
168}
169EXPORT_SYMBOL(il_read_targ_mem);
170
171void
172il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
173{
174 unsigned long reg_flags;
175
176 spin_lock_irqsave(&il->reg_lock, reg_flags);
177 if (likely(_il_grab_nic_access(il))) {
178 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
179 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
180 _il_release_nic_access(il);
181 }
182 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
183}
184EXPORT_SYMBOL(il_write_targ_mem);
185
/*
 * Map a host-command / notification id to a symbolic name for logging.
 * IL_CMD() presumably expands each entry to a case returning the
 * stringified identifier (defined in common.h — confirm there).
 * Unknown ids yield "UNKNOWN".
 */
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(il_get_cmd_string);
237
238#define HOST_COMPLETE_TIMEOUT (HZ / 2)
239
/*
 * Default completion callback for asynchronous host commands: logs a
 * failure reported in the packet header; under CONFIG_IWLEGACY_DEBUG
 * also traces successful completions (noisy commands via D_HC_DUMP).
 */
static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}
262
263static int
264il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
265{
266 int ret;
267
268 BUG_ON(!(cmd->flags & CMD_ASYNC));
269
270
271 BUG_ON(cmd->flags & CMD_WANT_SKB);
272
273
274 if (!cmd->callback)
275 cmd->callback = il_generic_cmd_callback;
276
277 if (test_bit(S_EXIT_PENDING, &il->status))
278 return -EBUSY;
279
280 ret = il_enqueue_hcmd(il, cmd);
281 if (ret < 0) {
282 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
283 il_get_cmd_string(cmd->id), ret);
284 return ret;
285 }
286 return 0;
287}
288
/*
 * Send a host command and wait (up to HOST_COMPLETE_TIMEOUT) for the
 * firmware to complete it.  Must be called with il->mutex held.
 * Returns 0 on success, or a negative errno: -ETIMEDOUT on timeout,
 * -ECANCELED on rfkill, -EIO on firmware error or missing reply.
 */
int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command must not install a callback */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* Completion path is expected to clear S_HCMD_ACTIVE and wake us. */
	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Drop the CMD_WANT_SKB flag on the queued command so that
		 * a late reply is not handed to this (already returning)
		 * caller — NOTE(review): presumed rationale, confirm against
		 * the rx/cmd completion path.
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
	/* falls through: release any reply page already attached */
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);
375
376int
377il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
378{
379 if (cmd->flags & CMD_ASYNC)
380 return il_send_cmd_async(il, cmd);
381
382 return il_send_cmd_sync(il, cmd);
383}
384EXPORT_SYMBOL(il_send_cmd);
385
/*
 * Convenience wrapper: send @data of @len bytes as synchronous host
 * command @id.  Returns il_send_cmd_sync()'s result.
 */
int
il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return il_send_cmd_sync(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu);
398
399int
400il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
401 void (*callback) (struct il_priv *il,
402 struct il_device_cmd *cmd,
403 struct il_rx_pkt *pkt))
404{
405 struct il_host_cmd cmd = {
406 .id = id,
407 .len = len,
408 .data = data,
409 };
410
411 cmd.flags |= CMD_ASYNC;
412 cmd.callback = callback;
413
414 return il_send_cmd_async(il, &cmd);
415}
416EXPORT_SYMBOL(il_send_cmd_pdu_async);
417
418
/* LED behaviour, selectable at module load (read-only afterwards). */
static int led_mode;
module_param(led_mode, int, 0444);
MODULE_PARM_DESC(led_mode,
		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
423
424
425
426
427
428
429
430
431
432
433
434
435
436
/*
 * Throughput-to-blink-rate table for the mac80211 tpt LED trigger:
 * higher throughput (units presumably kbit/s — confirm against
 * ieee80211_create_tpt_led_trigger) selects a shorter blink time.
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0, .blink_time = 334},
	{.throughput = 1 * 1024 - 1, .blink_time = 260},
	{.throughput = 5 * 1024 - 1, .blink_time = 220},
	{.throughput = 10 * 1024 - 1, .blink_time = 190},
	{.throughput = 20 * 1024 - 1, .blink_time = 170},
	{.throughput = 50 * 1024 - 1, .blink_time = 150},
	{.throughput = 70 * 1024 - 1, .blink_time = 130},
	{.throughput = 100 * 1024 - 1, .blink_time = 110},
	{.throughput = 200 * 1024 - 1, .blink_time = 80},
	{.throughput = 300 * 1024 - 1, .blink_time = 50},
};
449
450
451
452
453
454
455
456
457
458
459
460
/*
 * Scale a blink @time by @compensation/64 (the >>6 divides by 64).
 * A zero compensation is treated as "no compensation configured":
 * log it and keep the pre-defined time unchanged.
 */
static inline u8
il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8) ((time * compensation) >> 6);
}
472
473
474static int
475il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
476{
477 struct il_led_cmd led_cmd = {
478 .id = IL_LED_LINK,
479 .interval = IL_DEF_LED_INTRVL
480 };
481 int ret;
482
483 if (!test_bit(S_READY, &il->status))
484 return -EBUSY;
485
486 if (il->blink_on == on && il->blink_off == off)
487 return 0;
488
489 if (off == 0) {
490
491 on = IL_LED_SOLID;
492 }
493
494 D_LED("Led blink time compensation=%u\n",
495 il->cfg->led_compensation);
496 led_cmd.on =
497 il_blink_compensation(il, on,
498 il->cfg->led_compensation);
499 led_cmd.off =
500 il_blink_compensation(il, off,
501 il->cfg->led_compensation);
502
503 ret = il->ops->send_led_cmd(il, &led_cmd);
504 if (!ret) {
505 il->blink_on = on;
506 il->blink_off = off;
507 }
508 return ret;
509}
510
511static void
512il_led_brightness_set(struct led_classdev *led_cdev,
513 enum led_brightness brightness)
514{
515 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
516 unsigned long on = 0;
517
518 if (brightness > 0)
519 on = IL_LED_SOLID;
520
521 il_led_cmd(il, on, 0);
522}
523
524static int
525il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
526 unsigned long *delay_off)
527{
528 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
529
530 return il_led_cmd(il, *delay_on, *delay_off);
531}
532
/*
 * Register the device LED with the LED class, choosing the default
 * trigger according to led_mode (module parameter) or the per-device
 * cfg default.  On registration failure the name is freed and
 * il->led_registered stays false, so il_leds_exit() is a no-op.
 */
void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	/* Module parameter 0 defers to the per-device configuration. */
	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		/* Cannot happen: resolved to a concrete mode above. */
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		/* Blink proportionally to throughput (see il_blink[]). */
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		/* Follow the rfkill/radio state. */
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);
573
574void
575il_leds_exit(struct il_priv *il)
576{
577 if (!il->led_registered)
578 return;
579
580 led_classdev_unregister(&il->led);
581 kfree(il->led.name);
582}
583EXPORT_SYMBOL(il_leds_exit);
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
/*
 * Per-band channel-number tables matching the EEPROM regulatory bands.
 * Bands 1-5 are regular channels (band 1 = 2.4 GHz channels 1-14, bands
 * 2-5 presumably the 5 GHz ranges — confirm against the EEPROM layout);
 * bands 6-7 list HT40 primary channels (see il_init_channel_map()).
 */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

static const u8 il_eeprom_band_2[] = {
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {
	145, 149, 153, 157, 161, 165
};

/* 2.4 GHz HT40 primary channels */
static const u8 il_eeprom_band_6[] = {
	1, 2, 3, 4, 5, 6, 7
};

/* 5 GHz HT40 primary channels */
static const u8 il_eeprom_band_7[] = {
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
645
646
647
648
649
650
651
652static int
653il_eeprom_verify_signature(struct il_priv *il)
654{
655 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
656 int ret = 0;
657
658 D_EEPROM("EEPROM signature=0x%08x\n", gp);
659 switch (gp) {
660 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
661 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
662 break;
663 default:
664 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
665 ret = -ENOENT;
666 break;
667 }
668 return ret;
669}
670
671const u8 *
672il_eeprom_query_addr(const struct il_priv *il, size_t offset)
673{
674 BUG_ON(offset >= il->cfg->eeprom_size);
675 return &il->eeprom[offset];
676}
677EXPORT_SYMBOL(il_eeprom_query_addr);
678
679u16
680il_eeprom_query16(const struct il_priv *il, size_t offset)
681{
682 if (!il->eeprom)
683 return 0;
684 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
685}
686EXPORT_SYMBOL(il_eeprom_query16);
687
688
689
690
691
692
693
694
/*
 * il_eeprom_init - load the whole EEPROM image into il->eeprom
 *
 * Allocates the buffer, verifies the EEPROM signature, acquires the
 * device EEPROM semaphore, and copies the image out 16 bits at a time.
 * The device APM is brought up for the read and stopped again before
 * returning.  Returns 0 on success or a negative errno; on failure
 * the buffer is freed via il_eeprom_free().
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	int addr;

	/* allocate the cache for the full EEPROM image */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom)
		return -ENOMEM;

	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure the driver (instead of uCode) may read the EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* The EEPROM is an array of 16-bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		/* Wait for the hardware to latch the requested word */
		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		/* Data is carried in the upper 16 bits of the register */
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* De-activate APM again now that the read is finished */
	il_apm_stop(il);
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
765
/* Release the cached EEPROM image; safe to call more than once. */
void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);
773
774static void
775il_init_band_reference(const struct il_priv *il, int eep_band,
776 int *eeprom_ch_count,
777 const struct il_eeprom_channel **eeprom_ch_info,
778 const u8 **eeprom_ch_idx)
779{
780 u32 offset = il->cfg->regulatory_bands[eep_band - 1];
781
782 switch (eep_band) {
783 case 1:
784 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
785 *eeprom_ch_info =
786 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
787 offset);
788 *eeprom_ch_idx = il_eeprom_band_1;
789 break;
790 case 2:
791 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
792 *eeprom_ch_info =
793 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
794 offset);
795 *eeprom_ch_idx = il_eeprom_band_2;
796 break;
797 case 3:
798 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
799 *eeprom_ch_info =
800 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
801 offset);
802 *eeprom_ch_idx = il_eeprom_band_3;
803 break;
804 case 4:
805 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
806 *eeprom_ch_info =
807 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
808 offset);
809 *eeprom_ch_idx = il_eeprom_band_4;
810 break;
811 case 5:
812 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
813 *eeprom_ch_info =
814 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
815 offset);
816 *eeprom_ch_idx = il_eeprom_band_5;
817 break;
818 case 6:
819 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
820 *eeprom_ch_info =
821 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
822 offset);
823 *eeprom_ch_idx = il_eeprom_band_6;
824 break;
825 case 7:
826 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
827 *eeprom_ch_info =
828 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
829 offset);
830 *eeprom_ch_idx = il_eeprom_band_7;
831 break;
832 default:
833 BUG();
834 }
835}
836
837#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
838 ? # x " " : "")
839
840
841
842
843
/*
 * Record HT40 EEPROM data for an existing channel entry and, when the
 * EEPROM marks the channel valid, clear the given "no HT40 extension"
 * restriction bit.  Returns -1 if the channel is not in the map or not
 * valid, 0 otherwise.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	/* Cast away const: this helper updates the driver's channel map. */
	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	/* Valid in EEPROM -> allow this HT40 extension direction */
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
876
877#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
878 ? # x " " : "")
879
880
881
882
/*
 * Build the driver's channel map (il->channel_info) from the EEPROM
 * regulatory bands.  Bands 1-5 populate the per-channel entries; bands
 * 6-7 then annotate HT40 capability (skipped when the cfg marks both
 * as EEPROM_REGULATORY_BAND_NO_HT40).  Idempotent: returns 0 early if
 * the map was already built.  Returns -ENOMEM on allocation failure.
 */
int
il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	/* Bands 1-5 only; bands 6-7 modify existing entries. */
	il->channel_count =
	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
	    ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info =
	    kcalloc(il->channel_count, sizeof(struct il_channel_info),
		    GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/*
	 * Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map (band 1 first, so 2.4 GHz channels occupy the
	 * first 14 entries — il_get_channel_info() relies on that).
	 */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* Populate one ch_info entry per channel in this band */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			ch_info->band =
			    (band ==
			     1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

			/* Copy the raw EEPROM record for this channel */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Working copy of the flags (may diverge later) */
			ch_info->flags = eeprom_ch_info[ch].flags;

			/* HT40 is forbidden until bands 6-7 enable it */
			ch_info->ht40_extension_channel =
			    IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n", ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ? "5.2" :
					 "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n", ch_info->channel,
				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].
				   flags & EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].
				    flags & EEPROM_CHANNEL_RADAR)) ? "" :
				 "not ");

			ch_info++;
		}
	}

	/* Nothing more to do when the device has no HT40 bands */
	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum nl80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* EEPROM band 6 is 2.4 GHz, band 7 is 5 GHz */
		ieeeband =
		    (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up control (primary) channel */
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up extension channel (primary + 4) */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);
1015
1016
1017
1018
/*
 * Free the channel map.  channel_count is zeroed so that
 * il_init_channel_map() can rebuild it; NOTE(review): channel_info is
 * not NULLed here — confirm no caller dereferences it after this.
 */
void
il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);
1026
1027
1028
1029
1030
1031
/*
 * Look up a channel map entry by nl80211 band and channel number.
 * Returns NULL when the channel is not present.  The 2.4 GHz path
 * indexes directly: il_init_channel_map() stores band-1 channels 1-14
 * in the first 14 entries, 5 GHz entries from idx 14 onwards.
 */
const struct il_channel_info *
il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
		    u16 channel)
{
	int i;

	switch (band) {
	case NL80211_BAND_5GHZ:
		/* Linear search over the 5 GHz portion of the map */
		for (i = 14; i < il->channel_count; i++) {
			if (il->channel_info[i].channel == channel)
				return &il->channel_info[i];
		}
		break;
	case NL80211_BAND_2GHZ:
		if (channel >= 1 && channel <= 14)
			return &il->channel_info[channel - 1];
		break;
	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL(il_get_channel_info);
1056
1057
1058
1059
1060
1061
1062
1063
1064#define SLP_VEC(X0, X1, X2, X3, X4) { \
1065 cpu_to_le32(X0), \
1066 cpu_to_le32(X1), \
1067 cpu_to_le32(X2), \
1068 cpu_to_le32(X3), \
1069 cpu_to_le32(X4) \
1070}
1071
/*
 * Fill a power-table command from the current power-save settings and
 * the associated vif's DTIM period.  When power save is disabled the
 * command is left zeroed (sleep not allowed).
 */
static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	/* Sleep-interval vectors, chosen by DTIM-period bucket */
	static const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* PS disabled: leave the zeroed command (no sleep allowed) */
	if (il->power_data.ps_disabled)
		return;

	/*
	 * NOTE(review): this plain assignment discards the PCI_PM flag
	 * set above instead of OR-ing into it — confirm whether that is
	 * intentional before depending on IL_POWER_PCI_PM_MSK here.
	 */
	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;

	/* Pick the sleep-interval vector for this DTIM period */
	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	/* No DTIM period known (not associated?) -> don't skip DTIMs */
	if (dtim_period == 0) {
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		/* Allow sleeping over DTIM, capped to the last vector slot */
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	/* Clamp every interval entry to the computed maximum */
	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
1138
/*
 * Log the contents of a power-table command and send it to the
 * firmware synchronously.  Returns il_send_cmd_pdu()'s result.
 */
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}
1156
/*
 * Apply a power-table command unless it matches the one already in
 * effect (use @force to resend regardless).  During a scan the command
 * is only stored in sleep_cmd_next and deferred.  Must be called with
 * il->mutex held.  Returns 0 on success/deferral, negative errno on
 * failure.
 */
static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Chain flags may only be updated when noise calibration is done */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Remember the request; a scan in progress defers the send */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	/* Set PMI before the command so the state is consistent on success */
	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		/* Record the now-active command for the memcmp above */
		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}
1203
1204int
1205il_power_update_mode(struct il_priv *il, bool force)
1206{
1207 struct il_powertable_cmd cmd;
1208
1209 il_build_powertable_cmd(il, &cmd);
1210
1211 return il_power_set_mode(il, &cmd, force);
1212}
1213EXPORT_SYMBOL(il_power_update_mode);
1214
1215
/* Initialize power-management state at driver setup time. */
void
il_power_initialize(struct il_priv *il)
{
	u16 lctl;

	/* PCI PM is considered usable only when ASPM L0s is disabled */
	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	/* -1 = no debug override of the sleep level */
	il->power_data.debug_sleep_level_override = -1;

	/* Zeroed sleep_cmd guarantees the first update is actually sent */
	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);
1229
1230
1231
1232
1233#define IL_ACTIVE_DWELL_TIME_24 (30)
1234#define IL_ACTIVE_DWELL_TIME_52 (20)
1235
1236#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
1237#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
1238
1239
1240
1241
1242#define IL_PASSIVE_DWELL_TIME_24 (20)
1243#define IL_PASSIVE_DWELL_TIME_52 (10)
1244#define IL_PASSIVE_DWELL_BASE (100)
1245#define IL_CHANNEL_TUNE_TIME 5
1246
/*
 * Ask the firmware to abort the hardware scan.  Returns 0 when the
 * firmware accepted the abort, -EIO when the device/firmware state
 * does not allow it or the firmware declined.
 */
static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/*
	 * Only send the abort when the device is usable and a HW scan
	 * is actually running; otherwise report -EIO so the caller
	 * forces the scan end itself.
	 */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/*
		 * Firmware did not accept the abort — presumably the
		 * scan already completed or never started; treat it as
		 * an error so the caller cleans up via il_force_scan_end.
		 */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}
1286
1287static void
1288il_complete_scan(struct il_priv *il, bool aborted)
1289{
1290 struct cfg80211_scan_info info = {
1291 .aborted = aborted,
1292 };
1293
1294
1295 if (il->scan_request) {
1296 D_SCAN("Complete scan in mac80211\n");
1297 ieee80211_scan_completed(il->hw, &info);
1298 }
1299
1300 il->scan_vif = NULL;
1301 il->scan_request = NULL;
1302}
1303
/*
 * Unconditionally end a scan from the driver side: clear all scan
 * state bits and report an aborted scan to mac80211.  Must be called
 * with il->mutex held.
 */
void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}
1320
1321static void
1322il_do_scan_abort(struct il_priv *il)
1323{
1324 int ret;
1325
1326 lockdep_assert_held(&il->mutex);
1327
1328 if (!test_bit(S_SCANNING, &il->status)) {
1329 D_SCAN("Not performing scan to abort\n");
1330 return;
1331 }
1332
1333 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1334 D_SCAN("Scan abort in progress\n");
1335 return;
1336 }
1337
1338 ret = il_send_scan_abort(il);
1339 if (ret) {
1340 D_SCAN("Send scan abort failed %d\n", ret);
1341 il_force_scan_end(il);
1342 } else
1343 D_SCAN("Successfully send scan abort\n");
1344}
1345
1346
1347
1348
/*
 * Request asynchronous scan cancellation by queueing the abort_scan
 * work item; returns 0 immediately without waiting.
 */
int
il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);
1357
1358
1359
1360
1361
1362
1363int
1364il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1365{
1366 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1367
1368 lockdep_assert_held(&il->mutex);
1369
1370 D_SCAN("Scan cancel timeout\n");
1371
1372 il_do_scan_abort(il);
1373
1374 while (time_before_eq(jiffies, timeout)) {
1375 if (!test_bit(S_SCAN_HW, &il->status))
1376 break;
1377 msleep(20);
1378 }
1379
1380 return test_bit(S_SCAN_HW, &il->status);
1381}
1382EXPORT_SYMBOL(il_scan_cancel_timeout);
1383
1384
/* Handle C_SCAN request notification: debug-only status trace. */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1396
1397
/*
 * Handle N_SCAN_START: record the scan-start TSF (used later to
 * compute per-channel elapsed time) and trace the details.
 */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;
	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: " "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}
1410
1411
/* Rx handler for N_SCAN_RESULTS: debug-only dump of per-channel scan
 * results, including time elapsed since the scan-start TSF. */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1427
1428
/* Rx handler for N_SCAN_COMPLETE: the uCode finished (or aborted) the
 * hardware scan.  Clear S_SCAN_HW and defer the rest of the completion
 * handling to the scan_completed work item (needs il->mutex). */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{

	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}
1449
/* Install the scan-related Rx packet handlers into the dispatch table. */
void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1460
1461u16
1462il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
1463 u8 n_probes)
1464{
1465 if (band == NL80211_BAND_5GHZ)
1466 return IL_ACTIVE_DWELL_TIME_52 +
1467 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1468 else
1469 return IL_ACTIVE_DWELL_TIME_24 +
1470 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1471}
1472EXPORT_SYMBOL(il_get_active_dwell_time);
1473
/*
 * il_get_passive_dwell_time - passive-scan dwell time for @band
 *
 * Starts from a per-band base value.  When associated, the dwell is
 * additionally clamped so we do not stay off-channel longer than ~98%
 * of the beacon interval minus twice the channel tune time.
 * NOTE(review): @vif is unused; beacon interval is read from il->vif.
 */
u16
il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band ==
	     NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time).
		 * A zero or oversized beacon interval falls back to the
		 * base dwell before the percentage is applied.
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
1502
1503void
1504il_init_scan_params(struct il_priv *il)
1505{
1506 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1507 if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
1508 il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
1509 if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
1510 il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
1511}
1512EXPORT_SYMBOL(il_init_scan_params);
1513
/*
 * il_scan_initiate - start a hardware scan for @vif
 *
 * Caller must hold il->mutex.  Refuses to start when the RF is not
 * ready, a HW scan is already running, or an abort is pending.  On
 * success arms the scan_check watchdog to catch a scan that never
 * completes.  Returns 0 or a negative errno.
 */
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	/* Mark scanning before issuing the command so completion/abort
	 * paths see a consistent state; roll back on failure. */
	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1554
/*
 * il_mac_hw_scan - mac80211 hw_scan callback
 *
 * Validates the request, records it (request/vif/band) and kicks off
 * the hardware scan.  Returns -EINVAL for an empty channel list,
 * -EAGAIN when a scan is already in progress, otherwise the result of
 * il_scan_initiate().
 */
int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Can not scan on no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);
1591
/* Watchdog work item: fires when a scan has been running too long
 * without completing; forcibly ends the scan state machine. */
static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}
1607
1608
1609
1610
/*
 * il_fill_probe_req - fill a probe request frame into @frame
 * @ta:     transmitter address (copied into SA)
 * @ies:    optional extra information elements to append
 * @ie_len: length of @ies in bytes
 * @left:   space available in the output buffer
 *
 * Builds a broadcast probe request (24-byte management header plus an
 * empty SSID element) and appends @ies.  Returns the total frame length,
 * or 0 when the buffer is too small for the mandatory parts.
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * the 24-byte header and the minimal SSID IE. */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE (zero-length, i.e. wildcard) */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
1655
/* Work item queued by il_scan_cancel(): abort the scan from process
 * context, waiting up to 200 ms for the hardware to stop. */
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}
1669
/*
 * Work item run after N_SCAN_COMPLETE (or a forced end): finish the
 * scan on the mac80211 side and restore post-scan device settings
 * (power mode, TX power, device-specific post_scan hook).
 */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	/* If S_SCANNING was already clear, someone (e.g. a forced scan
	 * end) completed the scan before us; skip il_complete_scan(). */
	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1710
/* Initialize the scan-related work items (completion, abort, watchdog). */
void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);
1719
/*
 * Cancel all scan work items.  If the watchdog was still pending, a
 * scan was in flight, so force the scan state machine to "ended" under
 * il->mutex to leave mac80211 in a consistent state.
 */
void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1733
1734
1735static void
1736il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1737{
1738
1739 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1740 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1741 sta_id, il->stations[sta_id].sta.sta.addr);
1742
1743 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1744 D_ASSOC("STA id %u addr %pM already present"
1745 " in uCode (according to driver)\n", sta_id,
1746 il->stations[sta_id].sta.sta.addr);
1747 } else {
1748 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1749 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1750 il->stations[sta_id].sta.sta.addr);
1751 }
1752}
1753
/*
 * il_process_add_sta_resp - handle the uCode response to C_ADD_STA
 * @addsta: the command that was sent (used for logging)
 * @pkt:    response packet from the uCode
 * @sync:   true when called from the synchronous path
 *
 * On ADD_STA_SUCCESS_MSK marks the station uCode-active; all other
 * statuses are logged and left as errors.  Returns 0 on success, -EIO
 * otherwise.
 */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * Log the address from the command buffer as well: the driver
	 * table entry and the command copy may diverge if the station
	 * table changed while the command was in flight.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1813
/* Async completion callback for C_ADD_STA: forward the response to the
 * common response handler (sync=false). */
static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);

}
1823
/*
 * il_send_add_sta - send C_ADD_STA to the uCode
 * @sta:   station command to send (copied into a local buffer via
 *         the device-specific build_addsta_hcmd hook)
 * @flags: CMD_SYNC or CMD_ASYNC
 *
 * Asynchronous sends return immediately (response handled by
 * il_add_sta_callback); synchronous sends wait for and process the
 * response.  Returns 0 on success or a negative errno.
 */
int
il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
{
	struct il_rx_pkt *pkt = NULL;
	int ret = 0;
	u8 data[sizeof(*sta)];
	struct il_host_cmd cmd = {
		.id = C_ADD_STA,
		.flags = flags,
		.data = data,
	};
	u8 sta_id __maybe_unused = sta->sta.sta_id;

	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
	       flags & CMD_ASYNC ? "a" : "");

	if (flags & CMD_ASYNC)
		cmd.callback = il_add_sta_callback;
	else {
		/* synchronous path blocks for the reply */
		cmd.flags |= CMD_WANT_SKB;
		might_sleep();
	}

	cmd.len = il->ops->build_addsta_hcmd(sta, data);
	ret = il_send_cmd(il, &cmd);
	if (ret)
		return ret;
	if (flags & CMD_ASYNC)
		return 0;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	ret = il_process_add_sta_resp(il, sta, pkt, true);

	il_free_pages(il, cmd.reply_page);

	return ret;
}
EXPORT_SYMBOL(il_send_add_sta);
1862
1863static void
1864il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
1865{
1866 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1867 __le32 sta_flags;
1868
1869 if (!sta || !sta_ht_inf->ht_supported)
1870 goto done;
1871
1872 D_ASSOC("spatial multiplexing power save mode: %s\n",
1873 (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
1874 (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
1875 "disabled");
1876
1877 sta_flags = il->stations[idx].sta.station_flags;
1878
1879 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1880
1881 switch (sta->smps_mode) {
1882 case IEEE80211_SMPS_STATIC:
1883 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1884 break;
1885 case IEEE80211_SMPS_DYNAMIC:
1886 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1887 break;
1888 case IEEE80211_SMPS_OFF:
1889 break;
1890 default:
1891 IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
1892 break;
1893 }
1894
1895 sta_flags |=
1896 cpu_to_le32((u32) sta_ht_inf->
1897 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1898
1899 sta_flags |=
1900 cpu_to_le32((u32) sta_ht_inf->
1901 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1902
1903 if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
1904 sta_flags |= STA_FLG_HT40_EN_MSK;
1905 else
1906 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1907
1908 il->stations[idx].sta.station_flags = sta_flags;
1909done:
1910 return;
1911}
1912
1913
1914
1915
1916
1917
/*
 * il_prep_station - prepare a station table entry for @addr
 * @is_ap: true when the station is the AP we associate with
 * @sta:   optional mac80211 station (for HT capabilities), may be NULL
 *
 * Caller must hold il->sta_lock.  Chooses a station id (fixed ids for
 * AP/broadcast, first free/matching slot otherwise), initializes the
 * driver-side entry and the add-station command, but does NOT send
 * anything to the uCode.  Returns the station id, or
 * IL_INVALID_STATION when the table is full.
 */
u8
il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
		struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = IL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		sta_id = il->hw_params.bcast_id;
	else
		/* Reuse an existing slot with the same address, else
		 * remember the first unused slot seen. */
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/* These two conditions have the same outcome, but keep them
	 * separate */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = 0;

	/*
	 * Set HT capability flags from the mac80211 station info, if
	 * available (no-op when @sta is NULL or not HT capable).
	 */
	il_set_ht_add_station(il, sta_id, sta);

	/* 3945 only: default TX rate depends on band */
	rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;

}
EXPORT_SYMBOL_GPL(il_prep_station);
1997
1998#define STA_WAIT_TIMEOUT (HZ/2)
1999
2000
2001
2002
/*
 * il_add_station_common - prepare and synchronously add a station
 * @sta_id_r: out parameter, receives the chosen station id
 *
 * Prepares the driver-side entry under sta_lock, then drops the lock
 * and sends C_ADD_STA synchronously from a snapshot of the command (the
 * table may change while we sleep).  Returns 0, -EINVAL when no slot is
 * available, -EEXIST when an add is in progress or already done, or the
 * send error.
 */
int
il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
		      struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);
2059
2060
2061
2062
2063
2064
/*
 * il_sta_ucode_deactivate - mark station removed from the uCode
 *
 * Caller must hold il->sta_lock.  Logs an error unless the entry was
 * exactly uCode-active (and not driver-active), then wipes the whole
 * driver-side entry.
 */
static void
il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
{
	/* Ucode must be active and driver must be non active */
	if ((il->stations[sta_id].
	     used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
	    IL_STA_UCODE_ACTIVE)
		IL_ERR("removed non active STA %u\n", sta_id);

	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;

	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
	D_ASSOC("Removed STA %u\n", sta_id);
}
2079
/*
 * il_send_remove_station - send C_REM_STA synchronously
 * @temporary: when true, keep the driver-side entry (station will be
 *             restored later) instead of deactivating it
 *
 * Returns 0 on success, -EIO when the uCode rejects the command, or the
 * send error.
 */
static int
il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
		       bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;

	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;

	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);

	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
2135
2136
2137
2138
/*
 * il_remove_station - remove a station from driver and uCode tables
 *
 * Returns 0 when the device is not ready (nothing to remove — the
 * uCode table will be rebuilt on next load), -EINVAL on bookkeeping
 * inconsistencies, otherwise the result of il_send_remove_station().
 */
int
il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
{
	unsigned long flags;

	if (!il_is_ready(il)) {
		D_INFO("Unable to remove station %pM, device not ready.\n",
		       addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);

	if (WARN_ON(sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags);

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		D_INFO("Removing %pM but non DRIVER active\n", addr);
		goto out_err;
	}

	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_INFO("Removing %pM but non UCODE active\n", addr);
		goto out_err;
	}

	/* local stations own their link-quality command; free it */
	if (il->stations[sta_id].used & IL_STA_LOCAL) {
		kfree(il->stations[sta_id].lq);
		il->stations[sta_id].lq = NULL;
	}

	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;

	il->num_stations--;

	BUG_ON(il->num_stations < 0);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_remove_station(il, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(il_remove_station);
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200void
2201il_clear_ucode_stations(struct il_priv *il)
2202{
2203 int i;
2204 unsigned long flags_spin;
2205 bool cleared = false;
2206
2207 D_INFO("Clearing ucode stations in driver\n");
2208
2209 spin_lock_irqsave(&il->sta_lock, flags_spin);
2210 for (i = 0; i < il->hw_params.max_stations; i++) {
2211 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2212 D_INFO("Clearing ucode active for station %d\n", i);
2213 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2214 cleared = true;
2215 }
2216 }
2217 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2218
2219 if (!cleared)
2220 D_INFO("No active stations found to be cleared\n");
2221}
2222EXPORT_SYMBOL(il_clear_ucode_stations);
2223
2224
2225
2226
2227
2228
2229
2230
2231
/*
 * il_restore_stations - re-add driver-active stations to the uCode
 *
 * Used after firmware restart: every station that is driver-active but
 * no longer uCode-active is re-sent via C_ADD_STA (and its link-quality
 * command, if any).  The sta_lock is dropped around the synchronous
 * command sends, so snapshots of the commands are taken first.
 */
void
il_restore_stations(struct il_priv *il)
{
	struct il_addsta_cmd sta_cmd;
	struct il_link_quality_cmd lq;
	unsigned long flags_spin;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!il_is_ready(il)) {
		D_INFO("Not ready yet, not restoring any stations.\n");
		return;
	}

	D_ASSOC("Restoring all known stations ... start.\n");
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	/* First pass: mark everything that needs restoring. */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
			D_ASSOC("Restoring sta %pM\n",
				il->stations[i].sta.sta.addr);
			il->stations[i].sta.mode = 0;
			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/* Second pass: send the commands, dropping the lock around each
	 * synchronous send. */
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
			memcpy(&sta_cmd, &il->stations[i].sta,
			       sizeof(struct il_addsta_cmd));
			send_lq = false;
			if (il->stations[i].lq) {
				memcpy(&lq, il->stations[i].lq,
				       sizeof(struct il_link_quality_cmd));
				send_lq = true;
			}
			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
			if (ret) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				IL_ERR("Adding station %pM failed.\n",
				       il->stations[i].sta.sta.addr);
				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
				il->stations[i].used &=
				    ~IL_STA_UCODE_INPROGRESS;
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command
			 */
			if (send_lq)
				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
			spin_lock_irqsave(&il->sta_lock, flags_spin);
			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	if (!found)
		D_INFO("Restoring all known stations"
		       " .... no stations to be restored.\n");
	else
		D_INFO("Restoring all known stations" " .... complete.\n");
}
EXPORT_SYMBOL(il_restore_stations);
2302
2303int
2304il_get_free_ucode_key_idx(struct il_priv *il)
2305{
2306 int i;
2307
2308 for (i = 0; i < il->sta_key_max_num; i++)
2309 if (!test_and_set_bit(i, &il->ucode_key_table))
2310 return i;
2311
2312 return WEP_INVALID_OFFSET;
2313}
2314EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2315
/*
 * il_dealloc_bcast_stations - tear down all broadcast station entries
 *
 * Clears the uCode-active flag, decrements the station count and frees
 * any link-quality command for every IL_STA_BCAST entry.
 */
void
il_dealloc_bcast_stations(struct il_priv *il)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = 0; i < il->hw_params.max_stations; i++) {
		if (!(il->stations[i].used & IL_STA_BCAST))
			continue;

		il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
		il->num_stations--;
		BUG_ON(il->num_stations < 0);
		kfree(il->stations[i].lq);
		il->stations[i].lq = NULL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
}
EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2336
#ifdef CONFIG_IWLEGACY_DEBUG
/* Debug dump of a link-quality command: station id, antenna masks and
 * the full retry rate table. */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
}
#else
/* No-op stub when debugging is compiled out. */
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367static bool
2368il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
2369{
2370 int i;
2371
2372 if (il->ht.enabled)
2373 return true;
2374
2375 D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2376 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2377 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2378 D_INFO("idx %d of LQ expects HT channel\n", i);
2379 return false;
2380 }
2381 }
2382 return true;
2383}
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
/*
 * il_send_lq_cmd - send C_TX_LINK_QUALITY_CMD for a station
 * @flags: CMD_SYNC or CMD_ASYNC (init sends must be synchronous)
 * @init:  true for the initial LQ command after adding a station; on
 *         completion the station's UCODE_INPROGRESS flag is cleared
 *
 * Validates the station and the rate table before sending.  Returns 0
 * on success or a negative errno.
 */
int
il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
	       u8 flags, bool init)
{
	int ret = 0;
	unsigned long flags_spin;

	struct il_host_cmd cmd = {
		.id = C_TX_LINK_QUALITY_CMD,
		.len = sizeof(struct il_link_quality_cmd),
		.flags = flags,
		.data = lq,
	};

	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
		return -EINVAL;

	/* Station must still be driver-active */
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	il_dump_lq_cmd(il, lq);
	BUG_ON(init && (cmd.flags & CMD_ASYNC));

	if (il_is_lq_table_valid(il, lq))
		ret = il_send_cmd(il, &cmd);
	else
		ret = -EINVAL;

	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		D_INFO("init LQ command complete,"
		       " clearing sta addition status for sta %d\n",
		       lq->sta_id);
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	return ret;
}
EXPORT_SYMBOL(il_send_lq_cmd);
2441
/*
 * il_mac_sta_remove - mac80211 sta_remove callback
 *
 * Removes the station (by the driver-private station id stored in
 * sta->drv_priv) under il->mutex.  Returns 0 or a negative errno.
 */
int
il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter station %pM\n", sta->addr);

	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
	if (ret)
		IL_ERR("Error removing station %pM\n", sta->addr);

	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_sta_remove);
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534int
2535il_rx_queue_space(const struct il_rx_queue *q)
2536{
2537 int s = q->read - q->write;
2538 if (s <= 0)
2539 s += RX_QUEUE_SIZE;
2540
2541 s -= 2;
2542 if (s < 0)
2543 s = 0;
2544 return s;
2545}
2546EXPORT_SYMBOL(il_rx_queue_space);
2547
2548
2549
2550
/*
 * il_rx_queue_update_write_ptr - tell the device about new Rx buffers
 *
 * Writes the (8-aligned) write pointer to the device register.  When
 * the device may be asleep (S_POWER_PMI), first checks the MAC_SLEEP
 * bit and, if set, only requests a wakeup and retries later.
 */
void
il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(S_POWER_PMI, &il->status)) {
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
			       reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* leave need_update set; retry after wakeup */
			goto exit_unlock;
		}

		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2591
2592int
2593il_rx_queue_alloc(struct il_priv *il)
2594{
2595 struct il_rx_queue *rxq = &il->rxq;
2596 struct device *dev = &il->pci_dev->dev;
2597 int i;
2598
2599 spin_lock_init(&rxq->lock);
2600 INIT_LIST_HEAD(&rxq->rx_free);
2601 INIT_LIST_HEAD(&rxq->rx_used);
2602
2603
2604 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2605 GFP_KERNEL);
2606 if (!rxq->bd)
2607 goto err_bd;
2608
2609 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2610 &rxq->rb_stts_dma, GFP_KERNEL);
2611 if (!rxq->rb_stts)
2612 goto err_rb;
2613
2614
2615 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2616 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2617
2618
2619
2620 rxq->read = rxq->write = 0;
2621 rxq->write_actual = 0;
2622 rxq->free_count = 0;
2623 rxq->need_update = 0;
2624 return 0;
2625
2626err_rb:
2627 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2628 rxq->bd_dma);
2629err_bd:
2630 return -ENOMEM;
2631}
2632EXPORT_SYMBOL(il_rx_queue_alloc);
2633
/*
 * Rx handler for spectrum measurement notifications: a zero state marks
 * the start of a measurement; otherwise store the report and flag it
 * ready for userspace/consumers.
 */
void
il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (!report->state) {
		D_11H("Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&il->measure_report, report, sizeof(*report));
	il->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2649
2650
2651
2652
/*
 * il_set_decrypted_flag - translate HW decryption status to mac80211
 * @decrypt_res: decryption status bits from the Rx descriptor
 * @stats:       Rx status to annotate with RX_FLAG_DECRYPTED
 *
 * Returns 0 (frame usable by mac80211) or -1 when the frame failed
 * ICV/MIC check and must be dropped.  No-op when HW decryption is
 * disabled or the frame is not protected.
 */
int
il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
		      u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	D_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * mac80211 will handle the MIC failure itself. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		fallthrough;

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is in place, drop it */
			D_RX("Packet destroyed\n");
			return -1;
		}
		fallthrough;
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			D_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(il_set_decrypted_flag);
2702
2703
2704
2705
/*
 * il_txq_update_write_ptr - tell the device about the Tx write pointer
 *
 * Writes write_ptr (with the queue id in bits 8+) to HBUS_TARG_WRPTR.
 * When the device may be asleep (S_POWER_PMI) and the MAC_SLEEP bit is
 * set, only requests a wakeup and leaves need_update set for a retry.
 */
void
il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
			       txq_id, reg);
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));

	/*
	 * else not in power-save mode,
	 * uCode will never sleep when we're
	 * trying to tx; we can use _il_wr (direct write).
	 */
	} else
		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
2742
2743
2744
2745
2746void
2747il_tx_queue_unmap(struct il_priv *il, int txq_id)
2748{
2749 struct il_tx_queue *txq = &il->txq[txq_id];
2750 struct il_queue *q = &txq->q;
2751
2752 if (q->n_bd == 0)
2753 return;
2754
2755 while (q->write_ptr != q->read_ptr) {
2756 il->ops->txq_free_tfd(il, txq);
2757 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2758 }
2759}
2760EXPORT_SYMBOL(il_tx_queue_unmap);
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770void
2771il_tx_queue_free(struct il_priv *il, int txq_id)
2772{
2773 struct il_tx_queue *txq = &il->txq[txq_id];
2774 struct device *dev = &il->pci_dev->dev;
2775 int i;
2776
2777 il_tx_queue_unmap(il, txq_id);
2778
2779
2780 if (txq->cmd) {
2781 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2782 kfree(txq->cmd[i]);
2783 }
2784
2785
2786 if (txq->q.n_bd)
2787 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2788 txq->tfds, txq->q.dma_addr);
2789
2790
2791 kfree(txq->skbs);
2792 txq->skbs = NULL;
2793
2794
2795 kfree(txq->cmd);
2796 kfree(txq->meta);
2797 txq->cmd = NULL;
2798 txq->meta = NULL;
2799
2800
2801 memset(txq, 0, sizeof(*txq));
2802}
2803EXPORT_SYMBOL(il_tx_queue_free);
2804
2805
2806
2807
/*
 * il_cmd_queue_unmap - unmap all pending host commands on the command queue
 *
 * Walks the queue from read_ptr to write_ptr, DMA-unmapping every entry
 * still marked CMD_MAPPED, then does the same for the dedicated "huge"
 * command slot that lives just past the normal window (idx q->n_win).
 */
void
il_cmd_queue_unmap(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	int i;

	/* Queue was never allocated. */
	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = il_get_cmd_idx(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_MAPPED) {
			dma_unmap_single(&il->pci_dev->dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 DMA_BIDIRECTIONAL);
			/* Mark the slot free so it is not unmapped twice. */
			txq->meta[i].flags = 0;
		}

		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	/* The huge command buffer occupies the extra slot at idx n_win. */
	i = q->n_win;
	if (txq->meta[i].flags & CMD_MAPPED) {
		dma_unmap_single(&il->pci_dev->dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 DMA_BIDIRECTIONAL);
		txq->meta[i].flags = 0;
	}
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
2842
2843
2844
2845
2846
2847
2848
2849
2850void
2851il_cmd_queue_free(struct il_priv *il)
2852{
2853 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2854 struct device *dev = &il->pci_dev->dev;
2855 int i;
2856
2857 il_cmd_queue_unmap(il);
2858
2859
2860 if (txq->cmd) {
2861 for (i = 0; i <= TFD_CMD_SLOTS; i++)
2862 kfree(txq->cmd[i]);
2863 }
2864
2865
2866 if (txq->q.n_bd)
2867 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2868 txq->tfds, txq->q.dma_addr);
2869
2870
2871 kfree(txq->cmd);
2872 kfree(txq->meta);
2873 txq->cmd = NULL;
2874 txq->meta = NULL;
2875
2876
2877 memset(txq, 0, sizeof(*txq));
2878}
2879EXPORT_SYMBOL(il_cmd_queue_free);
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904int
2905il_queue_space(const struct il_queue *q)
2906{
2907 int s = q->read_ptr - q->write_ptr;
2908
2909 if (q->read_ptr > q->write_ptr)
2910 s -= q->n_bd;
2911
2912 if (s <= 0)
2913 s += q->n_win;
2914
2915 s -= 2;
2916 if (s < 0)
2917 s = 0;
2918 return s;
2919}
2920EXPORT_SYMBOL(il_queue_space);
2921
2922
2923
2924
2925
2926static int
2927il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
2928{
2929
2930
2931
2932
2933 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
2934
2935 q->n_bd = TFD_QUEUE_SIZE_MAX;
2936
2937 q->n_win = slots;
2938 q->id = id;
2939
2940
2941
2942 BUG_ON(!is_power_of_2(slots));
2943
2944 q->low_mark = q->n_win / 4;
2945 if (q->low_mark < 4)
2946 q->low_mark = 4;
2947
2948 q->high_mark = q->n_win / 8;
2949 if (q->high_mark < 2)
2950 q->high_mark = 2;
2951
2952 q->write_ptr = q->read_ptr = 0;
2953
2954 return 0;
2955}
2956
2957
2958
2959
2960static int
2961il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2962{
2963 struct device *dev = &il->pci_dev->dev;
2964 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2965
2966
2967
2968 if (id != il->cmd_queue) {
2969 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
2970 sizeof(struct sk_buff *),
2971 GFP_KERNEL);
2972 if (!txq->skbs) {
2973 IL_ERR("Fail to alloc skbs\n");
2974 goto error;
2975 }
2976 } else
2977 txq->skbs = NULL;
2978
2979
2980
2981 txq->tfds =
2982 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2983 if (!txq->tfds)
2984 goto error;
2985
2986 txq->q.id = id;
2987
2988 return 0;
2989
2990error:
2991 kfree(txq->skbs);
2992 txq->skbs = NULL;
2993
2994 return -ENOMEM;
2995}
2996
2997
2998
2999
/*
 * il_tx_queue_init - allocate and initialize one TX queue
 * @txq_id: hardware queue number
 *
 * Allocates the per-slot command buffers and meta array, the skb array
 * and TFD ring (via il_tx_queue_alloc), then initializes the generic
 * ring bookkeeping and the device-specific registers.
 *
 * Return: 0 on success, -ENOMEM on allocation failure (all partial
 * allocations are rolled back).
 */
int
il_tx_queue_init(struct il_priv *il, u32 txq_id)
{
	int i, len, ret;
	int slots, actual_slots;
	struct il_tx_queue *txq = &il->txq[txq_id];

	/*
	 * The command queue gets one slot beyond its window: a dedicated
	 * buffer for "huge" commands (e.g. scan requests) that exceed
	 * TFD_MAX_PAYLOAD_SIZE. Data queues have no such extra slot.
	 */
	if (txq_id == il->cmd_queue) {
		slots = TFD_CMD_SLOTS;
		actual_slots = slots + 1;
	} else {
		slots = TFD_TX_CMD_SLOTS;
		actual_slots = slots;
	}

	txq->meta =
	    kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL);
	txq->cmd =
	    kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct il_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* The final (huge) slot needs the maximum command size. */
		if (i == slots)
			len = IL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Allocate skb array (data queues only) and the DMA'd TFD ring. */
	ret = il_tx_queue_alloc(il, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Map the default queues 0-3 one-to-one onto software queue ids;
	 * other queues (e.g. aggregation) are mapped elsewhere.
	 */
	if (txq_id < 4)
		il_set_swq_id(txq, txq_id, txq_id);

	/* Generic circular-buffer bookkeeping. */
	il_queue_init(il, &txq->q, slots, txq_id);

	/* Device-specific queue/register setup. */
	il->ops->txq_init(il, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	txq->meta = NULL;
	kfree(txq->cmd);
	txq->cmd = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(il_tx_queue_init);
3076
3077void
3078il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3079{
3080 int slots, actual_slots;
3081 struct il_tx_queue *txq = &il->txq[txq_id];
3082
3083 if (txq_id == il->cmd_queue) {
3084 slots = TFD_CMD_SLOTS;
3085 actual_slots = TFD_CMD_SLOTS + 1;
3086 } else {
3087 slots = TFD_TX_CMD_SLOTS;
3088 actual_slots = TFD_TX_CMD_SLOTS;
3089 }
3090
3091 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3092 txq->need_update = 0;
3093
3094
3095 il_queue_init(il, &txq->q, slots, txq_id);
3096
3097
3098 il->ops->txq_init(il, txq);
3099}
3100EXPORT_SYMBOL(il_tx_queue_reset);
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
/*
 * il_enqueue_hcmd - queue a host command for the ucode
 *
 * Copies @cmd into the next free slot of the command queue, DMA-maps it,
 * attaches it to a TFD and advances the write pointer so the ucode picks
 * it up.
 *
 * Return: the queue write idx the command was placed at (>= 0), or a
 * negative errno: -EIO when the radio is kill-switched, -ENOSPC when the
 * queue is full or the target slot is still mapped, -ENOMEM on DMA
 * mapping failure.
 */
int
il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	struct il_device_cmd *out_cmd;
	struct il_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 fix_size;

	cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));

	/*
	 * Commands larger than TFD_MAX_PAYLOAD_SIZE must be flagged huge
	 * (they use the dedicated extra slot), and nothing may exceed
	 * IL_MAX_CMD_SIZE.
	 */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IL_MAX_CMD_SIZE);

	if (il_is_rfkill(il) || il_is_ctkill(il)) {
		IL_WARN("Not sending command - %s KILL\n",
			il_is_rfkill(il) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&il->hcmd_lock, flags);

	/* Sync commands need one slot; async need an extra for safety. */
	if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&il->hcmd_lock, flags);

		IL_ERR("Restarting adapter due to command queue full\n");
		queue_work(il->workqueue, &il->restart);
		return -ENOSPC;
	}

	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	/* The slot must have been reclaimed by a previous completion. */
	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&il->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));
	out_meta->flags = cmd->flags | CMD_MAPPED;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/*
	 * The sequence number encodes queue and write idx so the
	 * completion handler can locate this entry again.
	 */
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
	    cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;

#ifdef CONFIG_IWLEGACY_DEBUG
	switch (out_cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
			  "%d bytes at %d[%d]:%d\n",
			  il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
			  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			  q->write_ptr, idx, il->cmd_queue);
		break;
	default:
		D_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
		     idx, il->cmd_queue);
	}
#endif

	phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) {
		/* NOTE: idx doubles as the return value on this path. */
		idx = -ENOMEM;
		goto out;
	}
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	txq->need_update = 1;

	if (il->ops->txq_update_byte_cnt_tbl)
		/* Keep the scheduler's byte-count table in sync. */
		il->ops->txq_update_byte_cnt_tbl(il, txq, 0);

	il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
				       U32_PAD(cmd->len));

	/* Advance the write pointer and poke the hardware. */
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
	il_txq_update_write_ptr(il, txq);

out:
	spin_unlock_irqrestore(&il->hcmd_lock, flags);
	return idx;
}
3226
3227
3228
3229
3230
3231
3232
3233
3234static void
3235il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
3236{
3237 struct il_tx_queue *txq = &il->txq[txq_id];
3238 struct il_queue *q = &txq->q;
3239 int nfreed = 0;
3240
3241 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
3242 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3243 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
3244 q->write_ptr, q->read_ptr);
3245 return;
3246 }
3247
3248 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
3249 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3250
3251 if (nfreed++ > 0) {
3252 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
3253 q->write_ptr, q->read_ptr);
3254 queue_work(il->workqueue, &il->restart);
3255 }
3256
3257 }
3258}
3259
3260
3261
3262
3263
3264
3265
3266
3267
/*
 * il_tx_cmd_complete - handle a command-queue completion notification
 * @rxb: RX buffer carrying the ucode response
 *
 * Unmaps the completed command's DMA buffer, hands the response to the
 * synchronous waiter (CMD_WANT_SKB) or the async callback, reclaims the
 * queue entry and wakes anyone blocked on the command queue.
 */
void
il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	int cmd_idx;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct il_device_cmd *cmd;
	struct il_cmd_meta *meta;
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	unsigned long flags;

	/*
	 * Every command response must come back on the command queue;
	 * anything else points to a corrupted sequence number.
	 */
	if (WARN
	    (txq_id != il->cmd_queue,
	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
	     il->txq[il->cmd_queue].q.write_ptr)) {
		il_print_hex_error(il, pkt, 32);
		return;
	}

	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
	cmd = txq->cmd[cmd_idx];
	meta = &txq->meta[cmd_idx];

	/* Time-stamp for the stuck-queue watchdog. */
	txq->time_stamp = jiffies;

	dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len), DMA_BIDIRECTIONAL);

	/* Hand the response page to the waiter, or run the callback. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(il, cmd, pkt);

	spin_lock_irqsave(&il->hcmd_lock, flags);

	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(S_HCMD_ACTIVE, &il->status);
		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
		       il_get_cmd_string(cmd->hdr.cmd));
		wake_up(&il->wait_command_queue);
	}

	/* Mark the slot free; must happen under hcmd_lock, after reclaim. */
	meta->flags = 0;

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
}
EXPORT_SYMBOL(il_tx_cmd_complete);
3327
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/*
 * Module parameter: allows the wifi/bluetooth coexistence logic to be
 * disabled at load time (read-only in sysfs, mode 0444).
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, 0444);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug bitmask shared by the 3945 and 4965 drivers. */
u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

/* Broadcast MAC address (all 0xFF). */
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3358
3359#define MAX_BIT_RATE_40_MHZ 150
3360#define MAX_BIT_RATE_20_MHZ 72
3361static void
3362il_init_ht_hw_capab(const struct il_priv *il,
3363 struct ieee80211_sta_ht_cap *ht_info,
3364 enum nl80211_band band)
3365{
3366 u16 max_bit_rate = 0;
3367 u8 rx_chains_num = il->hw_params.rx_chains_num;
3368 u8 tx_chains_num = il->hw_params.tx_chains_num;
3369
3370 ht_info->cap = 0;
3371 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
3372
3373 ht_info->ht_supported = true;
3374
3375 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
3376 max_bit_rate = MAX_BIT_RATE_20_MHZ;
3377 if (il->hw_params.ht40_channel & BIT(band)) {
3378 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
3379 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
3380 ht_info->mcs.rx_mask[4] = 0x01;
3381 max_bit_rate = MAX_BIT_RATE_40_MHZ;
3382 }
3383
3384 if (il->cfg->mod_params->amsdu_size_8K)
3385 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
3386
3387 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3388 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3389
3390 ht_info->mcs.rx_mask[0] = 0xFF;
3391 if (rx_chains_num >= 2)
3392 ht_info->mcs.rx_mask[1] = 0xFF;
3393 if (rx_chains_num >= 3)
3394 ht_info->mcs.rx_mask[2] = 0xFF;
3395
3396
3397 max_bit_rate *= rx_chains_num;
3398 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
3399 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
3400
3401
3402 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
3403 if (tx_chains_num != rx_chains_num) {
3404 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
3405 ht_info->mcs.tx_params |=
3406 ((tx_chains_num -
3407 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
3408 }
3409}
3410
3411
3412
3413
/*
 * il_init_geos - build mac80211 band/channel/rate tables from EEPROM data
 *
 * Allocates ieee80211_channel and ieee80211_rate arrays, wires them into
 * il->bands[] for 2.4 GHz and 5 GHz, fills per-channel flags from the
 * EEPROM-derived channel_info, and records the device TX power limits.
 *
 * Return: 0 on success (also when already configured), -ENOMEM otherwise.
 */
int
il_init_geos(struct il_priv *il)
{
	struct il_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = 0;

	/* Bail out if a previous call already populated the bands. */
	if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
	    il->bands[NL80211_BAND_5GHZ].n_bitrates) {
		D_INFO("Geography modes already initialized.\n");
		set_bit(S_GEO_CONFIGURED, &il->status);
		return 0;
	}

	channels =
	    kcalloc(il->channel_count, sizeof(struct ieee80211_channel),
		    GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates =
	    kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
		    GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5 GHz channels start after the 2.4 GHz block; OFDM rates only. */
	sband = &il->bands[NL80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];

	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);

	/* 2.4 GHz uses the channel array from idx 0 and all legacy rates. */
	sband = &il->bands[NL80211_BAND_2GHZ];
	sband->channels = channels;

	sband->bitrates = rates;
	sband->n_bitrates = RATE_COUNT_LEGACY;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);

	il->ieee_channels = channels;
	il->ieee_rates = rates;

	for (i = 0; i < il->channel_count; i++) {
		ch = &il->channel_info[i];

		if (!il_is_channel_valid(ch))
			continue;

		sband = &il->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
		    ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (il_is_channel_valid(ch)) {
			/* No-IR unless the EEPROM allows IBSS/active scan. */
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* Track the highest regulatory TX power seen. */
			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
		       geo_ch->center_freq,
		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
		       geo_ch->
		       flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
		       geo_ch->flags);
	}

	il->tx_power_device_lmt = max_tx_power;
	il->tx_power_user_lmt = max_tx_power;
	il->tx_power_next = max_tx_power;

	/* SKU claimed 5 GHz but no 5 GHz channels exist: fix up the SKU. */
	if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
	    (il->cfg->sku & IL_SKU_A)) {
		IL_INFO("Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			il->pci_dev->device, il->pci_dev->subsystem_device);
		il->cfg->sku &= ~IL_SKU_A;
	}

	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		il->bands[NL80211_BAND_2GHZ].n_channels,
		il->bands[NL80211_BAND_5GHZ].n_channels);

	set_bit(S_GEO_CONFIGURED, &il->status);

	return 0;
}
EXPORT_SYMBOL(il_init_geos);
3531
3532
3533
3534
3535void
3536il_free_geos(struct il_priv *il)
3537{
3538 kfree(il->ieee_channels);
3539 kfree(il->ieee_rates);
3540 clear_bit(S_GEO_CONFIGURED, &il->status);
3541}
3542EXPORT_SYMBOL(il_free_geos);
3543
3544static bool
3545il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
3546 u16 channel, u8 extension_chan_offset)
3547{
3548 const struct il_channel_info *ch_info;
3549
3550 ch_info = il_get_channel_info(il, band, channel);
3551 if (!il_is_channel_valid(ch_info))
3552 return false;
3553
3554 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3555 return !(ch_info->
3556 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3557 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3558 return !(ch_info->
3559 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3560
3561 return false;
3562}
3563
3564bool
3565il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
3566{
3567 if (!il->ht.enabled || !il->ht.is_40mhz)
3568 return false;
3569
3570
3571
3572
3573
3574 if (ht_cap && !ht_cap->ht_supported)
3575 return false;
3576
3577#ifdef CONFIG_IWLEGACY_DEBUGFS
3578 if (il->disable_ht40)
3579 return false;
3580#endif
3581
3582 return il_is_channel_extension(il, il->band,
3583 le16_to_cpu(il->staging.channel),
3584 il->ht.extension_chan_offset);
3585}
3586EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3587
3588static u16 noinline
3589il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3590{
3591 u16 new_val;
3592 u16 beacon_factor;
3593
3594
3595
3596
3597
3598 if (!beacon_val)
3599 return DEFAULT_BEACON_INTERVAL;
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3614 new_val = beacon_val / beacon_factor;
3615
3616 if (!new_val)
3617 new_val = max_beacon_val;
3618
3619 return new_val;
3620}
3621
/*
 * il_send_rxon_timing - program beacon timing into the ucode
 *
 * Builds a C_RXON_TIMING command from the current vif's beacon interval
 * and DTIM period (clamped to what the device can time) plus the last
 * TSF timestamp, and sends it synchronously. Must be called with
 * il->mutex held.
 *
 * Return: result of il_send_cmd_pdu() (0 on success).
 */
int
il_send_rxon_timing(struct il_priv *il)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = il->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));

	il->timing.timestamp = cpu_to_le64(il->timestamp);
	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/* ATIM window is unused (no IBSS power-save support). */
	il->timing.atim_win = 0;

	/* Clamp the interval to the largest value the ucode can time. */
	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	il->timing.beacon_interval = cpu_to_le16(beacon_int);

	/* Initial timer: remainder of the current beacon period (usec). */
	tsf = il->timestamp;
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* A DTIM period of 0 is invalid; default to every beacon. */
	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(il->timing.beacon_interval),
		le32_to_cpu(il->timing.beacon_init_val),
		le16_to_cpu(il->timing.atim_win));

	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
			       &il->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3670
3671void
3672il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3673{
3674 struct il_rxon_cmd *rxon = &il->staging;
3675
3676 if (hw_decrypt)
3677 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3678 else
3679 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3680
3681}
3682EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3683
3684
3685int
3686il_check_rxon_cmd(struct il_priv *il)
3687{
3688 struct il_rxon_cmd *rxon = &il->staging;
3689 bool error = false;
3690
3691 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3692 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3693 IL_WARN("check 2.4G: wrong narrow\n");
3694 error = true;
3695 }
3696 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3697 IL_WARN("check 2.4G: wrong radar\n");
3698 error = true;
3699 }
3700 } else {
3701 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3702 IL_WARN("check 5.2G: not short slot!\n");
3703 error = true;
3704 }
3705 if (rxon->flags & RXON_FLG_CCK_MSK) {
3706 IL_WARN("check 5.2G: CCK!\n");
3707 error = true;
3708 }
3709 }
3710 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3711 IL_WARN("mac/bssid mcast!\n");
3712 error = true;
3713 }
3714
3715
3716 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3717 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3718 IL_WARN("neither 1 nor 6 are basic\n");
3719 error = true;
3720 }
3721
3722 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3723 IL_WARN("aid > 2007\n");
3724 error = true;
3725 }
3726
3727 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3728 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3729 IL_WARN("CCK and short slot\n");
3730 error = true;
3731 }
3732
3733 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3734 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3735 IL_WARN("CCK and auto detect");
3736 error = true;
3737 }
3738
3739 if ((rxon->
3740 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3741 RXON_FLG_TGG_PROTECT_MSK) {
3742 IL_WARN("TGg but no auto-detect\n");
3743 error = true;
3744 }
3745
3746 if (error)
3747 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3748
3749 if (error) {
3750 IL_ERR("Invalid RXON\n");
3751 return -EINVAL;
3752 }
3753 return 0;
3754}
3755EXPORT_SYMBOL(il_check_rxon_cmd);
3756
3757
3758
3759
3760
3761
3762
3763
3764
/*
 * il_full_rxon_required - decide whether a full RXON command is needed
 *
 * Compares the staging RXON against the active one. Some fields can be
 * updated with a lightweight "assoc" RXON; changes to the others (or not
 * being associated at all) force a full RXON, which drops the current
 * association.
 *
 * Return: 1 when a full RXON must be sent, 0 when the assoc path suffices.
 */
int
il_full_rxon_required(struct il_priv *il)
{
	const struct il_rxon_cmd *staging = &il->staging;
	const struct il_rxon_cmd *active = &il->active;

/* Log the condition that forced a full RXON and bail out early. */
#define CHK(cond) \
	if ((cond)) { \
		D_INFO("need full RXON - " #cond "\n"); \
		return 1; \
	}

#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		D_INFO("need full RXON - " \
		       #c1 " != " #c2 " - %d != %d\n", \
		       (c1), (c2)); \
		return 1; \
	}

	/* These items trigger a full RXON whenever they change. */
	CHK(!il_is_associated(il));
	CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
				     active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/*
	 * Flags and filter_flags are mostly updatable via the assoc RXON,
	 * except for a band change or a change of association state.
	 */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(il_full_rxon_required);
3818
3819u8
3820il_get_lowest_plcp(struct il_priv *il)
3821{
3822
3823
3824
3825
3826 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3827 return RATE_1M_PLCP;
3828 else
3829 return RATE_6M_PLCP;
3830}
3831EXPORT_SYMBOL(il_get_lowest_plcp);
3832
/*
 * _il_set_rxon_ht - translate the current HT state into staging RXON flags
 *
 * Sets or clears the channel-mode, control-channel-location and HT
 * protection bits according to il->ht (enabled, 40 MHz, protection mode,
 * extension channel offset).
 */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	struct il_rxon_cmd *rxon = &il->staging;

	if (!il->ht.enabled) {
		/* HT off: strip every HT-related flag and stop here. */
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |=
	    cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/*
	 * Recompute channel mode and control channel location from
	 * scratch: clear both fields first.
	 */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, NULL)) {
		/* Pure 40 MHz only when 20 MHz protection is in force. */
		if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Control channel relative to the extension one. */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Mixed mode: 20 MHz stations may be present. */
			switch (il->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* 40 MHz needs a defined extension offset. */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		il->ht.protection, il->ht.extension_chan_offset);
}
3898
/*
 * il_set_rxon_ht - exported wrapper around _il_set_rxon_ht()
 *
 * Updates the staging RXON's HT-related flags from the current HT state.
 */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3905
3906
3907u8
3908il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
3909{
3910 const struct il_channel_info *ch_info;
3911 int i;
3912 u8 channel = 0;
3913 u8 min, max;
3914
3915 if (band == NL80211_BAND_5GHZ) {
3916 min = 14;
3917 max = il->channel_count;
3918 } else {
3919 min = 0;
3920 max = 14;
3921 }
3922
3923 for (i = min; i < max; i++) {
3924 channel = il->channel_info[i].channel;
3925 if (channel == le16_to_cpu(il->staging.channel))
3926 continue;
3927
3928 ch_info = il_get_channel_info(il, band, channel);
3929 if (il_is_channel_valid(ch_info))
3930 break;
3931 }
3932
3933 return channel;
3934}
3935EXPORT_SYMBOL(il_get_single_channel_number);
3936
3937
3938
3939
3940
3941
3942
3943
3944int
3945il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3946{
3947 enum nl80211_band band = ch->band;
3948 u16 channel = ch->hw_value;
3949
3950 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3951 return 0;
3952
3953 il->staging.channel = cpu_to_le16(channel);
3954 if (band == NL80211_BAND_5GHZ)
3955 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3956 else
3957 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3958
3959 il->band = band;
3960
3961 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3962
3963 return 0;
3964}
3965EXPORT_SYMBOL(il_set_rxon_channel);
3966
3967void
3968il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
3969 struct ieee80211_vif *vif)
3970{
3971 if (band == NL80211_BAND_5GHZ) {
3972 il->staging.flags &=
3973 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3974 RXON_FLG_CCK_MSK);
3975 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3976 } else {
3977
3978 if (vif && vif->bss_conf.use_short_slot)
3979 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3980 else
3981 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3982
3983 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3984 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3985 il->staging.flags &= ~RXON_FLG_CCK_MSK;
3986 }
3987}
3988EXPORT_SYMBOL(il_set_flags_for_band);
3989
3990
3991
3992
/*
 * il_connection_init_rx_config - initialize the staging RXON for iw_mode
 *
 * Resets il->staging and fills dev_type, filter flags, channel, band and
 * basic rates according to the current interface type, keeping the
 * previously active channel when it is still known (falling back to the
 * first channel in the EEPROM table otherwise).
 */
void
il_connection_init_rx_config(struct il_priv *il)
{
	const struct il_channel_info *ch_info;

	memset(&il->staging, 0, sizeof(il->staging));

	switch (il->iw_mode) {
	case NL80211_IFTYPE_UNSPECIFIED:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		break;
	case NL80211_IFTYPE_STATION:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	case NL80211_IFTYPE_ADHOC:
		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		il->staging.filter_flags =
		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	default:
		IL_ERR("Unsupported interface type %d\n", il->vif->type);
		return;
	}

#if 0
	/* Disabled: short-preamble tracking was never wired up. */
	if (!hw_to_local(il->hw)->short_preamble)
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* Stay on the previously active channel if it is still known. */
	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));

	if (!ch_info)
		ch_info = &il->channel_info[0];

	il->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, il->band, il->vif);

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	il->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* Clear HT40 channel-mode bits; HT is configured separately. */
	il->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (il->vif)
		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);

	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
4054
4055void
4056il_set_rate(struct il_priv *il)
4057{
4058 const struct ieee80211_supported_band *hw = NULL;
4059 struct ieee80211_rate *rate;
4060 int i;
4061
4062 hw = il_get_hw_mode(il, il->band);
4063 if (!hw) {
4064 IL_ERR("Failed to set rate: unable to get hw mode\n");
4065 return;
4066 }
4067
4068 il->active_rate = 0;
4069
4070 for (i = 0; i < hw->n_bitrates; i++) {
4071 rate = &(hw->bitrates[i]);
4072 if (rate->hw_value < RATE_COUNT_LEGACY)
4073 il->active_rate |= (1 << rate->hw_value);
4074 }
4075
4076 D_RATE("Set active_rate = %0x\n", il->active_rate);
4077
4078 il->staging.cck_basic_rates =
4079 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4080
4081 il->staging.ofdm_basic_rates =
4082 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4083}
4084EXPORT_SYMBOL(il_set_rate);
4085
4086void
4087il_chswitch_done(struct il_priv *il, bool is_success)
4088{
4089 if (test_bit(S_EXIT_PENDING, &il->status))
4090 return;
4091
4092 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4093 ieee80211_chswitch_done(il->vif, is_success);
4094}
4095EXPORT_SYMBOL(il_chswitch_done);
4096
/*
 * Handle a Channel Switch Announcement notification from the uCode.
 * On success the active and staging RXON channels are updated to the
 * new channel and the pending switch is completed.
 */
void
il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);
	/* Alias the active RXON so its channel can be patched in place. */
	struct il_rxon_cmd *rxon = (void *)&il->active;

	/* Ignore stray notifications with no switch pending. */
	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		rxon->channel = csa->channel;
		il->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);
4119
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump the staging RXON command (hex plus decoded fields) to the debug log. */
void
il_print_rx_config_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif
4140
4141
4142
/*
 * Handle a firmware (uCode) error interrupt: record the error, dump
 * diagnostics, unblock any waiters, and optionally restart the adapter.
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Mark firmware as failed so other paths back off. */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel any host command currently in flight. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	/* Dump device-specific error log, and FH registers if available. */
	il->ops->dump_nic_error_log(il);
	if (il->ops->dump_fh)
		il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il);
#endif

	/* Wake anyone blocked waiting for a command response. */
	wake_up(&il->wait_command_queue);

	/* Drop the ready bit so no new host commands are issued while the
	 * restart (if any) is in progress. */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		/* Restart only if the module parameter allows it. */
		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);
4177
4178static int
4179_il_apm_stop_master(struct il_priv *il)
4180{
4181 int ret = 0;
4182
4183
4184 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4185
4186 ret =
4187 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4188 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4189 if (ret < 0)
4190 IL_WARN("Master Disable Timed Out, 100 usec\n");
4191
4192 D_INFO("stop master\n");
4193
4194 return ret;
4195}
4196
/*
 * Put the card into a low-power stopped state. The sequence (stop
 * master, software reset, clear INIT_DONE) is order-sensitive.
 * Caller must hold il->reg_lock.
 */
void
_il_apm_stop(struct il_priv *il)
{
	lockdep_assert_held(&il->reg_lock);

	D_INFO("Stop card, put in low power state\n");

	/* Stop bus-master DMA activity first. */
	_il_apm_stop_master(il);

	/* Reset the entire device. */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* Brief settle time after the reset. */
	udelay(10);

	/* Clear "initialization complete" so the device may enter its
	 * low-power state. */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(_il_apm_stop);
4219
4220void
4221il_apm_stop(struct il_priv *il)
4222{
4223 unsigned long flags;
4224
4225 spin_lock_irqsave(&il->reg_lock, flags);
4226 _il_apm_stop(il);
4227 spin_unlock_irqrestore(&il->reg_lock, flags);
4228}
4229EXPORT_SYMBOL(il_apm_stop);
4230
4231
4232
4233
4234
4235
/*
 * Bring up the card's basic (APM) functionality: hardware workarounds,
 * power-management configuration, clock enabling, and waiting for the
 * MAC clock to become ready. Returns 0 on success, negative on timeout.
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/* Disable the L0S exit timer (hardware workaround). */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* Workaround: report L1A instead of L0S in RX path
	 * (see CSR chicken-bits definition in common.h). */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set HPET memory configuration value. */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/* Enable HAP INTA to wake the device from L1a state. */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * Configure L0S vs L1 power saving based on the PCIe link control
	 * register: if ASPM L1 is enabled we disable L0S, otherwise we
	 * allow L0S.
	 */
	if (il->cfg->set_l0s) {
		ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
		if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) {
			/* L1 active: disable L0S. */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1 inactive: allow L0S. */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Device-specific PLL configuration, if the config provides one. */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG,
			   il->cfg->pll_cfg_val);

	/* Signal "initialization complete" to bring the device out of
	 * low-power mode. */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Wait for the MAC clock to stabilize (up to 25 ms). */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock; devices that use the bootstrap state machine
	 * (BSM) to load uCode additionally need the BSM clock.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active in the periphery power-management register. */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);
4342
/*
 * Set the user TX power limit (dBm). Validates the requested power
 * against device limits, defers the change while scanning or while a
 * RXON change is pending (unless @force), and rolls back on failure.
 * Caller must hold il->mutex. Returns 0 or a negative errno.
 */
int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;

	lockdep_assert_held(&il->mutex);

	/* Nothing to do if the limit is unchanged and not forced. */
	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->ops->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm == 1 mW is the minimum the device supports. */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Remember the requested value; a deferred change is picked up
	 * from tx_power_next later. */
	il->tx_power_next = tx_power;

	/* Defer while scanning or while staging RXON differs from active. */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&il->active, &il->staging, sizeof(il->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->ops->send_tx_power(il);

	/* On failure restore the previous limit and pending value. */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);
4398
4399void
4400il_send_bt_config(struct il_priv *il)
4401{
4402 struct il_bt_cmd bt_cmd = {
4403 .lead_time = BT_LEAD_TIME_DEF,
4404 .max_kill = BT_MAX_KILL_DEF,
4405 .kill_ack_mask = 0,
4406 .kill_cts_mask = 0,
4407 };
4408
4409 if (!bt_coex_active)
4410 bt_cmd.flags = BT_COEX_DISABLE;
4411 else
4412 bt_cmd.flags = BT_COEX_ENABLE;
4413
4414 D_INFO("BT coex %s\n",
4415 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4416
4417 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4418 IL_ERR("failed to send BT Coex Config\n");
4419}
4420EXPORT_SYMBOL(il_send_bt_config);
4421
4422int
4423il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4424{
4425 struct il_stats_cmd stats_cmd = {
4426 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4427 };
4428
4429 if (flags & CMD_ASYNC)
4430 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4431 &stats_cmd, NULL);
4432 else
4433 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4434 &stats_cmd);
4435}
4436EXPORT_SYMBOL(il_send_stats_request);
4437
/* Handle a PM sleep notification from the uCode (debug logging only). */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4449
4450void
4451il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4452{
4453 struct il_rx_pkt *pkt = rxb_addr(rxb);
4454 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4455 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4456 il_get_cmd_string(pkt->hdr.cmd));
4457 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4458}
4459EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4460
/* Log an error-reply notification received from the uCode. */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4475
/* Zero the accumulated interrupt statistics. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4481
/*
 * mac80211 conf_tx callback: store EDCA parameters for one AC queue
 * into the driver's default QoS parameter table. Returns 0 on success
 * (including silently ignoring out-of-range queues) or -EIO when the
 * RF is not ready.
 */
int
il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	int q;

	D_MAC80211("enter\n");

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/* mac80211 AC order is reversed relative to the firmware table. */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&il->lock, flags);

	il->qos_data.def_qos_parm.ac[q].cw_min =
	    cpu_to_le16(params->cw_min);
	il->qos_data.def_qos_parm.ac[q].cw_max =
	    cpu_to_le16(params->cw_max);
	il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	/* TXOP is given in units of 32 usec. */
	il->qos_data.def_qos_parm.ac[q].edca_txop =
	    cpu_to_le16((params->txop * 32));

	il->qos_data.def_qos_parm.ac[q].reserved1 = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	D_MAC80211("leave\n");
	return 0;
}
EXPORT_SYMBOL(il_mac_conf_tx);
4522
4523int
4524il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4525{
4526 struct il_priv *il = hw->priv;
4527 int ret;
4528
4529 D_MAC80211("enter\n");
4530
4531 ret = (il->ibss_manager == IL_IBSS_MANAGER);
4532
4533 D_MAC80211("leave ret %d\n", ret);
4534 return ret;
4535}
4536EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4537
/*
 * Rebuild the staging RXON for the current interface mode and commit
 * it to the device. Returns the result of il_commit_rxon().
 */
static int
il_set_mode(struct il_priv *il)
{
	il_connection_init_rx_config(il);

	/* Device-specific RX chain selection, when implemented. */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	return il_commit_rxon(il);
}
4548
/*
 * mac80211 add_interface callback. Only a single interface is
 * supported; re-adding the same vif (e.g. after a mode change) is
 * treated as a reset. Returns 0 or a negative errno.
 */
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	int err;
	bool reset;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/* A second, different vif is not supported; the same vif being
	 * re-added means we are resetting the existing interface. */
	reset = (il->vif == vif);
	if (il->vif && !reset) {
		err = -EOPNOTSUPP;
		goto out;
	}

	il->vif = vif;
	il->iw_mode = vif->type;

	err = il_set_mode(il);
	if (err) {
		IL_WARN("Fail to set mode %d\n", vif->type);
		/* Roll back only if this was a fresh add, not a reset. */
		if (!reset) {
			il->vif = NULL;
			il->iw_mode = NL80211_IFTYPE_STATION;
		}
	}

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
4594
/*
 * Tear down state tied to @vif: abort any scan it owns, then rebuild
 * and commit the RXON configuration. Caller must hold il->mutex.
 */
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	il_set_mode(il);
}
4607
/*
 * mac80211 remove_interface callback: detach the vif, reset the
 * operating mode to station, and clear the stored BSSID.
 */
void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	/* Only one interface is supported; it must be the one removed. */
	WARN_ON(il->vif != vif);
	il->vif = NULL;
	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
	il_teardown_interface(il, vif);
	eth_zero_addr(il->bssid);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_remove_interface);
4626
4627int
4628il_alloc_txq_mem(struct il_priv *il)
4629{
4630 if (!il->txq)
4631 il->txq =
4632 kcalloc(il->cfg->num_of_queues,
4633 sizeof(struct il_tx_queue),
4634 GFP_KERNEL);
4635 if (!il->txq) {
4636 IL_ERR("Not enough memory for txq\n");
4637 return -ENOMEM;
4638 }
4639 return 0;
4640}
4641EXPORT_SYMBOL(il_alloc_txq_mem);
4642
/* Free the TX queue array and clear the pointer to prevent reuse. */
void
il_free_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_free_txq_mem);
4650
/*
 * Force a firmware reload. Internal (non-@external) requests are
 * rate-limited by reset_duration; external requests bypass both the
 * rate limit and the restart_fw module-parameter check.
 * Returns 0, -EINVAL while exiting, or -EAGAIN when rate-limited.
 */
int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* Reject internal requests arriving within reset_duration
		 * of the previous forced reset. */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/* Internal requests honor the restart_fw module parameter;
	 * external (user-initiated) requests do not. */
	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Flag the firmware error and release any command waiters. */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);

	/* Drop the ready bit so no host commands are sent while the
	 * restart work runs. */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
EXPORT_SYMBOL(il_force_reset);
4703
/*
 * mac80211 change_interface callback: switch the existing vif to a new
 * interface type. P2P is not supported. Returns 0 or a negative errno.
 */
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	int err;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
		   vif->type, vif->addr, newtype, newp2p);

	if (newp2p) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!il->vif || !il_is_ready_rf(il)) {
		/* Huh? But wait ... this can maybe happen when we're in the
		 * middle of a firmware restart — refuse the change. */
		err = -EBUSY;
		goto out;
	}

	/* Apply the new type and rebuild/commit the RXON via teardown. */
	vif->type = newtype;
	vif->p2p = false;
	il->iw_mode = newtype;
	il_teardown_interface(il, vif);
	err = 0;

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);
4743
/*
 * mac80211 flush callback: wait (bounded by ~500 ms overall) for the
 * data TX queues to drain. The command queue is skipped.
 */
void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  u32 queues, bool drop)
{
	struct il_priv *il = hw->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	int i;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (il->txq == NULL)
		goto out;

	for (i = 0; i < il->hw_params.max_txq_num; i++) {
		struct il_queue *q;

		if (i == il->cmd_queue)
			continue;

		q = &il->txq[i].q;
		/* Queue already empty. */
		if (q->read_ptr == q->write_ptr)
			continue;

		if (time_after(jiffies, timeout)) {
			IL_ERR("Failed to flush queue %d\n", q->id);
			break;
		}

		/* NOTE(review): after this sleep the loop advances to the
		 * next queue without re-checking queue i, so a busy queue
		 * is only waited on once per pass — confirm against the
		 * upstream driver whether queue i should be re-polled. */
		msleep(20);
	}
out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_flush);
4779
4780
4781
4782
4783
/*
 * Watchdog helper: check whether TX queue @cnt has been non-empty for
 * longer than wd_timeout and, if so, force a firmware reset.
 * Returns 1 when a reset was actually triggered, 0 otherwise.
 */
static int
il_check_stuck_queue(struct il_priv *il, int cnt)
{
	struct il_tx_queue *txq = &il->txq[cnt];
	struct il_queue *q = &txq->q;
	unsigned long timeout;
	unsigned long now = jiffies;
	int ret;

	/* Empty queue: refresh its activity timestamp and move on. */
	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = now;
		return 0;
	}

	timeout =
	    txq->time_stamp +
	    msecs_to_jiffies(il->cfg->wd_timeout);

	if (time_after(now, timeout)) {
		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
		       jiffies_to_msecs(now - txq->time_stamp));
		ret = il_force_reset(il, false);
		/* A rate-limited (-EAGAIN) reset counts as "not reset". */
		return (ret == -EAGAIN) ? 0 : 1;
	}

	return 0;
}
4811
4812
4813
4814
4815
4816#define IL_WD_TICK(timeout) ((timeout) / 4)
4817
4818
4819
4820
4821
/*
 * Watchdog timer callback: scan all TX queues for stalls and re-arm
 * the timer. Stops (without re-arming) once a stuck queue has forced
 * a reset, or while exiting, or when the watchdog is disabled.
 */
void
il_bg_watchdog(struct timer_list *t)
{
	struct il_priv *il = from_timer(il, t, watchdog);
	int cnt;
	unsigned long timeout;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* wd_timeout == 0 disables the watchdog entirely. */
	timeout = il->cfg->wd_timeout;
	if (timeout == 0)
		return;

	/* Check the command queue first. */
	if (il_check_stuck_queue(il, il->cmd_queue))
		return;

	/* Then every data queue. */
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		/* Skip the command queue; it was checked above. */
		if (cnt == il->cmd_queue)
			continue;
		if (il_check_stuck_queue(il, cnt))
			return;
	}

	/* Re-arm for the next tick (a quarter of the timeout). */
	mod_timer(&il->watchdog,
		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(il_bg_watchdog);
4853
4854void
4855il_setup_watchdog(struct il_priv *il)
4856{
4857 unsigned int timeout = il->cfg->wd_timeout;
4858
4859 if (timeout)
4860 mod_timer(&il->watchdog,
4861 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4862 else
4863 del_timer(&il->watchdog);
4864}
4865EXPORT_SYMBOL(il_setup_watchdog);
4866
4867
4868
4869
4870
4871
4872
4873u32
4874il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4875{
4876 u32 quot;
4877 u32 rem;
4878 u32 interval = beacon_interval * TIME_UNIT;
4879
4880 if (!interval || !usec)
4881 return 0;
4882
4883 quot =
4884 (usec /
4885 interval) & (il_beacon_time_mask_high(il,
4886 il->hw_params.
4887 beacon_time_tsf_bits) >> il->
4888 hw_params.beacon_time_tsf_bits);
4889 rem =
4890 (usec % interval) & il_beacon_time_mask_low(il,
4891 il->hw_params.
4892 beacon_time_tsf_bits);
4893
4894 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4895}
4896EXPORT_SYMBOL(il_usecs_to_beacons);
4897
4898
4899
4900
4901__le32
4902il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4903 u32 beacon_interval)
4904{
4905 u32 base_low = base & il_beacon_time_mask_low(il,
4906 il->hw_params.
4907 beacon_time_tsf_bits);
4908 u32 addon_low = addon & il_beacon_time_mask_low(il,
4909 il->hw_params.
4910 beacon_time_tsf_bits);
4911 u32 interval = beacon_interval * TIME_UNIT;
4912 u32 res = (base & il_beacon_time_mask_high(il,
4913 il->hw_params.
4914 beacon_time_tsf_bits)) +
4915 (addon & il_beacon_time_mask_high(il,
4916 il->hw_params.
4917 beacon_time_tsf_bits));
4918
4919 if (base_low > addon_low)
4920 res += base_low - addon_low;
4921 else if (base_low < addon_low) {
4922 res += interval + base_low - addon_low;
4923 res += (1 << il->hw_params.beacon_time_tsf_bits);
4924 } else
4925 res += (1 << il->hw_params.beacon_time_tsf_bits);
4926
4927 return cpu_to_le32(res);
4928}
4929EXPORT_SYMBOL(il_add_beacon_time);
4930
4931#ifdef CONFIG_PM_SLEEP
4932
/*
 * PM suspend callback: put the device into a low-power stopped state.
 * Always succeeds.
 */
static int
il_pci_suspend(struct device *device)
{
	struct il_priv *il = dev_get_drvdata(device);

	/* Stop the APM; the PCI core handles the rest of the transition
	 * (config-space save, power state) on our behalf. */
	il_apm_stop(il);

	return 0;
}
4949
/*
 * PM resume callback: re-enable interrupts, sample the hardware RF-kill
 * switch, and propagate its state to the stack. Always succeeds.
 */
static int
il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/* Disable the RETRY_TIMEOUT register (0x41) to keep PCI TX
	 * retries from interfering with C3 CPU state. */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	/* Sample the hardware RF-kill line from GP_CNTRL. */
	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}
4977
4978SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4979EXPORT_SYMBOL(il_pm_ops);
4980
4981#endif
4982
/*
 * Send the current default QoS (EDCA) parameters to the uCode,
 * asynchronously. Skipped while the driver is shutting down.
 */
static void
il_update_qos(struct il_priv *il)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il->qos_data.def_qos_parm.qos_flags = 0;

	if (il->qos_data.qos_active)
		il->qos_data.def_qos_parm.qos_flags |=
		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	/* HT operation requires the TGN flag. */
	if (il->ht.enabled)
		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
			      &il->qos_data.def_qos_parm, NULL);
}
5004
5005
5006
5007
/*
 * mac80211 config callback: apply channel, SMPS, HT, power-save and TX
 * power changes to the staging RXON, then commit it to the device if
 * it differs from the active configuration. Returns 0 or a negative
 * errno (e.g. -EINVAL for an invalid channel).
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	/* While scanning, channel changes are deferred and the final
	 * RXON commit is skipped. */
	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* Record the new spatial-multiplexing power-save mode. */
		il->current_ht_config.smps = conf->smps_mode;

		/* Recompute the RX chain selection, if supported. */
		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* changed == 0 forces a full (re)configuration of the channel. */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		/* IBSS operation is only permitted on IBSS channels. */
		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Track HT enable/disable so QoS can be refreshed below. */
		if (il->ht.enabled != conf_is_ht(conf)) {
			il->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (il->ht.enabled) {
			/* Derive 40 MHz state and extension-channel offset
			 * from the HT40 configuration. */
			if (conf_is_ht40_minus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				il->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				il->ht.is_40mhz = true;
			} else {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				il->ht.is_40mhz = false;
			}
		} else
			il->ht.is_40mhz = false;

		/* Default to no HT protection; updated later from the
		 * BSS's HT operation mode. */
		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* Changing channel invalidates the current flags; they are
		 * rebuilt below for the new band. */
		if ((le16_to_cpu(il->staging.channel) != ch))
			il->staging.flags = 0;

		il_set_rxon_channel(il, channel);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, channel->band, il->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->ops->update_bcast_stations)
			ret = il->ops->update_bcast_stations(il);

set_ch_out:
		/* Refresh the active rate set for the (possibly new) band. */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
		if (!il->power_data.ps_disabled)
			IL_WARN_ONCE("Enabling power save might cause firmware crashes\n");
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	/* Push the staging RXON to the device only if it changed. */
	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il);

out:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_config);
5161
/*
 * mac80211 reset_tsf callback: drop all association/beacon state,
 * cancel scanning, and commit an unassociated RXON.
 */
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	spin_lock_irqsave(&il->lock, flags);

	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));

	/* Release the cached beacon and reset the TSF timestamp. */
	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;
	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* Mark as unassociated and push the change to the device. */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il);

	il_set_rate(il);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);
5199
/*
 * Refresh the driver's HT configuration from the vif's BSS state:
 * protection mode, non-greenfield STA presence, and whether a single
 * RX chain suffices for the peer's advertised MCS capabilities.
 */
static void
il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

	D_ASSOC("enter:\n");

	if (!il->ht.enabled)
		return;

	il->ht.protection =
	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	il->ht.non_gf_sta_present =
	    !!(bss_conf->
	       ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			/* Maximum TX streams the AP advertises (0-based
			 * field, hence the +1). */
			maxstreams =
			    (ht_cap->mcs.
			     tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			/* One RX chain is enough if the peer only supports
			 * single-stream MCS rates. */
			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/* Peer station unknown; assume a single chain is
			 * sufficient until told otherwise. */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}
5259
/*
 * Mark the device as unassociated in the staging RXON and commit the
 * change immediately.
 */
static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il->staging.assoc_id = 0;
	il_commit_rxon(il);
}
5272
/*
 * Fetch a fresh beacon from mac80211, cache it (replacing any previous
 * one), record its timestamp, and re-run the post-associate hook so
 * the device starts using the new beacon.
 */
static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("update beacon with no beaconing enabled\n");
		dev_kfree_skb(skb);
		return;
	}

	/* Swap in the new beacon under the lock. */
	spin_lock_irqsave(&il->lock, flags);
	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->ops->post_associate(il);
}
5311
/*
 * mac80211 bss_info_changed callback: propagate BSS configuration
 * changes (QoS, beaconing, BSSID, ERP flags, HT, association, IBSS
 * membership) into the staging RXON and device state.
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: changes 0x%x\n", changes);

	if (!il_is_alive(il)) {
		D_MAC80211("leave - not alive\n");
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		il->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/* Remember beaconing state; acted upon further below. */
		if (vif->bss_conf.enable_beacon)
			il->beacon_enabled = true;
		else
			il->beacon_enabled = false;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/* A zero BSSID means we lost the BSS; release any queues
		 * that were stopped for passive-channel reasons. */
		if (is_zero_ether_addr(bss_conf->bssid))
			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);

		/* A BSSID change cannot proceed while scanning; abort the
		 * scan first (bounded wait). */
		if (il_scan_cancel_timeout(il, 100)) {
			D_MAC80211("leave - scan abort failed\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* Stage the new BSSID for the next RXON commit. */
		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

		/* Keep the driver's cached copy in sync. */
		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
	}

	/* In IBSS mode a new beacon template must be picked up. */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies outside the 5 GHz band. */
		if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* Intentionally not handled here; basic rates are managed
		 * through the RXON basic-rate fields elsewhere. */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->sync_tsf;

			if (!il_is_rfkill(il))
				il->ops->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	/* Any remaining change while associated goes out via the
	 * lightweight RXON_ASSOC command. */
	if (changes && il_is_associated(il) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il);
		if (!ret) {
			/* Success: staging becomes the active config. */
			memcpy((void *)&il->active, &il->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(il->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->ops->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = il->ops->manage_ibss_station(il, vif,
						   bss_conf->ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5473
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/*
	 * Disable (but don't clear!) interrupts here to avoid back-to-back
	 * ISRs and sporadic interrupts from our NIC while we are servicing
	 * this one.  The saved mask is only used for the debug print below;
	 * re-enabling is handled by the tasklet (or by the "none" path).
	 */
	inta_mask = _il_rd(il, CSR_INT_MASK);
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ being shared with another device.
	 */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared (all-ones / 0xa5a5a5a. reads are
		 * typical of a pulled or dead PCI device).  Leave interrupts
		 * disabled; it is irrelevant for a gone card. */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts here since we don't have anything to service.
	 * Only if S_INT_ENABLED is set, i.e. another part of the driver has
	 * not deliberately masked them (e.g. during shutdown/reset). */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
5533EXPORT_SYMBOL(il_isr);
5534
5535
5536
5537
5538
5539void
5540il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5541 __le16 fc, __le32 *tx_flags)
5542{
5543 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5544 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5545 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5546 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5547
5548 if (!ieee80211_is_mgmt(fc))
5549 return;
5550
5551 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5552 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5553 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5554 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5555 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5556 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5557 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5558 break;
5559 }
5560 } else if (info->control.rates[0].
5561 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5562 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5563 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5564 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5565 }
5566}
5567EXPORT_SYMBOL(il_tx_cmd_protection);
5568