#include <linux/etherdevice.h>
#include "iwl-trans.h"
#include "iwl-modparams.h"
#include "dev.h"
#include "agn.h"
#include "calib.h"

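/*
 * Initialize the staging RXON structure with defaults derived from the
 * interface type (station, AP, IBSS, monitor) and the current channel.
 */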
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
					    RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_MONITOR:
		ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ctx->staging.channel =
		cpu_to_le16(priv->hw->conf.chandef.chan->hw_value);
	priv->band = priv->hw->conf.chandef.chan->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
				RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}

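/*
 * Clear the ASSOC flag in the given RXON command and send it, restoring
 * the caller's filter flags afterwards.
 */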
static int iwlagn_disable_bss(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	__le32 old_filter = send->filter_flags;
	int ret;

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				   0, sizeof(*send), send);

	send->filter_flags = old_filter;

	if (ret)
		IWL_DEBUG_QUIET_RFKILL(priv,
			"Error clearing ASSOC_MSK on BSS (%d)\n", ret);

	return ret;
}

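/*
 * Disable the PAN context: send an unassociated RXON with the P2P device
 * type, then wait (up to one second) for the WIPAN deactivation complete
 * notification before restoring the caller's command fields.
 */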
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;
	static const u16 deactivate_cmd[] = {
		REPLY_WIPAN_DEACTIVATION_COMPLETE
	};

	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
				   deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
				   NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				   0, sizeof(*send), send);

	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		iwl_remove_notification(&priv->notif_wait, &disable_wait);
	} else {
		ret = iwl_wait_notification(&priv->notif_wait,
					    &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}

static int iwlagn_disconn_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	__le32 old_filter = send->filter_flags;
	int ret;

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
				   sizeof(*send), send);

	send->filter_flags = old_filter;

	return ret;
}

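/*
 * Push the default QoS (EDCA) parameters of an active context to the
 * firmware, flagging whether QoS and HT (TGn) are currently enabled.
 */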
static void iwlagn_update_qos(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx)
{
	int ret;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_INFO(priv, "send QoS cmd with QoS active=%d FLAGS=0x%X\n",
		       ctx->qos_data.qos_active,
		       ctx->qos_data.def_qos_parm.qos_flags);

	ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
				   sizeof(struct iwl_qosparam_cmd),
				   &ctx->qos_data.def_qos_parm);
	if (ret)
		IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}

static int iwlagn_update_beacon(struct iwl_priv *priv,
				struct ieee80211_vif *vif)
{
	lockdep_assert_held(&priv->mutex);

	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
	if (!priv->beacon_skb)
		return -ENOMEM;
	return iwlagn_send_beacon_cmd(priv);
}

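/*
 * Send the lightweight RXON_ASSOC command.  If none of the fields that
 * RXON_ASSOC covers differ between the staging and active configurations,
 * the command is skipped entirely.
 */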
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	int ret = 0;
	struct iwl_rxon_assoc_cmd rxon_assoc;
	const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
	const struct iwl_rxon_cmd *rxon2 = &ctx->active;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->ofdm_ht_triple_stream_basic_rates ==
	     rxon2->ofdm_ht_triple_stream_basic_rates) &&
	    (rxon1->acquisition_data == rxon2->acquisition_data) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved1 = 0;
	rxon_assoc.reserved2 = 0;
	rxon_assoc.reserved3 = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
		ctx->staging.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
		ctx->staging.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
		ctx->staging.ofdm_ht_triple_stream_basic_rates;
	rxon_assoc.acquisition_data = ctx->staging.acquisition_data;

	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
				   CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
	return ret;
}

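/*
 * Clamp a beacon interval so it fits within the maximum interval the
 * uCode can handle.
 */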
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

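	/*
	 * If the interval exceeds what the uCode supports, divide it by the
	 * smallest integer factor that brings it under the limit, so the
	 * result is (approximately) a sub-multiple of the advertised
	 * interval.
	 */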
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

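/*
 * Build and send the RXON timing command: timestamp, listen interval,
 * beacon interval, beacon timer init value and DTIM period.
 */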
static int iwl_send_rxon_timing(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

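	/*
	 * The ATIM window is left at zero.  When both contexts are in use,
	 * the beacon interval of an already-associated sibling context is
	 * reused so the two contexts share a common timing base.
	 */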
	ctx->timing.atim_window = 0;

	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp;
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				    0, sizeof(ctx->timing), &ctx->timing);
}

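/*
 * Take the context out of the associated state: clear ASSOC (and, for the
 * PAN context, deactivate WIPAN first), then rebuild the station table,
 * broadcast station and default WEP keys for the new configuration.
 */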
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
	} else {
		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
		if (ret)
			return ret;
		if (ctx->vif) {
			ret = iwl_send_rxon_timing(priv, ctx);
			if (ret) {
				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
				return ret;
			}
			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
		}
	}
	if (ret)
		return ret;

	iwl_clear_ucode_stations(priv, ctx);

	iwl_update_bcast_station(priv, ctx);
	iwl_restore_stations(priv, ctx);
	ret = iwl_restore_default_wep_keys(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
		return ret;
	}

	memcpy(active, &ctx->staging, sizeof(*active));
	return 0;
}

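/*
 * Set the user TX power limit.  The request is validated against the
 * device minimum and the NVM maximum, deferred while a scan or an
 * uncommitted RXON change is pending (unless forced), and rolled back if
 * the firmware rejects the command.
 */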
static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
		return 0;

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	priv->tx_power_next = tx_power;

	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = iwlagn_send_tx_power(priv);

	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}

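/*
 * Apply the staging RXON as the new associated configuration: send timing
 * (for the BSS context), QoS and, for AP interfaces, a beacon, then commit
 * the RXON itself, re-initialize sensitivity calibration and program the
 * TX power that may have been deferred.
 */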
static int iwlagn_rxon_connect(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwl_send_rxon_timing(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
			return ret;
		}
	}

	iwlagn_update_qos(priv, ctx);

	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
		ret = iwlagn_update_beacon(priv, ctx->vif);
		if (ret) {
			IWL_ERR(priv,
				"Error sending required beacon (%d)!\n",
				ret);
			return ret;
		}
	}

	priv->start_calib = 0;

	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
				   sizeof(struct iwl_rxon_cmd), &ctx->staging);
	if (ret) {
		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
		return ret;
	}
	memcpy(active, &ctx->staging, sizeof(*active));

	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
		if (iwlagn_update_beacon(priv, ctx->vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	iwl_init_sensitivity(priv);

	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}

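/*
 * Program the WIPAN time-slot parameters that share the medium between the
 * BSS and PAN contexts.  Slot widths are derived from the beacon and DTIM
 * intervals of the two contexts.
 */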
int iwlagn_set_pan_params(struct iwl_priv *priv)
{
	struct iwl_wipan_params_cmd cmd;
	struct iwl_rxon_context *ctx_bss, *ctx_pan;
	int slot0 = 300, slot1 = 0;
	int ret;

	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
		return 0;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	lockdep_assert_held(&priv->mutex);

	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];

	if (!ctx_pan->is_active)
		return 0;

	memset(&cmd, 0, sizeof(cmd));

	cmd.num_slots = 2;

	cmd.slots[0].type = 0;
	cmd.slots[1].type = 1;

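	/*
	 * With both interfaces up, run in slotted mode and split the beacon
	 * interval between the two contexts; most of the time is given to
	 * whichever side is still associating, or to the BSS side while the
	 * hardware is scanning.
	 */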
	if (ctx_bss->vif && ctx_pan->vif) {
		int bcnint = ctx_pan->beacon_int;
		int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;

		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);

		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
		    bcnint &&
		    bcnint != ctx_bss->beacon_int) {
			IWL_ERR(priv,
				"beacon intervals don't match (%d, %d)\n",
				ctx_bss->beacon_int, ctx_pan->beacon_int);
		} else
			bcnint = max_t(int, bcnint,
				       ctx_bss->beacon_int);
		if (!bcnint)
			bcnint = DEFAULT_BEACON_INTERVAL;
		slot0 = bcnint / 2;
		slot1 = bcnint - slot0;

		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
		    (!ctx_bss->vif->bss_conf.idle &&
		     !ctx_bss->vif->bss_conf.assoc)) {
			slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		} else if (!ctx_pan->vif->bss_conf.idle &&
			   !ctx_pan->vif->bss_conf.assoc) {
			slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot0 = IWL_MIN_SLOT_TIME;
		}
	} else if (ctx_pan->vif) {
		slot0 = 0;
		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
					ctx_pan->beacon_int;
		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);

		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
			slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		}
	}

	cmd.slots[0].width = cpu_to_le16(slot0);
	cmd.slots[1].width = cpu_to_le16(slot1);

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
				   sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);

	return ret;
}

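/*
 * Translate the HT operating mode of one context into RXON flags:
 * protection mode, 20/40 MHz channel mode and the position of the control
 * channel within an HT40 pair.
 */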
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
				   RXON_FLG_HT_OPERATING_MODE_POS);

	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		if (ctx->ht.protection ==
		    IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;

			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	iwlagn_set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode: 0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}

void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_set_rxon_ht(priv, ht_conf, ctx);
}

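/*
 * Set the staging channel and band for a context.  Does nothing if the
 * requested channel and band already match; the caller is responsible for
 * committing the RXON.
 */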
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			  struct iwl_rxon_context *ctx)
{
	enum nl80211_band band = ch->band;
	u16 channel = ch->hw_value;

	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == NL80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
}

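/*
 * Set the band-dependent RXON flags: 5 GHz forces short slot and disables
 * CCK and auto-detect, while 2.4 GHz enables them and takes short slot
 * from the BSS configuration.
 */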
void iwl_set_flags_for_band(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    enum nl80211_band band,
			    struct ieee80211_vif *vif)
{
	if (band == NL80211_BAND_5GHZ) {
		ctx->staging.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
		      | RXON_FLG_CCK_MSK);
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		if (vif && vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
	}
}

static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx, int hw_decrypt)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
}

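/*
 * Validate the staging RXON command and collect a bitmask of consistency
 * errors (band vs. flags, multicast addresses, basic rates, AID, channel);
 * returns -EINVAL and warns if anything is inconsistent.
 */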
static int iwl_check_rxon_cmd(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;
	u32 errors = 0;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			errors |= BIT(0);
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			errors |= BIT(1);
		}
	} else {
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			errors |= BIT(2);
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			errors |= BIT(3);
		}
	}
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		errors |= BIT(4);
	}

	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		errors |= BIT(5);
	}

	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		errors |= BIT(6);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		errors |= BIT(7);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect\n");
		errors |= BIT(8);
	}

	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		errors |= BIT(9);
	}

	if (rxon->channel == 0) {
		IWL_WARN(priv, "zero channel is invalid\n");
		errors |= BIT(10);
	}

	WARN(errors, "Invalid RXON (%#x), channel %d",
	     errors, le16_to_cpu(rxon->channel));

	return errors ? -EINVAL : 0;
}

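/*
 * Decide whether the staging configuration requires a full RXON command or
 * whether the lighter RXON_ASSOC is sufficient.  Any change to fields that
 * RXON_ASSOC cannot update (addresses, device type, channel, band, basic HT
 * rates, association state) forces a full RXON.
 */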
static int iwl_full_rxon_required(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       (c1), (c2));			\
		return 1;					\
	}

	CHK(!iwl_is_associated_ctx(ctx));
	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
			      active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}

#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     enum iwl_rxon_context_id ctxid)
{
	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
			le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
			le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
			rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
			le16_to_cpu(rxon->assoc_id));
}
#endif

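/*
 * Derive the CCK and OFDM basic-rate bitmaps for the staging RXON from the
 * BSS's advertised basic rates, adding the mandatory rates below the lowest
 * basic rate present in each modulation class.
 */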
static void iwl_calc_basic_rates(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	u8 cck = 0;
	u8 ofdm = 0;

	if (ctx->vif) {
		struct ieee80211_supported_band *sband;
		unsigned long basic = ctx->vif->bss_conf.basic_rates;
		int i;

		sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];

		for_each_set_bit(i, &basic, BITS_PER_LONG) {
			int hw = sband->bitrates[i].hw_value;
			if (hw >= IWL_FIRST_OFDM_RATE) {
				ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
				if (lowest_present_ofdm > hw)
					lowest_present_ofdm = hw;
			} else {
				BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

				cck |= BIT(hw);
				if (lowest_present_cck > hw)
					lowest_present_cck = hw;
			}
		}
	}

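	/*
	 * OFDM: include each mandatory rate (6, 12, 24 Mbps) that lies below
	 * the lowest configured basic OFDM rate; 6 Mbps is always included.
	 */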
	if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
	if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;

	ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;

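	/*
	 * CCK: same rule for the mandatory 2, 5.5 and 11 Mbps rates;
	 * 1 Mbps is always included.
	 */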
	if (IWL_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
	if (IWL_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
	if (IWL_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;

	cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;

	IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
		       cck, ofdm);

	ctx->staging.cck_basic_rates = cck;
	ctx->staging.ofdm_basic_rates = ofdm;
}

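/*
 * Commit the staging RXON to the hardware.
 *
 * Validates the staging configuration, then either sends the lightweight
 * RXON_ASSOC (when no full tune is needed) or tears down the association,
 * updates PAN parameters and reconnects with the new RXON.  On success the
 * active configuration is updated from staging.
 */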
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *active = (void *)&ctx->active;
	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (!iwl_is_alive(priv))
		return -EBUSY;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	if (!ctx->is_active)
		return 0;

	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	iwl_calc_basic_rates(priv, ctx);

	if (!priv->hw_params.use_rts_for_aggregation)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;

	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

	iwl_print_rx_config_cmd(priv, ctx->ctxid);
	ret = iwl_check_rxon_cmd(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
	    (priv->switch_channel != ctx->staging.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
			      le16_to_cpu(priv->switch_channel));
		iwl_chswitch_done(priv, false);
	}

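	/*
	 * If no full RXON is needed, the lighter RXON_ASSOC command can
	 * update the current configuration in place.
	 */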
	if (!iwl_full_rxon_required(priv, ctx)) {
		ret = iwlagn_send_rxon_assoc(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active, &ctx->staging, sizeof(*active));

		iwl_set_tx_power(priv, priv->tx_power_next, false);

		iwl_power_update_mode(priv, true);

		return 0;
	}

	iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto);

	IWL_DEBUG_INFO(priv,
		       "Going to commit RXON\n"
		       " * with%s RXON_FILTER_ASSOC_MSK\n"
		       " * channel = %d\n"
		       " * bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(ctx->staging.channel),
		       ctx->staging.bssid_addr);

	ret = iwlagn_rxon_disconn(priv, ctx);
	if (ret)
		return ret;

	ret = iwlagn_set_pan_params(priv);
	if (ret)
		return ret;

	if (new_assoc)
		return iwlagn_rxon_connect(priv, ctx);

	return 0;
}

void iwlagn_config_ht40(struct ieee80211_conf *conf,
			struct iwl_rxon_context *ctx)
{
	if (conf_is_ht40_minus(conf)) {
		ctx->ht.extension_chan_offset =
			IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		ctx->ht.is_40mhz = true;
	} else if (conf_is_ht40_plus(conf)) {
		ctx->ht.extension_chan_offset =
			IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		ctx->ht.is_40mhz = true;
	} else {
		ctx->ht.extension_chan_offset =
			IEEE80211_HT_PARAM_CHA_SEC_NONE;
		ctx->ht.is_40mhz = false;
	}
}

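/*
 * mac80211 config callback: propagate SMPS, channel, HT40, power-save and
 * TX power changes into the per-context staging RXON and commit whichever
 * contexts now differ from their active configuration.
 */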
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	int ret = 0;

	IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);

	mutex_lock(&priv->mutex);

	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
		goto out;
	}

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		priv->current_ht_config.smps = conf->smps_mode;

		for_each_context(priv, ctx)
			iwlagn_set_rxon_chain(priv, ctx);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		for_each_context(priv, ctx) {
			if (ctx->ht.enabled != conf_is_ht(conf))
				ctx->ht.enabled = conf_is_ht(conf);

			if (ctx->ht.enabled) {
				if (!ctx->ht.is_40mhz ||
				    !iwl_is_associated_ctx(ctx))
					iwlagn_config_ht40(conf, ctx);
			} else
				ctx->ht.is_40mhz = false;

			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			if (le16_to_cpu(ctx->staging.channel) !=
			    channel->hw_value)
				ctx->staging.flags = 0;

			iwl_set_rxon_channel(priv, channel, ctx);
			iwl_set_rxon_ht(priv, &priv->current_ht_config);

			iwl_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		iwl_update_bcast_stations(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
		       IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
				   priv->tx_power_user_lmt, conf->power_level);

		iwl_set_tx_power(priv, conf->power_level, false);
	}

	for_each_context(priv, ctx) {
		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			continue;
		iwlagn_commit_rxon(priv, ctx);
	}
 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}

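/*
 * Decide whether more than one RX chain is needed for this context based
 * on the peer's HT MCS capabilities (single- vs. multi-stream), and update
 * the shared single_chain_sufficient hint accordingly.
 */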
static void iwlagn_check_needed_chains(struct iwl_priv *priv,
				       struct iwl_rxon_context *ctx,
				       struct ieee80211_bss_conf *bss_conf)
{
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_rxon_context *tmp;
	struct ieee80211_sta *sta;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta_ht_cap *ht_cap;
	bool need_multiple;

	lockdep_assert_held(&priv->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (!sta) {
			need_multiple = false;
			rcu_read_unlock();
			break;
		}

		ht_cap = &sta->ht_cap;

		need_multiple = true;

		if (ht_cap->mcs.rx_mask[1] == 0 &&
		    ht_cap->mcs.rx_mask[2] == 0) {
			need_multiple = false;
		} else if (!(ht_cap->mcs.tx_params &
			     IEEE80211_HT_MCS_TX_DEFINED)) {
			need_multiple = false;
		} else if (ht_cap->mcs.tx_params &
			   IEEE80211_HT_MCS_TX_RX_DIFF) {
			int maxstreams;

			maxstreams = (ht_cap->mcs.tx_params &
				      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
			maxstreams >>=
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (maxstreams <= 1)
				need_multiple = false;
		}

		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		need_multiple = false;
		break;
	default:
		need_multiple = true;
		break;
	}

	ctx->ht_need_multiple_chains = need_multiple;

	if (!need_multiple) {
		for_each_context(priv, tmp) {
			if (!tmp->vif)
				continue;
			if (tmp->ht_need_multiple_chains) {
				need_multiple = true;
				break;
			}
		}
	}

	ht_conf->single_chain_sufficient = !need_multiple;
}

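/*
 * Restart chain-noise calibration: clear the accumulated per-chain noise
 * and signal data and ask the firmware to reset its calibration state.
 */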
static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
	int ret;

	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
		return;

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
	    iwl_is_any_associated(priv)) {
		struct iwl_calib_chain_noise_reset_cmd cmd;

		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;

		memset(&cmd, 0, sizeof(cmd));
		iwl_set_calib_hdr(&cmd.hdr,
			priv->phy_calib_chain_noise_reset_cmd);
		ret = iwl_dvm_send_cmd_pdu(priv,
					   REPLY_PHY_CALIBRATION_CMD,
					   0, sizeof(cmd), &cmd);
		if (ret)
			IWL_ERR(priv,
				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
	}
}

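/*
 * mac80211 BSS info callback: fold association state, QoS, HT operation,
 * protection and beacon changes into the staging RXON, commit it when it
 * differs from the active configuration, and kick off post-association
 * calibrations.
 */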
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;
	bool force = false;

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
		iwlagn_lift_passive_no_rx(priv);
	}

	if (unlikely(!iwl_is_ready(priv))) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (unlikely(!ctx->vif)) {
		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_BEACON_INT)
		force = true;

	if (changes & BSS_CHANGED_QOS) {
		ctx->qos_data.qos_active = bss_conf->qos;
		iwlagn_update_qos(priv, ctx);
	}

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->sync_tsf;
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

			if (ctx->ctxid == IWL_RXON_CTX_BSS)
				priv->have_rekey_data = false;
		}

		iwlagn_bt_coex_rssi_monitor(priv);
	}

	if (ctx->ht.enabled) {
		ctx->ht.protection = bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_PROTECTION;
		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
		iwlagn_check_needed_chains(priv, ctx, bss_conf);
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
	}

	iwlagn_set_rxon_chain(priv, ctx);

	if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

	if (bss_conf->use_cts_prot)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
	else
		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		if (vif->bss_conf.enable_beacon) {
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = ctx;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = NULL;
		}
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!bss_conf->assoc)
			ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			ctx->staging.filter_flags &=
				~RXON_FILTER_BCON_AWARE_MSK;
	}

	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwlagn_commit_rxon(priv, ctx);

	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
			iwl_power_update_mode(priv, false);

		iwlagn_chain_noise_reset(priv);
		priv->start_calib = 1;
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = iwlagn_manage_ibss_station(priv, vif,
						 bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
		if (iwlagn_update_beacon(priv, vif))
			IWL_ERR(priv, "Error updating beacon\n");
	}

	mutex_unlock(&priv->mutex);
}

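/*
 * Called once a scan completes: restore the power and TX power settings
 * that were deferred during the scan, re-commit any context whose staging
 * RXON changed meanwhile, and refresh the PAN slot parameters.
 */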
void iwlagn_post_scan(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
	iwl_set_tx_power(priv, priv->tx_power_next, false);

	for_each_context(priv, ctx)
		if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			iwlagn_commit_rxon(priv, ctx);

	iwlagn_set_pan_params(priv);
}