#include <linux/etherdevice.h>
#include "iwl-trans.h"
#include "iwl-modparams.h"
#include "dev.h"
#include "agn.h"
#include "calib.h"

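/*
 * Initialise the staging RXON with defaults derived from the interface
 * type (or the "unused" device type when no vif is bound to the context).
 */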
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
					    RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_MONITOR:
		ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* disabled: short preamble is handled per-BSS in
	 * iwlagn_bss_info_changed() */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ctx->staging.channel =
		cpu_to_le16(priv->hw->conf.chandef.chan->hw_value);
	priv->band = priv->hw->conf.chandef.chan->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	/* clear both MIXED and PURE40 channel-mode flags */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
				RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}

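/*
 * Send the BSS RXON with the ASSOC filter flag cleared, restoring the
 * caller's filter_flags afterwards.
 */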
static int iwlagn_disable_bss(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	__le32 old_filter = send->filter_flags;
	int ret;

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				   0, sizeof(*send), send);

	send->filter_flags = old_filter;

	if (ret)
		IWL_DEBUG_QUIET_RFKILL(priv,
			"Error clearing ASSOC_MSK on BSS (%d)\n", ret);

	return ret;
}

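/*
 * Deactivate the PAN context: send an unassociated P2P RXON and wait for
 * the WIPAN deactivation-complete notification from the firmware.
 */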
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;
	static const u16 deactivate_cmd[] = {
		REPLY_WIPAN_DEACTIVATION_COMPLETE
	};

	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
				   deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
				   NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				   0, sizeof(*send), send);

	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		iwl_remove_notification(&priv->notif_wait, &disable_wait);
	} else {
		ret = iwl_wait_notification(&priv->notif_wait,
					    &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}

static int iwlagn_disconn_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	__le32 old_filter = send->filter_flags;
	int ret;

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
				   sizeof(*send), send);

	send->filter_flags = old_filter;

	return ret;
}

static void iwlagn_update_qos(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx)
{
	int ret;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
		       ctx->qos_data.qos_active,
		       ctx->qos_data.def_qos_parm.qos_flags);

	ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
				   sizeof(struct iwl_qosparam_cmd),
				   &ctx->qos_data.def_qos_parm);
	if (ret)
		IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}

static int iwlagn_update_beacon(struct iwl_priv *priv,
				struct ieee80211_vif *vif)
{
	lockdep_assert_held(&priv->mutex);

	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
	if (!priv->beacon_skb)
		return -ENOMEM;
	return iwlagn_send_beacon_cmd(priv);
}

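/*
 * Send the lightweight RXON_ASSOC command if any of the association-related
 * fields differ between the staging and active RXON; skip it otherwise.
 */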
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	int ret = 0;
	struct iwl_rxon_assoc_cmd rxon_assoc;
	const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
	const struct iwl_rxon_cmd *rxon2 = &ctx->active;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->ofdm_ht_triple_stream_basic_rates ==
	     rxon2->ofdm_ht_triple_stream_basic_rates) &&
	    (rxon1->acquisition_data == rxon2->acquisition_data) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved1 = 0;
	rxon_assoc.reserved2 = 0;
	rxon_assoc.reserved3 = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
		ctx->staging.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
		ctx->staging.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
		ctx->staging.ofdm_ht_triple_stream_basic_rates;
	rxon_assoc.acquisition_data = ctx->staging.acquisition_data;

	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
				   CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
	return ret;
}

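/*
 * Clamp the beacon interval to something the uCode can handle: fall back to
 * the default when none is configured, and divide an over-large interval
 * down so that it does not exceed max_beacon_val.
 */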
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	/* if mac80211 hasn't given us a beacon interval, use the default */
	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * If the configured beacon interval is larger than the uCode limit,
	 * divide it down by an integer factor large enough that the result
	 * fits within max_beacon_val.
	 */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

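/*
 * Build and send the RXON timing command for a context: TSF timestamp,
 * listen interval, beacon interval (borrowed from the other context when
 * the BSS and PAN contexts have to stay in sync), beacon timer init value
 * and DTIM period.
 */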
static int iwl_send_rxon_timing(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/* ATIM window is not provided by mac80211 for IBSS yet; keep it 0 */
	ctx->timing.atim_window = 0;

	/* keep the PAN and BSS beacon intervals in sync when both are used */
	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp; /* do_div() modifies its argument, use a copy */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				    0, sizeof(ctx->timing), &ctx->timing);
}

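/*
 * Take the context out of the associated state: send an unassociated RXON
 * (tearing down PAN first when needed), then rebuild the broadcast station,
 * restore the driver's station table and default WEP keys, and finally
 * mirror the staging RXON into the active copy.
 */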
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
	} else {
		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
		if (ret)
			return ret;
		if (ctx->vif) {
			ret = iwl_send_rxon_timing(priv, ctx);
			if (ret) {
				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
				return ret;
			}
			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
		}
	}
	if (ret)
		return ret;

	/*
	 * The unassociated RXON clears the uCode station table, so restore
	 * the driver's stations and default WEP keys afterwards.
	 */
	iwl_clear_ucode_stations(priv, ctx);

	iwl_update_bcast_station(priv, ctx);
	iwl_restore_stations(priv, ctx);
	ret = iwl_restore_default_wep_keys(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
		return ret;
	}

	memcpy(active, &ctx->staging, sizeof(*active));
	return 0;
}

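/*
 * Validate and apply a user TX power limit. The new value is deferred while
 * scanning or while an RXON change is pending (unless forced), and rolled
 * back if the firmware rejects the command.
 */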
static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
		return 0;

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan-complete and commit_rxon pick up tx_power_next, so keep it
	 * current even if the command is deferred below */
	priv->tx_power_next = tx_power;

	/* defer while scanning or while an RXON change is pending */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = iwlagn_send_tx_power(priv);

	/* if the command failed, restore the original tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}

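/*
 * Program the new RXON and bring the context up: send timing (BSS only),
 * update QoS, push the AP beacon if needed, commit the full RXON, then
 * send the IBSS beacon, reset sensitivity calibration and re-send TX power.
 */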
static int iwlagn_rxon_connect(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	/* send RXON timing first (BSS context only) */
	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwl_send_rxon_timing(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
			return ret;
		}
	}

	iwlagn_update_qos(priv, ctx);

	/*
	 * AP mode: push the beacon template before sending the associated
	 * RXON; failing to do so is treated as fatal.
	 */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
		ret = iwlagn_update_beacon(priv, ctx->vif);
		if (ret) {
			IWL_ERR(priv,
				"Error sending required beacon (%d)!\n",
				ret);
			return ret;
		}
	}

	priv->start_calib = 0;

	/* apply the new (associated) configuration */
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
				   sizeof(struct iwl_rxon_cmd), &ctx->staging);
	if (ret) {
		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
		return ret;
	}
	memcpy(active, &ctx->staging, sizeof(*active));

	/* IBSS: push the beacon now that the RXON is set */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
		if (iwlagn_update_beacon(priv, ctx->vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	iwl_init_sensitivity(priv);

	/* re-send TX power; force it so it is not deferred */
	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}

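/*
 * Configure the WIPAN time-slotting parameters: split the beacon interval
 * between the BSS and PAN slots, biasing the split while scanning or while
 * one of the contexts is not yet associated.
 */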
int iwlagn_set_pan_params(struct iwl_priv *priv)
{
	struct iwl_wipan_params_cmd cmd;
	struct iwl_rxon_context *ctx_bss, *ctx_pan;
	int slot0 = 300, slot1 = 0;
	int ret;

	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
		return 0;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	lockdep_assert_held(&priv->mutex);

	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];

	if (!ctx_pan->is_active)
		return 0;

	memset(&cmd, 0, sizeof(cmd));

	cmd.num_slots = 2;

	cmd.slots[0].type = 0;
	cmd.slots[1].type = 1;

	if (ctx_bss->vif && ctx_pan->vif) {
		int bcnint = ctx_pan->beacon_int;
		int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;

		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);

		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
		    bcnint &&
		    bcnint != ctx_bss->beacon_int) {
			IWL_ERR(priv,
				"beacon intervals don't match (%d, %d)\n",
				ctx_bss->beacon_int, ctx_pan->beacon_int);
		} else
			bcnint = max_t(int, bcnint,
				       ctx_bss->beacon_int);
		if (!bcnint)
			bcnint = DEFAULT_BEACON_INTERVAL;
		slot0 = bcnint / 2;
		slot1 = bcnint - slot0;

		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
		    (!ctx_bss->vif->bss_conf.idle &&
		     !ctx_bss->vif->bss_conf.assoc)) {
			slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		} else if (!ctx_pan->vif->bss_conf.idle &&
			   !ctx_pan->vif->bss_conf.assoc) {
			slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot0 = IWL_MIN_SLOT_TIME;
		}
	} else if (ctx_pan->vif) {
		slot0 = 0;
		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
			ctx_pan->beacon_int;
		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);

		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
			slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		}
	}

	cmd.slots[0].width = cpu_to_le16(slot0);
	cmd.slots[1].width = cpu_to_le16(slot1);

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
				   sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);

	return ret;
}

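/*
 * Translate the per-context HT configuration (protection mode and 40 MHz
 * extension channel offset) into RXON channel-mode and protection flags.
 */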
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
				 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
				 RXON_FLG_HT40_PROT_MSK |
				 RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
				   RXON_FLG_HT_OPERATING_MODE_POS);

	/* set channel mode: legacy 20 MHz, pure 40 MHz, or mixed */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		if (ctx->ht.protection ==
		    IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* the control channel is on the opposite side of the
			 * extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* the control channel is on the opposite side of the
			 * extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	iwlagn_set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}

void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_set_rxon_ht(priv, ht_conf, ctx);
}

/*
 * iwl_set_rxon_channel - set the band and channel values in the staging RXON
 *
 * Only updates the staging RXON; nothing is sent to the device here.
 */
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			  struct iwl_rxon_context *ctx)
{
	enum nl80211_band band = ch->band;
	u16 channel = ch->hw_value;

	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == NL80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
}

void iwl_set_flags_for_band(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    enum nl80211_band band,
			    struct ieee80211_vif *vif)
{
	if (band == NL80211_BAND_5GHZ) {
		ctx->staging.flags &=
			~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
			  | RXON_FLG_CCK_MSK);
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		if (vif && vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
	}
}

static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx, int hw_decrypt)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
}

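/*
 * iwl_check_rxon_cmd - sanity-check the staging RXON before committing it
 *
 * Returns -EINVAL (and warns) if any of the checked invariants is violated,
 * 0 otherwise.
 */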
static int iwl_check_rxon_cmd(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;
	u32 errors = 0;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			errors |= BIT(0);
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			errors |= BIT(1);
		}
	} else {
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			errors |= BIT(2);
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			errors |= BIT(3);
		}
	}
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		errors |= BIT(4);
	}

	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		errors |= BIT(5);
	}

	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		errors |= BIT(6);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		errors |= BIT(7);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect\n");
		errors |= BIT(8);
	}

	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		errors |= BIT(9);
	}

	if (rxon->channel == 0) {
		IWL_WARN(priv, "zero channel is invalid\n");
		errors |= BIT(10);
	}

	WARN(errors, "Invalid RXON (%#x), channel %d",
	     errors, le16_to_cpu(rxon->channel));

	return errors ? -EINVAL : 0;
}

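/*
 * iwl_full_rxon_required - check whether a full RXON (vs. RXON_ASSOC) is needed
 *
 * Returns 1 when fields that can only be changed by a full RXON differ
 * between the staging and active configuration, 0 otherwise.
 */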
static int iwl_full_rxon_required(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       (c1), (c2));			\
		return 1;					\
	}

	/* these items can only be changed by a full RXON */
	CHK(!iwl_is_associated_ctx(ctx));
	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
			      active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/*
	 * flags, filter_flags and the basic rates can also be updated
	 * through RXON_ASSOC; only a band change or a change of the
	 * association toggle forces a full RXON here.
	 */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}

#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     enum iwl_rxon_context_id ctxid)
{
	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
			le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
			le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
			rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
			le16_to_cpu(rxon->assoc_id));
}
#endif

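/*
 * Derive the CCK and OFDM basic-rate bitmaps for the staging RXON from the
 * vif's configured basic rates, adding the mandatory rates below the lowest
 * rate that is actually in use.
 */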
static void iwl_calc_basic_rates(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	u8 cck = 0;
	u8 ofdm = 0;

	if (ctx->vif) {
		struct ieee80211_supported_band *sband;
		unsigned long basic = ctx->vif->bss_conf.basic_rates;
		int i;

		sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];

		for_each_set_bit(i, &basic, BITS_PER_LONG) {
			int hw = sband->bitrates[i].hw_value;
			if (hw >= IWL_FIRST_OFDM_RATE) {
				ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
				if (lowest_present_ofdm > hw)
					lowest_present_ofdm = hw;
			} else {
				BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

				cck |= BIT(hw);
				if (lowest_present_cck > hw)
					lowest_present_cck = hw;
			}
		}
	}

	/*
	 * Add the mandatory OFDM rates (24, 12 Mbps) that lie below the
	 * lowest OFDM basic rate actually in use; 6 Mbps is always added.
	 */
	if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
	if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
	ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;

	/*
	 * Do the same for CCK: add the rates below the lowest CCK basic
	 * rate in use; 1 Mbps is always added.
	 */
	if (IWL_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
	if (IWL_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
	if (IWL_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
	cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;

	IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
		       cck, ofdm);

	ctx->staging.cck_basic_rates = cck;
	ctx->staging.ofdm_basic_rates = ofdm;
}

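/*
 * iwlagn_commit_rxon - commit the staging RXON to the uCode
 *
 * Uses the lightweight RXON_ASSOC command when possible; otherwise tears the
 * context down (unassociated RXON), reprograms the PAN parameters and, if
 * the new configuration is associated, brings the context back up.
 */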
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *active = (void *)&ctx->active;
	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (!iwl_is_alive(priv))
		return -EBUSY;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	if (!ctx->is_active)
		return 0;

	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	/* recalculate basic rates */
	iwl_calc_basic_rates(priv, ctx);

	/* use CTS-to-self protection unless RTS/CTS is preferred for
	 * aggregation */
	if (!priv->hw_params.use_rts_for_aggregation)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;

	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

	iwl_print_rx_config_cmd(priv, ctx->ctxid);
	ret = iwl_check_rxon_cmd(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/* abort any pending channel switch if the staging channel differs */
	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
	    (priv->switch_channel != ctx->staging.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
			      le16_to_cpu(priv->switch_channel));
		iwl_chswitch_done(priv, false);
	}

	/*
	 * If a full RXON isn't required, the cheaper RXON_ASSOC command is
	 * enough to update the filters and flags for the current radio
	 * configuration.
	 */
	if (!iwl_full_rxon_required(priv, ctx)) {
		ret = iwlagn_send_rxon_assoc(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active, &ctx->staging, sizeof(*active));

		/* TX power may have been deferred while the RXON differed;
		 * send it now */
		iwl_set_tx_power(priv, priv->tx_power_next, false);

		/* refresh the power management state */
		iwl_power_update_mode(priv, true);

		return 0;
	}

	iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.swcrypto);

	IWL_DEBUG_INFO(priv,
		       "Going to commit RXON\n"
		       " * with%s RXON_FILTER_ASSOC_MSK\n"
		       " * channel = %d\n"
		       " * bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(ctx->staging.channel),
		       ctx->staging.bssid_addr);

	/*
	 * Always send the unassociated RXON with the new configuration
	 * first; iwlagn_rxon_connect() re-associates afterwards if the new
	 * configuration requests it.
	 */
	ret = iwlagn_rxon_disconn(priv, ctx);
	if (ret)
		return ret;

	ret = iwlagn_set_pan_params(priv);
	if (ret)
		return ret;

	if (new_assoc)
		return iwlagn_rxon_connect(priv, ctx);

	return 0;
}

void iwlagn_config_ht40(struct ieee80211_conf *conf,
			struct iwl_rxon_context *ctx)
{
	if (conf_is_ht40_minus(conf)) {
		ctx->ht.extension_chan_offset =
			IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		ctx->ht.is_40mhz = true;
	} else if (conf_is_ht40_plus(conf)) {
		ctx->ht.extension_chan_offset =
			IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		ctx->ht.is_40mhz = true;
	} else {
		ctx->ht.extension_chan_offset =
			IEEE80211_HT_PARAM_CHA_SEC_NONE;
		ctx->ht.is_40mhz = false;
	}
}

int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	int ret = 0;

	IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);

	mutex_lock(&priv->mutex);

	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
		goto out;
	}

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		priv->current_ht_config.smps = conf->smps_mode;

		/* recalculate RX chain usage for all contexts */
		for_each_context(priv, ctx)
			iwlagn_set_rxon_chain(priv, ctx);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		for_each_context(priv, ctx) {
			if (ctx->ht.enabled != conf_is_ht(conf))
				ctx->ht.enabled = conf_is_ht(conf);

			if (ctx->ht.enabled) {
				/* don't reconfigure HT40 while associated,
				 * except via channel switch */
				if (!ctx->ht.is_40mhz ||
				    !iwl_is_associated_ctx(ctx))
					iwlagn_config_ht40(conf, ctx);
			} else
				ctx->ht.is_40mhz = false;

			/* default to no protection; the real protection mode
			 * comes later from the BSS HT operation mode */
			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* when actually changing channels, start from a
			 * clean set of RXON flags */
			if (le16_to_cpu(ctx->staging.channel) !=
			    channel->hw_value)
				ctx->staging.flags = 0;

			iwl_set_rxon_channel(priv, channel, ctx);
			iwl_set_rxon_ht(priv, &priv->current_ht_config);

			iwl_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		iwl_update_bcast_stations(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
		       IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
				   priv->tx_power_user_lmt, conf->power_level);

		iwl_set_tx_power(priv, conf->power_level, false);
	}

	/* commit any context whose staging RXON now differs from active */
	for_each_context(priv, ctx) {
		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			continue;
		iwlagn_commit_rxon(priv, ctx);
	}
 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}

static void iwlagn_check_needed_chains(struct iwl_priv *priv,
				       struct iwl_rxon_context *ctx,
				       struct ieee80211_bss_conf *bss_conf)
{
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_rxon_context *tmp;
	struct ieee80211_sta *sta;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta_ht_cap *ht_cap;
	bool need_multiple;

	lockdep_assert_held(&priv->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (!sta) {
			/* no station entry (yet); a single chain is
			 * sufficient for now */
			need_multiple = false;
			rcu_read_unlock();
			break;
		}

		ht_cap = &sta->ht_cap;

		need_multiple = true;

		if (ht_cap->mcs.rx_mask[1] == 0 &&
		    ht_cap->mcs.rx_mask[2] == 0) {
			/* only single-stream RX rates advertised */
			need_multiple = false;
		} else if (!(ht_cap->mcs.tx_params &
			     IEEE80211_HT_MCS_TX_DEFINED)) {
			/* TX MCS parameters not defined */
			need_multiple = false;
		} else if (ht_cap->mcs.tx_params &
			   IEEE80211_HT_MCS_TX_RX_DIFF) {
			int maxstreams;

			/* TX and RX MCS sets differ: check the advertised
			 * number of TX streams */
			maxstreams = (ht_cap->mcs.tx_params &
				      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
			maxstreams >>=
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (maxstreams <= 1)
				need_multiple = false;
		}

		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		need_multiple = false;
		break;
	default:
		need_multiple = true;
		break;
	}

	ctx->ht_need_multiple_chains = need_multiple;

	if (!need_multiple) {
		/* check whether any other context still needs them */
		for_each_context(priv, tmp) {
			if (!tmp->vif)
				continue;
			if (tmp->ht_need_multiple_chains) {
				need_multiple = true;
				break;
			}
		}
	}

	ht_conf->single_chain_sufficient = !need_multiple;
}

static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
	int ret;

	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
		return;

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
	    iwl_is_any_associated(priv)) {
		struct iwl_calib_chain_noise_reset_cmd cmd;

		/* clear the data for the chain noise calibration algorithm */
		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;

		memset(&cmd, 0, sizeof(cmd));
		iwl_set_calib_hdr(&cmd.hdr,
				  priv->phy_calib_chain_noise_reset_cmd);
		ret = iwl_dvm_send_cmd_pdu(priv,
					   REPLY_PHY_CALIBRATION_CMD,
					   0, sizeof(cmd), &cmd);
		if (ret)
			IWL_ERR(priv,
				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
	}
}

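/*
 * mac80211 bss_info_changed handler: fold the updated BSS configuration
 * (QoS, association state, HT operation, protection, beaconing) into the
 * context's staging RXON and commit it when it differs from the active one.
 */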
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;
	bool force = false;

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
		/* going idle: the passive-no-rx workaround can be lifted */
		iwlagn_lift_passive_no_rx(priv);
	}

	if (unlikely(!iwl_is_ready(priv))) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (unlikely(!ctx->vif)) {
		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_BEACON_INT)
		force = true;

	if (changes & BSS_CHANGED_QOS) {
		ctx->qos_data.qos_active = bss_conf->qos;
		iwlagn_update_qos(priv, ctx);
	}

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->sync_tsf;
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

			if (ctx->ctxid == IWL_RXON_CTX_BSS)
				priv->have_rekey_data = false;
		}

		iwlagn_bt_coex_rssi_monitor(priv);
	}

	if (ctx->ht.enabled) {
		ctx->ht.protection = bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_PROTECTION;
		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
		iwlagn_check_needed_chains(priv, ctx, bss_conf);
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
	}

	iwlagn_set_rxon_chain(priv, ctx);

	if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

	if (bss_conf->use_cts_prot)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
	else
		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		if (vif->bss_conf.enable_beacon) {
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = ctx;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = NULL;
		}
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!bss_conf->assoc)
			ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			ctx->staging.filter_flags &=
				~RXON_FILTER_BCON_AWARE_MSK;
	}

	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwlagn_commit_rxon(priv, ctx);

	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
		/*
		 * Power management is updated by the chain noise calibration
		 * when it completes; if it has already completed, update the
		 * power mode now.
		 */
		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
			iwl_power_update_mode(priv, false);

		/* kick off the chain noise and sensitivity calibrations */
		iwlagn_chain_noise_reset(priv);
		priv->start_calib = 1;
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = iwlagn_manage_ibss_station(priv, vif,
						 bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
		if (iwlagn_update_beacon(priv, vif))
			IWL_ERR(priv, "Error updating beacon\n");
	}

	mutex_unlock(&priv->mutex);
}

void iwlagn_post_scan(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	/*
	 * Power and TX power settings are not committed while a scan is
	 * pending; apply the pending values now.
	 */
	iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
	iwl_set_tx_power(priv, priv->tx_power_next, false);

	/*
	 * RXON changes may also have been deferred during the scan, so
	 * commit any context whose staging RXON differs from the active one.
	 */
	for_each_context(priv, ctx)
		if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			iwlagn_commit_rxon(priv, ctx);

	iwlagn_set_pan_params(priv);
}