1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/slab.h>
34#include <linux/delay.h>
35#include <linux/sched.h>
36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_arp.h>
40
41#include <net/mac80211.h>
42
43#include <asm/div64.h>
44
45#include "iwl-eeprom-read.h"
46#include "iwl-eeprom-parse.h"
47#include "iwl-io.h"
48#include "iwl-trans.h"
49#include "iwl-op-mode.h"
50#include "iwl-drv.h"
51#include "iwl-modparams.h"
52#include "iwl-prph.h"
53
54#include "dev.h"
55#include "calib.h"
56#include "agn.h"
57
58
59
60
61
62
63
64
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
66MODULE_DESCRIPTION(DRV_DESCRIPTION);
67MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
68MODULE_LICENSE("GPL");
69
70
71
72
73
74static const struct iwl_hcmd_names iwl_dvm_cmd_names[] = {
75 HCMD_NAME(REPLY_ALIVE),
76 HCMD_NAME(REPLY_ERROR),
77 HCMD_NAME(REPLY_ECHO),
78 HCMD_NAME(REPLY_RXON),
79 HCMD_NAME(REPLY_RXON_ASSOC),
80 HCMD_NAME(REPLY_QOS_PARAM),
81 HCMD_NAME(REPLY_RXON_TIMING),
82 HCMD_NAME(REPLY_ADD_STA),
83 HCMD_NAME(REPLY_REMOVE_STA),
84 HCMD_NAME(REPLY_REMOVE_ALL_STA),
85 HCMD_NAME(REPLY_TX),
86 HCMD_NAME(REPLY_TXFIFO_FLUSH),
87 HCMD_NAME(REPLY_WEPKEY),
88 HCMD_NAME(REPLY_LEDS_CMD),
89 HCMD_NAME(REPLY_TX_LINK_QUALITY_CMD),
90 HCMD_NAME(COEX_PRIORITY_TABLE_CMD),
91 HCMD_NAME(COEX_MEDIUM_NOTIFICATION),
92 HCMD_NAME(COEX_EVENT_CMD),
93 HCMD_NAME(TEMPERATURE_NOTIFICATION),
94 HCMD_NAME(CALIBRATION_CFG_CMD),
95 HCMD_NAME(CALIBRATION_RES_NOTIFICATION),
96 HCMD_NAME(CALIBRATION_COMPLETE_NOTIFICATION),
97 HCMD_NAME(REPLY_QUIET_CMD),
98 HCMD_NAME(REPLY_CHANNEL_SWITCH),
99 HCMD_NAME(CHANNEL_SWITCH_NOTIFICATION),
100 HCMD_NAME(REPLY_SPECTRUM_MEASUREMENT_CMD),
101 HCMD_NAME(SPECTRUM_MEASURE_NOTIFICATION),
102 HCMD_NAME(POWER_TABLE_CMD),
103 HCMD_NAME(PM_SLEEP_NOTIFICATION),
104 HCMD_NAME(PM_DEBUG_STATISTIC_NOTIFIC),
105 HCMD_NAME(REPLY_SCAN_CMD),
106 HCMD_NAME(REPLY_SCAN_ABORT_CMD),
107 HCMD_NAME(SCAN_START_NOTIFICATION),
108 HCMD_NAME(SCAN_RESULTS_NOTIFICATION),
109 HCMD_NAME(SCAN_COMPLETE_NOTIFICATION),
110 HCMD_NAME(BEACON_NOTIFICATION),
111 HCMD_NAME(REPLY_TX_BEACON),
112 HCMD_NAME(WHO_IS_AWAKE_NOTIFICATION),
113 HCMD_NAME(REPLY_TX_POWER_DBM_CMD),
114 HCMD_NAME(QUIET_NOTIFICATION),
115 HCMD_NAME(REPLY_TX_PWR_TABLE_CMD),
116 HCMD_NAME(REPLY_TX_POWER_DBM_CMD_V1),
117 HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
118 HCMD_NAME(MEASURE_ABORT_NOTIFICATION),
119 HCMD_NAME(REPLY_BT_CONFIG),
120 HCMD_NAME(REPLY_STATISTICS_CMD),
121 HCMD_NAME(STATISTICS_NOTIFICATION),
122 HCMD_NAME(REPLY_CARD_STATE_CMD),
123 HCMD_NAME(CARD_STATE_NOTIFICATION),
124 HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
125 HCMD_NAME(REPLY_CT_KILL_CONFIG_CMD),
126 HCMD_NAME(SENSITIVITY_CMD),
127 HCMD_NAME(REPLY_PHY_CALIBRATION_CMD),
128 HCMD_NAME(REPLY_WIPAN_PARAMS),
129 HCMD_NAME(REPLY_WIPAN_RXON),
130 HCMD_NAME(REPLY_WIPAN_RXON_TIMING),
131 HCMD_NAME(REPLY_WIPAN_RXON_ASSOC),
132 HCMD_NAME(REPLY_WIPAN_QOS_PARAM),
133 HCMD_NAME(REPLY_WIPAN_WEPKEY),
134 HCMD_NAME(REPLY_WIPAN_P2P_CHANNEL_SWITCH),
135 HCMD_NAME(REPLY_WIPAN_NOA_NOTIFICATION),
136 HCMD_NAME(REPLY_WIPAN_DEACTIVATION_COMPLETE),
137 HCMD_NAME(REPLY_RX_PHY_CMD),
138 HCMD_NAME(REPLY_RX_MPDU_CMD),
139 HCMD_NAME(REPLY_RX),
140 HCMD_NAME(REPLY_COMPRESSED_BA),
141 HCMD_NAME(REPLY_BT_COEX_PRIO_TABLE),
142 HCMD_NAME(REPLY_BT_COEX_PROT_ENV),
143 HCMD_NAME(REPLY_BT_COEX_PROFILE_NOTIF),
144 HCMD_NAME(REPLY_D3_CONFIG),
145 HCMD_NAME(REPLY_WOWLAN_PATTERNS),
146 HCMD_NAME(REPLY_WOWLAN_WAKEUP_FILTER),
147 HCMD_NAME(REPLY_WOWLAN_TSC_RSC_PARAMS),
148 HCMD_NAME(REPLY_WOWLAN_TKIP_PARAMS),
149 HCMD_NAME(REPLY_WOWLAN_KEK_KCK_MATERIAL),
150 HCMD_NAME(REPLY_WOWLAN_GET_STATUS),
151};
152
153static const struct iwl_hcmd_arr iwl_dvm_groups[] = {
154 [0x0] = HCMD_ARR(iwl_dvm_cmd_names),
155};
156
157static const struct iwl_op_mode_ops iwl_dvm_ops;
158
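/*
 * Recompute the RX chain selection for every RXON context and, where the
 * active and staging RX chain settings differ, commit the new RXON to the
 * firmware.
 */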
159void iwl_update_chain_flags(struct iwl_priv *priv)
160{
161 struct iwl_rxon_context *ctx;
162
163 for_each_context(priv, ctx) {
164 iwlagn_set_rxon_chain(priv, ctx);
165 if (ctx->active.rx_chain != ctx->staging.rx_chain)
166 iwlagn_commit_rxon(priv, ctx);
167 }
168}
169
170
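/*
 * Locate the TIM element in the beacon frame and record its offset
 * (tim_idx) and length (tim_size) in the TX beacon command; warn if no
 * TIM element can be found.
 */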
171static void iwl_set_beacon_tim(struct iwl_priv *priv,
172 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
173 u8 *beacon, u32 frame_size)
174{
175 u16 tim_idx;
176 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
177
178
179
180
181
182 tim_idx = mgmt->u.beacon.variable - beacon;
183
184
185 while ((tim_idx < (frame_size - 2)) &&
186 (beacon[tim_idx] != WLAN_EID_TIM))
187 tim_idx += beacon[tim_idx+1] + 2;
188
189
190 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
191 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
192 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
193 } else
194 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
195}
196
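/*
 * Build and send REPLY_TX_BEACON for the current beacon skb: fragment 0
 * carries the TX parameters (station id, flags, TIM location, rate) and
 * fragment 1 the beacon frame itself, both mapped NOCOPY.  Caller must
 * hold priv->mutex.
 */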
197int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
198{
199 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
200 struct iwl_host_cmd cmd = {
201 .id = REPLY_TX_BEACON,
202 };
203 struct ieee80211_tx_info *info;
204 u32 frame_size;
205 u32 rate_flags;
206 u32 rate;
207
208
209
210
211
212
213 lockdep_assert_held(&priv->mutex);
214
215 if (!priv->beacon_ctx) {
216 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
217 return 0;
218 }
219
220 if (WARN_ON(!priv->beacon_skb))
221 return -EINVAL;
222
223
224 if (!priv->beacon_cmd)
225 priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
226 tx_beacon_cmd = priv->beacon_cmd;
227 if (!tx_beacon_cmd)
228 return -ENOMEM;
229
230 frame_size = priv->beacon_skb->len;
231
232
233 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
234 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
235 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
236 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
237 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
238
239
240 iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
241 frame_size);
242
243
244 info = IEEE80211_SKB_CB(priv->beacon_skb);
245
246
247
248
249
250
251 if (info->control.rates[0].idx < 0 ||
252 info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
253 rate = 0;
254 else
255 rate = info->control.rates[0].idx;
256
257 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
258 priv->nvm_data->valid_tx_ant);
259 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
260
261
262 if (info->band == NL80211_BAND_5GHZ)
263 rate += IWL_FIRST_OFDM_RATE;
264 else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
265 rate_flags |= RATE_MCS_CCK_MSK;
266
267 tx_beacon_cmd->tx.rate_n_flags =
268 iwl_hw_set_rate_n_flags(rate, rate_flags);
269
270
271 cmd.len[0] = sizeof(*tx_beacon_cmd);
272 cmd.data[0] = tx_beacon_cmd;
273 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
274 cmd.len[1] = frame_size;
275 cmd.data[1] = priv->beacon_skb->data;
276 cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
277
278 return iwl_dvm_send_cmd(priv, &cmd);
279}
280
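/*
 * Worker that fetches a fresh beacon from mac80211 for the beacon context
 * and resends the beacon command; non-AP interfaces are skipped.
 */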
281static void iwl_bg_beacon_update(struct work_struct *work)
282{
283 struct iwl_priv *priv =
284 container_of(work, struct iwl_priv, beacon_update);
285 struct sk_buff *beacon;
286
287 mutex_lock(&priv->mutex);
288 if (!priv->beacon_ctx) {
289 IWL_ERR(priv, "updating beacon w/o beacon context!\n");
290 goto out;
291 }
292
293 if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
294
295
296
297
298
299
300 goto out;
301 }
302
303
304 beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
305 if (!beacon) {
306 IWL_ERR(priv, "update beacon failed -- keeping old\n");
307 goto out;
308 }
309
310
311 dev_kfree_skb(priv->beacon_skb);
312
313 priv->beacon_skb = beacon;
314
315 iwlagn_send_beacon_cmd(priv);
316 out:
317 mutex_unlock(&priv->mutex);
318}
319
320static void iwl_bg_bt_runtime_config(struct work_struct *work)
321{
322 struct iwl_priv *priv =
323 container_of(work, struct iwl_priv, bt_runtime_config);
324
325 mutex_lock(&priv->mutex);
326 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
327 goto out;
328
329
330 if (!iwl_is_ready_rf(priv))
331 goto out;
332
333 iwlagn_send_advance_bt_config(priv);
334out:
335 mutex_unlock(&priv->mutex);
336}
337
338static void iwl_bg_bt_full_concurrency(struct work_struct *work)
339{
340 struct iwl_priv *priv =
341 container_of(work, struct iwl_priv, bt_full_concurrency);
342 struct iwl_rxon_context *ctx;
343
344 mutex_lock(&priv->mutex);
345
346 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
347 goto out;
348
349
350 if (!iwl_is_ready_rf(priv))
351 goto out;
352
353 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
354 priv->bt_full_concurrent ?
355 "full concurrency" : "3-wire");
356
357
358
359
360
361 for_each_context(priv, ctx) {
362 iwlagn_set_rxon_chain(priv, ctx);
363 iwlagn_commit_rxon(priv, ctx);
364 }
365
366 iwlagn_send_advance_bt_config(priv);
367out:
368 mutex_unlock(&priv->mutex);
369}
370
371int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
372{
373 struct iwl_statistics_cmd statistics_cmd = {
374 .configuration_flags =
375 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
376 };
377
378 if (flags & CMD_ASYNC)
379 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
380 CMD_ASYNC,
381 sizeof(struct iwl_statistics_cmd),
382 &statistics_cmd);
383 else
384 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
385 sizeof(struct iwl_statistics_cmd),
386 &statistics_cmd);
387}
388
389
390
391
392
393
394
395
396
397
398
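/*
 * Timer callback: periodically request statistics from the uCode
 * (asynchronously, without clearing the counters) while the device is up.
 */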
399static void iwl_bg_statistics_periodic(struct timer_list *t)
400{
401 struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
402
403 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
404 return;
405
406
407 if (!iwl_is_ready_rf(priv))
408 return;
409
410 iwl_send_statistics_request(priv, CMD_ASYNC, false);
411}
412
413
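/*
 * Read num_events entries from the continuous event log in device memory,
 * starting at start_idx, and emit them as tracepoints.  Entries are two
 * 32-bit words (event, time) in mode 0 and three words (event, time, data)
 * otherwise.
 */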
414static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
415 u32 start_idx, u32 num_events,
416 u32 capacity, u32 mode)
417{
418 u32 i;
419 u32 ptr;
420 u32 ev, time, data;
421 unsigned long reg_flags;
422
423 if (mode == 0)
424 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
425 else
426 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
427
428
429	if (!iwl_trans_grab_nic_access(priv->trans, &reg_flags))
430 return;
431
432
433 iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
434
435
436
437
438
439
440
441 if (WARN_ON(num_events > capacity - start_idx))
442 num_events = capacity - start_idx;
443
444
445
446
447
448 for (i = 0; i < num_events; i++) {
449 ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
450 time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
451 if (mode == 0) {
452 trace_iwlwifi_dev_ucode_cont_event(
453 priv->trans->dev, 0, time, ev);
454 } else {
455 data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
456 trace_iwlwifi_dev_ucode_cont_event(
457 priv->trans->dev, time, data, ev);
458 }
459 }
460
461	iwl_trans_release_nic_access(priv->trans, &reg_flags);
462}
463
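/*
 * Poll the uCode event log header (capacity, mode, wrap counter, write
 * pointer), work out how many new entries have been written since the
 * last poll -- including the wrap-around cases -- and trace them.
 */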
464static void iwl_continuous_event_trace(struct iwl_priv *priv)
465{
466 u32 capacity;
467 struct {
468 u32 capacity;
469 u32 mode;
470 u32 wrap_counter;
471 u32 write_counter;
472 } __packed read;
473 u32 base;
474 u32 mode;
475 u32 num_wraps;
476 u32 next_entry;
477
478 base = priv->device_pointers.log_event_table;
479 if (iwlagn_hw_valid_rtc_data_addr(base)) {
480 iwl_trans_read_mem_bytes(priv->trans, base,
481 &read, sizeof(read));
482 capacity = read.capacity;
483 mode = read.mode;
484 num_wraps = read.wrap_counter;
485 next_entry = read.write_counter;
486 } else
487 return;
488
489
490
491
492
493
494 if (unlikely(next_entry == capacity))
495 next_entry = 0;
496
497
498
499
500
501
502
503
504 if (unlikely(next_entry < priv->event_log.next_entry &&
505 num_wraps == priv->event_log.num_wraps))
506 num_wraps++;
507
508 if (num_wraps == priv->event_log.num_wraps) {
509 iwl_print_cont_event_trace(
510 priv, base, priv->event_log.next_entry,
511 next_entry - priv->event_log.next_entry,
512 capacity, mode);
513
514 priv->event_log.non_wraps_count++;
515 } else {
516 if (num_wraps - priv->event_log.num_wraps > 1)
517 priv->event_log.wraps_more_count++;
518 else
519 priv->event_log.wraps_once_count++;
520
521 trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev,
522 num_wraps - priv->event_log.num_wraps,
523 next_entry, priv->event_log.next_entry);
524
525 if (next_entry < priv->event_log.next_entry) {
526 iwl_print_cont_event_trace(
527 priv, base, priv->event_log.next_entry,
528 capacity - priv->event_log.next_entry,
529 capacity, mode);
530
531 iwl_print_cont_event_trace(
532 priv, base, 0, next_entry, capacity, mode);
533 } else {
534 iwl_print_cont_event_trace(
535 priv, base, next_entry,
536 capacity - next_entry,
537 capacity, mode);
538
539 iwl_print_cont_event_trace(
540 priv, base, 0, next_entry, capacity, mode);
541 }
542 }
543
544 priv->event_log.num_wraps = num_wraps;
545 priv->event_log.next_entry = next_entry;
546}
547
548
549
550
551
552
553
554
555
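/*
 * Timer callback for continuous uCode event tracing; re-arms itself every
 * UCODE_TRACE_PERIOD for as long as tracing stays enabled.
 */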
556static void iwl_bg_ucode_trace(struct timer_list *t)
557{
558 struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
559
560 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
561 return;
562
563 if (priv->event_log.ucode_trace) {
564 iwl_continuous_event_trace(priv);
565
566 mod_timer(&priv->ucode_trace,
567 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
568 }
569}
570
571static void iwl_bg_tx_flush(struct work_struct *work)
572{
573 struct iwl_priv *priv =
574 container_of(work, struct iwl_priv, tx_flush);
575
576 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
577 return;
578
579
580 if (!iwl_is_ready_rf(priv))
581 return;
582
583 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
584 iwlagn_dev_txfifo_flush(priv);
585}
586
587
588
589
590
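/*
 * Access-category to FIFO/queue mappings, in mac80211 AC order
 * (VO, VI, BE, BK), for the BSS and PAN contexts.
 */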
591static const u8 iwlagn_bss_ac_to_fifo[] = {
592 IWL_TX_FIFO_VO,
593 IWL_TX_FIFO_VI,
594 IWL_TX_FIFO_BE,
595 IWL_TX_FIFO_BK,
596};
597
598static const u8 iwlagn_bss_ac_to_queue[] = {
599 0, 1, 2, 3,
600};
601
602static const u8 iwlagn_pan_ac_to_fifo[] = {
603 IWL_TX_FIFO_VO_IPAN,
604 IWL_TX_FIFO_VI_IPAN,
605 IWL_TX_FIFO_BE_IPAN,
606 IWL_TX_FIFO_BK_IPAN,
607};
608
609static const u8 iwlagn_pan_ac_to_queue[] = {
610 7, 6, 5, 4,
611};
612
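/*
 * Initialize the RXON contexts: the BSS context is always valid, the PAN
 * context only when the uCode advertises IWL_UCODE_TLV_FLAGS_PAN.  Each
 * context gets its host command IDs, station IDs, allowed interface modes
 * and AC-to-queue/FIFO mappings.
 */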
613static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
614{
615 int i;
616
617
618
619
620
621 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
622 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
623 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
624
625 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
626 priv->contexts[i].ctxid = i;
627
628 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
629 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
630 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
631 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
632 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
633 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
634 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
635 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
636 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
637 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
638 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR);
639 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
640 BIT(NL80211_IFTYPE_STATION);
641 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
642 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
643 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
644 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
645 memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
646 iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
647 memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
648 iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));
649
650 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
651 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
652 REPLY_WIPAN_RXON_TIMING;
653 priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
654 REPLY_WIPAN_RXON_ASSOC;
655 priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
656 priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
657 priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
658 priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
659 priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
660 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
661 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
662
663 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
664 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
665 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
666 memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
667 iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
668 memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
669 iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
670 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
671
672 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
673}
674
675static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
676{
677 struct iwl_ct_kill_config cmd;
678 struct iwl_ct_kill_throttling_config adv_cmd;
679 int ret = 0;
680
681 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
682 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
683
684 priv->thermal_throttle.ct_kill_toggle = false;
685
686 if (priv->lib->support_ct_kill_exit) {
687 adv_cmd.critical_temperature_enter =
688 cpu_to_le32(priv->hw_params.ct_kill_threshold);
689 adv_cmd.critical_temperature_exit =
690 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
691
692 ret = iwl_dvm_send_cmd_pdu(priv,
693 REPLY_CT_KILL_CONFIG_CMD,
694 0, sizeof(adv_cmd), &adv_cmd);
695 if (ret)
696 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
697 else
698 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
699 "succeeded, critical temperature enter is %d, "
700 "exit is %d\n",
701 priv->hw_params.ct_kill_threshold,
702 priv->hw_params.ct_kill_exit_threshold);
703 } else {
704 cmd.critical_temperature_R =
705 cpu_to_le32(priv->hw_params.ct_kill_threshold);
706
707 ret = iwl_dvm_send_cmd_pdu(priv,
708 REPLY_CT_KILL_CONFIG_CMD,
709 0, sizeof(cmd), &cmd);
710 if (ret)
711 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
712 else
713 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
714 "succeeded, "
715 "critical temperature is %d\n",
716 priv->hw_params.ct_kill_threshold);
717 }
718}
719
720static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
721{
722 struct iwl_calib_cfg_cmd calib_cfg_cmd;
723 struct iwl_host_cmd cmd = {
724 .id = CALIBRATION_CFG_CMD,
725 .len = { sizeof(struct iwl_calib_cfg_cmd), },
726 .data = { &calib_cfg_cmd, },
727 };
728
729 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
730 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
731 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
732
733 return iwl_dvm_send_cmd(priv, &cmd);
734}
735
736
737static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
738{
739 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
740 .valid = cpu_to_le32(valid_tx_ant),
741 };
742
743 if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
744 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
745 return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
746 sizeof(struct iwl_tx_ant_config_cmd),
747 &tx_ant_cmd);
748 } else {
749 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
750 return -EOPNOTSUPP;
751 }
752}
753
754static void iwl_send_bt_config(struct iwl_priv *priv)
755{
756 struct iwl_bt_cmd bt_cmd = {
757 .lead_time = BT_LEAD_TIME_DEF,
758 .max_kill = BT_MAX_KILL_DEF,
759 .kill_ack_mask = 0,
760 .kill_cts_mask = 0,
761 };
762
763 if (!iwlwifi_mod_params.bt_coex_active)
764 bt_cmd.flags = BT_COEX_DISABLE;
765 else
766 bt_cmd.flags = BT_COEX_ENABLE;
767
768 priv->bt_enable_flag = bt_cmd.flags;
769 IWL_DEBUG_INFO(priv, "BT coex %s\n",
770 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
771
772 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
773 0, sizeof(struct iwl_bt_cmd), &bt_cmd))
774 IWL_ERR(priv, "failed to send BT Coex Config\n");
775}
776
777
778
779
780
781
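/*
 * Called after the runtime uCode reports ALIVE: configure BT coexistence,
 * send the runtime calibration and TX antenna configuration, restore or
 * re-initialize the RXON configuration, and mark the device READY.
 */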
782int iwl_alive_start(struct iwl_priv *priv)
783{
784 int ret = 0;
785 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
786
787 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
788
789
790 set_bit(STATUS_ALIVE, &priv->status);
791
792 if (iwl_is_rfkill(priv))
793 return -ERFKILL;
794
795 if (priv->event_log.ucode_trace) {
796
797 mod_timer(&priv->ucode_trace, jiffies);
798 }
799
800
801 if (priv->lib->bt_params &&
802 priv->lib->bt_params->advanced_bt_coexist) {
803
804 if (priv->lib->bt_params->bt_sco_disable)
805 priv->bt_enable_pspoll = false;
806 else
807 priv->bt_enable_pspoll = true;
808
809 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
810 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
811 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
812 iwlagn_send_advance_bt_config(priv);
813 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
814 priv->cur_rssi_ctx = NULL;
815
816 iwl_send_prio_tbl(priv);
817
818
819 ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
820 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
821 if (ret)
822 return ret;
823 ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
824 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
825 if (ret)
826 return ret;
827 } else if (priv->lib->bt_params) {
828
829
830
831 iwl_send_bt_config(priv);
832 }
833
834
835
836
837 iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);
838
839 ieee80211_wake_queues(priv->hw);
840
841
842 iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);
843
844 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
845 struct iwl_rxon_cmd *active_rxon =
846 (struct iwl_rxon_cmd *)&ctx->active;
847
848 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
849 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
850 } else {
851 struct iwl_rxon_context *tmp;
852
853 for_each_context(priv, tmp)
854 iwl_connection_init_rx_config(priv, tmp);
855
856 iwlagn_set_rxon_chain(priv, ctx);
857 }
858
859 if (!priv->wowlan) {
860
861 iwl_reset_run_time_calib(priv);
862 }
863
864 set_bit(STATUS_READY, &priv->status);
865
866
867 ret = iwlagn_commit_rxon(priv, ctx);
868 if (ret)
869 return ret;
870
871
872 iwl_rf_kill_ct_config(priv);
873
874 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
875
876 return iwl_power_update_mode(priv, true);
877}
878
879
880
881
882
883
884
885
886
887
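/*
 * Forget everything the driver knows about stations: clear the station
 * table, WEP keys and key-mapping state under sta_lock.
 */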
888static void iwl_clear_driver_stations(struct iwl_priv *priv)
889{
890 struct iwl_rxon_context *ctx;
891
892 spin_lock_bh(&priv->sta_lock);
893 memset(priv->stations, 0, sizeof(priv->stations));
894 priv->num_stations = 0;
895
896 priv->ucode_key_table = 0;
897
898 for_each_context(priv, ctx) {
899
900
901
902
903
904
905
906 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
907 ctx->key_mapping_keys = 0;
908 }
909
910 spin_unlock_bh(&priv->sta_lock);
911}
912
913void iwl_down(struct iwl_priv *priv)
914{
915 int exit_pending;
916
917 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
918
919 lockdep_assert_held(&priv->mutex);
920
921 iwl_scan_cancel_timeout(priv, 200);
922
923 exit_pending =
924 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
925
926 iwl_clear_ucode_stations(priv, NULL);
927 iwl_dealloc_bcast_stations(priv);
928 iwl_clear_driver_stations(priv);
929
930
931 priv->bt_status = 0;
932 priv->cur_rssi_ctx = NULL;
933 priv->bt_is_sco = 0;
934 if (priv->lib->bt_params)
935 priv->bt_traffic_load =
936 priv->lib->bt_params->bt_init_traffic_load;
937 else
938 priv->bt_traffic_load = 0;
939 priv->bt_full_concurrent = false;
940 priv->bt_ci_compliance = 0;
941
942
943
944 if (!exit_pending)
945 clear_bit(STATUS_EXIT_PENDING, &priv->status);
946
947 if (priv->mac80211_registered)
948 ieee80211_stop_queues(priv->hw);
949
950 priv->ucode_loaded = false;
951 iwl_trans_stop_device(priv->trans);
952
953
954 atomic_set(&priv->num_aux_in_flight, 0);
955
956
957 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
958 STATUS_RF_KILL_HW |
959 test_bit(STATUS_FW_ERROR, &priv->status) <<
960 STATUS_FW_ERROR |
961 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
962 STATUS_EXIT_PENDING;
963
964 dev_kfree_skb(priv->beacon_skb);
965 priv->beacon_skb = NULL;
966}
967
968
969
970
971
972
973
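/*
 * Deferred work for the run-time (post-association) calibrations: chain
 * noise and sensitivity.  Skipped while scanning or shutting down.
 */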
974static void iwl_bg_run_time_calib_work(struct work_struct *work)
975{
976 struct iwl_priv *priv = container_of(work, struct iwl_priv,
977 run_time_calib_work);
978
979 mutex_lock(&priv->mutex);
980
981 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
982 test_bit(STATUS_SCANNING, &priv->status)) {
983 mutex_unlock(&priv->mutex);
984 return;
985 }
986
987 if (priv->start_calib) {
988 iwl_chain_noise_calibration(priv);
989 iwl_sensitivity_calibration(priv);
990 }
991
992 mutex_unlock(&priv->mutex);
993}
994
995void iwlagn_prepare_restart(struct iwl_priv *priv)
996{
997 bool bt_full_concurrent;
998 u8 bt_ci_compliance;
999 u8 bt_load;
1000 u8 bt_status;
1001 bool bt_is_sco;
1002 int i;
1003
1004 lockdep_assert_held(&priv->mutex);
1005
1006 priv->is_open = 0;
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017 bt_full_concurrent = priv->bt_full_concurrent;
1018 bt_ci_compliance = priv->bt_ci_compliance;
1019 bt_load = priv->bt_traffic_load;
1020 bt_status = priv->bt_status;
1021 bt_is_sco = priv->bt_is_sco;
1022
1023 iwl_down(priv);
1024
1025 priv->bt_full_concurrent = bt_full_concurrent;
1026 priv->bt_ci_compliance = bt_ci_compliance;
1027 priv->bt_traffic_load = bt_load;
1028 priv->bt_status = bt_status;
1029 priv->bt_is_sco = bt_is_sco;
1030
1031
1032 for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
1033 priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
1034
1035 for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
1036 atomic_set(&priv->queue_stop_count[i], 0);
1037
1038 memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
1039}
1040
1041static void iwl_bg_restart(struct work_struct *data)
1042{
1043 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
1044
1045 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1046 return;
1047
1048 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
1049 mutex_lock(&priv->mutex);
1050 iwlagn_prepare_restart(priv);
1051 mutex_unlock(&priv->mutex);
1052 iwl_cancel_deferred_work(priv);
1053 if (priv->mac80211_registered)
1054 ieee80211_restart_hw(priv->hw);
1055 else
1056 IWL_ERR(priv,
1057 "Cannot request restart before registering with mac80211\n");
1058 } else {
1059 WARN_ON(1);
1060 }
1061}
1062
1063
1064
1065
1066
1067
1068
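/*
 * Allocate the driver's ordered workqueue and initialize the deferred
 * work items and timers used by the op_mode; iwl_cancel_deferred_work()
 * below tears most of them down.
 */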
1069static void iwl_setup_deferred_work(struct iwl_priv *priv)
1070{
1071 priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
1072
1073 INIT_WORK(&priv->restart, iwl_bg_restart);
1074 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
1075 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
1076 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
1077 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
1078 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
1079
1080 iwl_setup_scan_deferred_work(priv);
1081
1082 if (priv->lib->bt_params)
1083 iwlagn_bt_setup_deferred_work(priv);
1084
1085 timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
1086
1087 timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
1088}
1089
1090void iwl_cancel_deferred_work(struct iwl_priv *priv)
1091{
1092 if (priv->lib->bt_params)
1093 iwlagn_bt_cancel_deferred_work(priv);
1094
1095 cancel_work_sync(&priv->run_time_calib_work);
1096 cancel_work_sync(&priv->beacon_update);
1097
1098 iwl_cancel_scan_deferred_work(priv);
1099
1100 cancel_work_sync(&priv->bt_full_concurrency);
1101 cancel_work_sync(&priv->bt_runtime_config);
1102
1103 del_timer_sync(&priv->statistics_periodic);
1104 del_timer_sync(&priv->ucode_trace);
1105}
1106
1107static int iwl_init_drv(struct iwl_priv *priv)
1108{
1109 spin_lock_init(&priv->sta_lock);
1110
1111 mutex_init(&priv->mutex);
1112
1113 INIT_LIST_HEAD(&priv->calib_results);
1114
1115 priv->band = NL80211_BAND_2GHZ;
1116
1117 priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
1118
1119 priv->iw_mode = NL80211_IFTYPE_STATION;
1120 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
1121 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
1122 priv->agg_tids_count = 0;
1123
1124 priv->rx_statistics_jiffies = jiffies;
1125
1126
1127 iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1128
1129 iwl_init_scan_params(priv);
1130
1131
1132 if (priv->lib->bt_params &&
1133 priv->lib->bt_params->advanced_bt_coexist) {
1134 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
1135 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
1136 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
1137 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
1138 priv->bt_duration = BT_DURATION_LIMIT_DEF;
1139 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
1140 }
1141
1142 return 0;
1143}
1144
1145static void iwl_uninit_drv(struct iwl_priv *priv)
1146{
1147 kfree(priv->scan_cmd);
1148 kfree(priv->beacon_cmd);
1149 kfree(rcu_dereference_raw(priv->noa_data));
1150 iwl_calib_free_results(priv);
1151#ifdef CONFIG_IWLWIFI_DEBUGFS
1152 kfree(priv->wowlan_sram);
1153#endif
1154}
1155
1156static void iwl_set_hw_params(struct iwl_priv *priv)
1157{
1158 if (priv->cfg->ht_params)
1159 priv->hw_params.use_rts_for_aggregation =
1160 priv->cfg->ht_params->use_rts_for_aggregation;
1161
1162
1163 priv->lib->set_hw_params(priv);
1164}
1165
1166
1167
1168
1169static void iwl_option_config(struct iwl_priv *priv)
1170{
1171#ifdef CONFIG_IWLWIFI_DEBUG
1172 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
1173#else
1174 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n");
1175#endif
1176
1177#ifdef CONFIG_IWLWIFI_DEBUGFS
1178 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n");
1179#else
1180 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n");
1181#endif
1182
1183#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
1184 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n");
1185#else
1186 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
1187#endif
1188}
1189
1190static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1191{
1192 struct iwl_nvm_data *data = priv->nvm_data;
1193
1194 if (data->sku_cap_11n_enable &&
1195 !priv->cfg->ht_params) {
1196 IWL_ERR(priv, "Invalid 11n configuration\n");
1197 return -EINVAL;
1198 }
1199
1200 if (!data->sku_cap_11n_enable && !data->sku_cap_band_24ghz_enable &&
1201 !data->sku_cap_band_52ghz_enable) {
1202 IWL_ERR(priv, "Invalid device sku\n");
1203 return -EINVAL;
1204 }
1205
1206 IWL_DEBUG_INFO(priv,
1207 "Device SKU: 24GHz %s %s, 52GHz %s %s, 11n %s %s\n",
1208 data->sku_cap_band_24ghz_enable ? "" : "NOT", "enabled",
1209 data->sku_cap_band_52ghz_enable ? "" : "NOT", "enabled",
1210 data->sku_cap_11n_enable ? "" : "NOT", "enabled");
1211
1212 priv->hw_params.tx_chains_num =
1213 num_of_ant(data->valid_tx_ant);
1214 if (priv->cfg->rx_with_siso_diversity)
1215 priv->hw_params.rx_chains_num = 1;
1216 else
1217 priv->hw_params.rx_chains_num =
1218 num_of_ant(data->valid_rx_ant);
1219
1220 IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
1221 data->valid_tx_ant,
1222 data->valid_rx_ant);
1223
1224 return 0;
1225}
1226
1227static int iwl_nvm_check_version(struct iwl_nvm_data *data,
1228 struct iwl_trans *trans)
1229{
1230 if (data->nvm_version >= trans->cfg->nvm_ver ||
1231 data->calib_version >= trans->cfg->nvm_calib_ver) {
1232 IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
1233 data->nvm_version, data->calib_version);
1234 return 0;
1235 }
1236
1237 IWL_ERR(trans,
1238 "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
1239 data->nvm_version, trans->cfg->nvm_ver,
1240 data->calib_version, trans->cfg->nvm_calib_ver);
1241 return -EINVAL;
1242}
1243
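/*
 * op_mode start handler: allocate the mac80211 hw and priv structures,
 * pick the per-family library ops, configure the transport, read and
 * parse the EEPROM, set up contexts and deferred work, and finally
 * register with mac80211 and debugfs.
 */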
1244static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1245 const struct iwl_cfg *cfg,
1246 const struct iwl_fw *fw,
1247 struct dentry *dbgfs_dir)
1248{
1249 struct iwl_priv *priv;
1250 struct ieee80211_hw *hw;
1251 struct iwl_op_mode *op_mode;
1252 u16 num_mac;
1253 u32 ucode_flags;
1254 struct iwl_trans_config trans_cfg = {};
1255 static const u8 no_reclaim_cmds[] = {
1256 REPLY_RX_PHY_CMD,
1257 REPLY_RX_MPDU_CMD,
1258 REPLY_COMPRESSED_BA,
1259 STATISTICS_NOTIFICATION,
1260 REPLY_TX,
1261 };
1262 int i;
1263
1264
1265
1266
1267 hw = iwl_alloc_all();
1268 if (!hw) {
1269 pr_err("%s: Cannot allocate network device\n", cfg->name);
1270 goto out;
1271 }
1272
1273 op_mode = hw->priv;
1274 op_mode->ops = &iwl_dvm_ops;
1275 priv = IWL_OP_MODE_GET_DVM(op_mode);
1276 priv->trans = trans;
1277 priv->dev = trans->dev;
1278 priv->cfg = cfg;
1279 priv->fw = fw;
1280
1281 switch (priv->cfg->device_family) {
1282 case IWL_DEVICE_FAMILY_1000:
1283 case IWL_DEVICE_FAMILY_100:
1284 priv->lib = &iwl_dvm_1000_cfg;
1285 break;
1286 case IWL_DEVICE_FAMILY_2000:
1287 priv->lib = &iwl_dvm_2000_cfg;
1288 break;
1289 case IWL_DEVICE_FAMILY_105:
1290 priv->lib = &iwl_dvm_105_cfg;
1291 break;
1292 case IWL_DEVICE_FAMILY_2030:
1293 case IWL_DEVICE_FAMILY_135:
1294 priv->lib = &iwl_dvm_2030_cfg;
1295 break;
1296 case IWL_DEVICE_FAMILY_5000:
1297 priv->lib = &iwl_dvm_5000_cfg;
1298 break;
1299 case IWL_DEVICE_FAMILY_5150:
1300 priv->lib = &iwl_dvm_5150_cfg;
1301 break;
1302 case IWL_DEVICE_FAMILY_6000:
1303 case IWL_DEVICE_FAMILY_6000i:
1304 priv->lib = &iwl_dvm_6000_cfg;
1305 break;
1306 case IWL_DEVICE_FAMILY_6005:
1307 priv->lib = &iwl_dvm_6005_cfg;
1308 break;
1309 case IWL_DEVICE_FAMILY_6050:
1310 case IWL_DEVICE_FAMILY_6150:
1311 priv->lib = &iwl_dvm_6050_cfg;
1312 break;
1313 case IWL_DEVICE_FAMILY_6030:
1314 priv->lib = &iwl_dvm_6030_cfg;
1315 break;
1316 default:
1317 break;
1318 }
1319
1320 if (WARN_ON(!priv->lib))
1321 goto out_free_hw;
1322
1323
1324
1325
1326
1327 trans_cfg.op_mode = op_mode;
1328 trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
1329 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
1330
1331 switch (iwlwifi_mod_params.amsdu_size) {
1332 case IWL_AMSDU_DEF:
1333 case IWL_AMSDU_4K:
1334 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1335 break;
1336 case IWL_AMSDU_8K:
1337 trans_cfg.rx_buf_size = IWL_AMSDU_8K;
1338 break;
1339 case IWL_AMSDU_12K:
1340 default:
1341 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1342 pr_err("Unsupported amsdu_size: %d\n",
1343 iwlwifi_mod_params.amsdu_size);
1344 }
1345
1346 trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;
1347
1348 trans_cfg.command_groups = iwl_dvm_groups;
1349 trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
1350
1351 trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
1352 trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
1353 driver_data[2]);
1354
1355 WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
1356 priv->cfg->base_params->num_of_queues);
1357
1358 ucode_flags = fw->ucode_capa.flags;
1359
1360 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
1361 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1362 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1363 } else {
1364 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1365 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1366 }
1367
1368
1369 iwl_trans_configure(priv->trans, &trans_cfg);
1370
1371 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
1372 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
1373 trans->command_groups = trans_cfg.command_groups;
1374 trans->command_groups_size = trans_cfg.command_groups_size;
1375
1376
1377
1378 SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
1379
1380 iwl_option_config(priv);
1381
1382 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
1383
1384
1385 priv->bt_ant_couple_ok =
1386 (iwlwifi_mod_params.antenna_coupling >
1387 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
1388 true : false;
1389
1390
1391 priv->bt_ch_announce = true;
1392 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
1393 (priv->bt_ch_announce) ? "On" : "Off");
1394
1395
1396
1397
1398 spin_lock_init(&priv->statistics.lock);
1399
1400
1401
1402
1403 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
1404 priv->cfg->name, priv->trans->hw_rev);
1405
1406 if (iwl_trans_start_hw(priv->trans))
1407 goto out_free_hw;
1408
1409
1410 if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
1411 &priv->eeprom_blob_size)) {
1412 IWL_ERR(priv, "Unable to init EEPROM\n");
1413 goto out_free_hw;
1414 }
1415
1416
1417 iwl_trans_stop_device(priv->trans);
1418
1419 priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
1420 priv->eeprom_blob,
1421 priv->eeprom_blob_size);
1422 if (!priv->nvm_data)
1423 goto out_free_eeprom_blob;
1424
1425 if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
1426 goto out_free_eeprom;
1427
1428 if (iwl_eeprom_init_hw_params(priv))
1429 goto out_free_eeprom;
1430
1431
1432 memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN);
1433 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
1434 priv->hw->wiphy->addresses = priv->addresses;
1435 priv->hw->wiphy->n_addresses = 1;
1436 num_mac = priv->nvm_data->n_hw_addrs;
1437 if (num_mac > 1) {
1438 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
1439 ETH_ALEN);
1440 priv->addresses[1].addr[5]++;
1441 priv->hw->wiphy->n_addresses++;
1442 }
1443
1444
1445
1446
1447 iwl_set_hw_params(priv);
1448
1449 if (!(priv->nvm_data->sku_cap_ipan_enable)) {
1450 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
1451 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1452
1453
1454
1455
1456 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1457 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1458
1459
1460 iwl_trans_configure(priv->trans, &trans_cfg);
1461 }
1462
1463
1464
1465
1466 for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
1467 priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
1468 if (i < IWLAGN_FIRST_AMPDU_QUEUE &&
1469 i != IWL_DEFAULT_CMD_QUEUE_NUM &&
1470 i != IWL_IPAN_CMD_QUEUE_NUM)
1471 priv->queue_to_mac80211[i] = i;
1472 atomic_set(&priv->queue_stop_count[i], 0);
1473 }
1474
1475 if (iwl_init_drv(priv))
1476 goto out_free_eeprom;
1477
1478
1479
1480
1481
1482
1483 iwl_setup_deferred_work(priv);
1484 iwl_setup_rx_handlers(priv);
1485
1486 iwl_power_initialize(priv);
1487 iwl_tt_initialize(priv);
1488
1489 snprintf(priv->hw->wiphy->fw_version,
1490 sizeof(priv->hw->wiphy->fw_version),
1491 "%s", fw->fw_version);
1492
1493 priv->new_scan_threshold_behaviour =
1494 !!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
1495
1496 priv->phy_calib_chain_noise_reset_cmd =
1497 fw->ucode_capa.standard_phy_calibration_size;
1498 priv->phy_calib_chain_noise_gain_cmd =
1499 fw->ucode_capa.standard_phy_calibration_size + 1;
1500
1501
1502 iwl_init_context(priv, ucode_flags);
1503
1504
1505
1506
1507
1508
1509 if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
1510 goto out_destroy_workqueue;
1511
1512 iwl_dbgfs_register(priv, dbgfs_dir);
1513
1514 return op_mode;
1515
1516out_destroy_workqueue:
1517 iwl_tt_exit(priv);
1518 iwl_cancel_deferred_work(priv);
1519 destroy_workqueue(priv->workqueue);
1520 priv->workqueue = NULL;
1521 iwl_uninit_drv(priv);
1522out_free_eeprom_blob:
1523 kfree(priv->eeprom_blob);
1524out_free_eeprom:
1525 kfree(priv->nvm_data);
1526out_free_hw:
1527 ieee80211_free_hw(priv->hw);
1528out:
1529 op_mode = NULL;
1530 return op_mode;
1531}
1532
1533static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1534{
1535 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1536
1537 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
1538
1539 iwlagn_mac_unregister(priv);
1540
1541 iwl_tt_exit(priv);
1542
1543 kfree(priv->eeprom_blob);
1544 kfree(priv->nvm_data);
1545
1546
1547 flush_workqueue(priv->workqueue);
1548
1549
1550
1551
1552 destroy_workqueue(priv->workqueue);
1553 priv->workqueue = NULL;
1554
1555 iwl_uninit_drv(priv);
1556
1557 dev_kfree_skb(priv->beacon_skb);
1558
1559 iwl_trans_op_mode_leave(priv->trans);
1560 ieee80211_free_hw(priv->hw);
1561}
1562
1563static const char * const desc_lookup_text[] = {
1564 "OK",
1565 "FAIL",
1566 "BAD_PARAM",
1567 "BAD_CHECKSUM",
1568 "NMI_INTERRUPT_WDG",
1569 "SYSASSERT",
1570 "FATAL_ERROR",
1571 "BAD_COMMAND",
1572 "HW_ERROR_TUNE_LOCK",
1573 "HW_ERROR_TEMPERATURE",
1574 "ILLEGAL_CHAN_FREQ",
1575 "VCC_NOT_STABLE",
1576 "FH_ERROR",
1577 "NMI_INTERRUPT_HOST",
1578 "NMI_INTERRUPT_ACTION_PT",
1579 "NMI_INTERRUPT_UNKNOWN",
1580 "UCODE_VERSION_MISMATCH",
1581 "HW_ERROR_ABS_LOCK",
1582 "HW_ERROR_CAL_LOCK_FAIL",
1583 "NMI_INTERRUPT_INST_ACTION_PT",
1584 "NMI_INTERRUPT_DATA_ACTION_PT",
1585 "NMI_TRM_HW_ER",
1586 "NMI_INTERRUPT_TRM",
1587 "NMI_INTERRUPT_BREAK_POINT",
1588 "DEBUG_0",
1589 "DEBUG_1",
1590 "DEBUG_2",
1591 "DEBUG_3",
1592};
1593
1594static struct { char *name; u8 num; } advanced_lookup[] = {
1595 { "NMI_INTERRUPT_WDG", 0x34 },
1596 { "SYSASSERT", 0x35 },
1597 { "UCODE_VERSION_MISMATCH", 0x37 },
1598 { "BAD_COMMAND", 0x38 },
1599 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1600 { "FATAL_ERROR", 0x3D },
1601 { "NMI_TRM_HW_ERR", 0x46 },
1602 { "NMI_INTERRUPT_TRM", 0x4C },
1603 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1604 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1605 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1606 { "NMI_INTERRUPT_HOST", 0x66 },
1607 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1608 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1609 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1610 { "ADVANCED_SYSASSERT", 0 },
1611};
1612
1613static const char *desc_lookup(u32 num)
1614{
1615 int i;
1616 int max = ARRAY_SIZE(desc_lookup_text);
1617
1618 if (num < max)
1619 return desc_lookup_text[num];
1620
1621 max = ARRAY_SIZE(advanced_lookup) - 1;
1622 for (i = 0; i < max; i++) {
1623 if (advanced_lookup[i].num == num)
1624 break;
1625 }
1626 return advanced_lookup[i].name;
1627}
1628
1629#define ERROR_START_OFFSET (1 * sizeof(u32))
1630#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1631
1632static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1633{
1634 struct iwl_trans *trans = priv->trans;
1635 u32 base;
1636 struct iwl_error_event_table table;
1637
1638 base = priv->device_pointers.error_event_table;
1639 if (priv->cur_ucode == IWL_UCODE_INIT) {
1640 if (!base)
1641 base = priv->fw->init_errlog_ptr;
1642 } else {
1643 if (!base)
1644 base = priv->fw->inst_errlog_ptr;
1645 }
1646
1647 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1648 IWL_ERR(priv,
1649 "Not valid error log pointer 0x%08X for %s uCode\n",
1650 base,
1651 (priv->cur_ucode == IWL_UCODE_INIT)
1652 ? "Init" : "RT");
1653 return;
1654 }
1655
1656
1657 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
1658
1659 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1660 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
1661 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
1662 priv->status, table.valid);
1663 }
1664
1665 IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1666 desc_lookup(table.error_id));
1667 IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1668 IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1669 IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1670 IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1671 IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1672 IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1673 IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1674 IWL_ERR(priv, "0x%08X | line\n", table.line);
1675 IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1676 IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1677 IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1678 IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1679 IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1680 IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1681 IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1682 IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1683 IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1684 IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1685 IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
1686 IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
1687 IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
1688 IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
1689 IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
1690 IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
1691 IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
1692 IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
1693 IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
1694 IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
1695 IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
1696 IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
1697 IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
1698 IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
1699}
1700
1701#define EVENT_START_OFFSET (4 * sizeof(u32))
1702
1703
1704
1705
1706
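/*
 * Dump num_events entries of the event log either into the supplied
 * buffer (when bufsz is non-zero) or to the kernel log and tracepoints.
 * Returns the updated buffer position.
 */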
1707static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1708 u32 num_events, u32 mode,
1709 int pos, char **buf, size_t bufsz)
1710{
1711 u32 i;
1712 u32 base;
1713 u32 event_size;
1714 u32 ptr;
1715 u32 ev, time, data;
1716 unsigned long reg_flags;
1717
1718 struct iwl_trans *trans = priv->trans;
1719
1720 if (num_events == 0)
1721 return pos;
1722
1723 base = priv->device_pointers.log_event_table;
1724 if (priv->cur_ucode == IWL_UCODE_INIT) {
1725 if (!base)
1726 base = priv->fw->init_evtlog_ptr;
1727 } else {
1728 if (!base)
1729 base = priv->fw->inst_evtlog_ptr;
1730 }
1731
1732 if (mode == 0)
1733 event_size = 2 * sizeof(u32);
1734 else
1735 event_size = 3 * sizeof(u32);
1736
1737 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1738
1739
1740 if (!iwl_trans_grab_nic_access(trans, &reg_flags))
1741 return pos;
1742
1743
1744 iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
1745
1746
1747
1748 for (i = 0; i < num_events; i++) {
1749 ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1750 time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1751 if (mode == 0) {
1752
1753 if (bufsz) {
1754 pos += scnprintf(*buf + pos, bufsz - pos,
1755 "EVT_LOG:0x%08x:%04u\n",
1756 time, ev);
1757 } else {
1758 trace_iwlwifi_dev_ucode_event(trans->dev, 0,
1759 time, ev);
1760 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1761 time, ev);
1762 }
1763 } else {
1764 data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1765 if (bufsz) {
1766 pos += scnprintf(*buf + pos, bufsz - pos,
1767 "EVT_LOGT:%010u:0x%08x:%04u\n",
1768 time, data, ev);
1769 } else {
1770 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1771 time, data, ev);
1772 trace_iwlwifi_dev_ucode_event(trans->dev, time,
1773 data, ev);
1774 }
1775 }
1776 }
1777
1778
1779 iwl_trans_release_nic_access(trans, &reg_flags);
1780 return pos;
1781}
1782
1783
1784
1785
1786static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1787 u32 num_wraps, u32 next_entry,
1788 u32 size, u32 mode,
1789 int pos, char **buf, size_t bufsz)
1790{
1791
1792
1793
1794
1795 if (num_wraps) {
1796 if (next_entry < size) {
1797 pos = iwl_print_event_log(priv,
1798 capacity - (size - next_entry),
1799 size - next_entry, mode,
1800 pos, buf, bufsz);
1801 pos = iwl_print_event_log(priv, 0,
1802 next_entry, mode,
1803 pos, buf, bufsz);
1804 } else
1805 pos = iwl_print_event_log(priv, next_entry - size,
1806 size, mode, pos, buf, bufsz);
1807 } else {
1808 if (next_entry < size) {
1809 pos = iwl_print_event_log(priv, 0, next_entry,
1810 mode, pos, buf, bufsz);
1811 } else {
1812 pos = iwl_print_event_log(priv, next_entry - size,
1813 size, mode, pos, buf, bufsz);
1814 }
1815 }
1816 return pos;
1817}
1818
1819#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1820
1821int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1822 char **buf)
1823{
1824 u32 base;
1825 u32 capacity;
1826 u32 mode;
1827 u32 num_wraps;
1828 u32 next_entry;
1829 u32 size;
1830 u32 logsize;
1831 int pos = 0;
1832 size_t bufsz = 0;
1833 struct iwl_trans *trans = priv->trans;
1834
1835 base = priv->device_pointers.log_event_table;
1836 if (priv->cur_ucode == IWL_UCODE_INIT) {
1837 logsize = priv->fw->init_evtlog_size;
1838 if (!base)
1839 base = priv->fw->init_evtlog_ptr;
1840 } else {
1841 logsize = priv->fw->inst_evtlog_size;
1842 if (!base)
1843 base = priv->fw->inst_evtlog_ptr;
1844 }
1845
1846 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1847 IWL_ERR(priv,
1848 "Invalid event log pointer 0x%08X for %s uCode\n",
1849 base,
1850 (priv->cur_ucode == IWL_UCODE_INIT)
1851 ? "Init" : "RT");
1852 return -EINVAL;
1853 }
1854
1855
1856 capacity = iwl_trans_read_mem32(trans, base);
1857 mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32)));
1858 num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32)));
1859 next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32)));
1860
1861 if (capacity > logsize) {
1862 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
1863 "entries\n", capacity, logsize);
1864 capacity = logsize;
1865 }
1866
1867 if (next_entry > logsize) {
1868 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1869 next_entry, logsize);
1870 next_entry = logsize;
1871 }
1872
1873 size = num_wraps ? capacity : next_entry;
1874
1875
1876 if (size == 0) {
1877 IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
1878 return pos;
1879 }
1880
1881 if (!(iwl_have_debug_level(IWL_DL_FW)) && !full_log)
1882 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1883 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1884 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1885 size);
1886
1887#ifdef CONFIG_IWLWIFI_DEBUG
1888 if (buf) {
1889 if (full_log)
1890 bufsz = capacity * 48;
1891 else
1892 bufsz = size * 48;
1893 *buf = kmalloc(bufsz, GFP_KERNEL);
1894 if (!*buf)
1895 return -ENOMEM;
1896 }
1897 if (iwl_have_debug_level(IWL_DL_FW) || full_log) {
1898
1899
1900
1901
1902
1903 if (num_wraps)
1904 pos = iwl_print_event_log(priv, next_entry,
1905 capacity - next_entry, mode,
1906 pos, buf, bufsz);
1907
1908 pos = iwl_print_event_log(priv, 0,
1909 next_entry, mode, pos, buf, bufsz);
1910 } else
1911 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1912 next_entry, size, mode,
1913 pos, buf, bufsz);
1914#else
1915 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1916 next_entry, size, mode,
1917 pos, buf, bufsz);
1918#endif
1919 return pos;
1920}
1921
1922static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
1923{
1924 unsigned int reload_msec;
1925 unsigned long reload_jiffies;
1926
1927 if (iwl_have_debug_level(IWL_DL_FW))
1928 iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
1929
1930
1931 priv->ucode_loaded = false;
1932
1933
1934 set_bit(STATUS_FW_ERROR, &priv->status);
1935
1936 iwl_abort_notification_waits(&priv->notif_wait);
1937
1938
1939
1940 clear_bit(STATUS_READY, &priv->status);
1941
1942 if (!ondemand) {
1943
1944
1945
1946
1947
1948
1949 reload_jiffies = jiffies;
1950 reload_msec = jiffies_to_msecs((long) reload_jiffies -
1951 (long) priv->reload_jiffies);
1952 priv->reload_jiffies = reload_jiffies;
1953 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
1954 priv->reload_count++;
1955 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
1956 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
1957 return;
1958 }
1959 } else
1960 priv->reload_count = 0;
1961 }
1962
1963 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1964 if (iwlwifi_mod_params.fw_restart) {
1965 IWL_DEBUG_FW(priv,
1966 "Restarting adapter due to uCode error.\n");
1967 queue_work(priv->workqueue, &priv->restart);
1968 } else
1969 IWL_DEBUG_FW(priv,
1970 "Detected FW error, but not restarting\n");
1971 }
1972}
1973
1974static void iwl_nic_error(struct iwl_op_mode *op_mode)
1975{
1976 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1977
1978 IWL_ERR(priv, "Loaded firmware version: %s\n",
1979 priv->fw->fw_version);
1980
1981 iwl_dump_nic_error_log(priv);
1982 iwl_dump_nic_event_log(priv, false, NULL);
1983
1984 iwlagn_fw_error(priv, false);
1985}
1986
1987static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
1988{
1989 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1990
1991 if (!iwl_check_for_ct_kill(priv)) {
1992 IWL_ERR(priv, "Restarting adapter: command queue is full\n");
1993 iwlagn_fw_error(priv, false);
1994 }
1995}
1996
1997#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
1998
1999static void iwl_nic_config(struct iwl_op_mode *op_mode)
2000{
2001 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2002
2003
2004 iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
2005 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2006 CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
2007 (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
2008 CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
2009 (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
2010 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
2011
2012
2013 if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
2014 u32 reg_val =
2015 priv->nvm_data->radio_cfg_type <<
2016 CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
2017 priv->nvm_data->radio_cfg_step <<
2018 CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
2019 priv->nvm_data->radio_cfg_dash <<
2020 CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2021
2022 iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
2023 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2024 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2025 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH,
2026 reg_val);
2027
2028 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
2029 priv->nvm_data->radio_cfg_type,
2030 priv->nvm_data->radio_cfg_step,
2031 priv->nvm_data->radio_cfg_dash);
2032 } else {
2033 WARN_ON(1);
2034 }
2035
2036
2037 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
2038 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2039 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
2040
2041
2042
2043
2044
2045 iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
2046 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2047 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2048
2049 if (priv->lib->nic_config)
2050 priv->lib->nic_config(priv);
2051}
2052
2053static void iwl_wimax_active(struct iwl_op_mode *op_mode)
2054{
2055 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2056
2057 clear_bit(STATUS_READY, &priv->status);
2058 IWL_ERR(priv, "RF is used by WiMAX\n");
2059}
2060
2061static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2062{
2063 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2064 int mq = priv->queue_to_mac80211[queue];
2065
2066 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2067 return;
2068
2069 if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
2070 IWL_DEBUG_TX_QUEUES(priv,
2071 "queue %d (mac80211 %d) already stopped\n",
2072 queue, mq);
2073 return;
2074 }
2075
2076 set_bit(mq, &priv->transport_queue_stop);
2077 ieee80211_stop_queue(priv->hw, mq);
2078}
2079
2080static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
2081{
2082 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2083 int mq = priv->queue_to_mac80211[queue];
2084
2085 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2086 return;
2087
2088 if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
2089 IWL_DEBUG_TX_QUEUES(priv,
2090 "queue %d (mac80211 %d) already awake\n",
2091 queue, mq);
2092 return;
2093 }
2094
2095 clear_bit(mq, &priv->transport_queue_stop);
2096
2097 if (!priv->passive_no_rx)
2098 ieee80211_wake_queue(priv->hw, mq);
2099}
2100
2101void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
2102{
2103 int mq;
2104
2105 if (!priv->passive_no_rx)
2106 return;
2107
2108 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
2109 if (!test_bit(mq, &priv->transport_queue_stop)) {
2110 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
2111 ieee80211_wake_queue(priv->hw, mq);
2112 } else {
2113 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
2114 }
2115 }
2116
2117 priv->passive_no_rx = false;
2118}
2119
2120static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2121{
2122 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2123 struct ieee80211_tx_info *info;
2124
2125 info = IEEE80211_SKB_CB(skb);
2126 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2127 ieee80211_free_txskb(priv->hw, skb);
2128}
2129
2130static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2131{
2132 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2133
2134 if (state)
2135 set_bit(STATUS_RF_KILL_HW, &priv->status);
2136 else
2137 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2138
2139 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
2140
2141 return false;
2142}
2143
2144static const struct iwl_op_mode_ops iwl_dvm_ops = {
2145 .start = iwl_op_mode_dvm_start,
2146 .stop = iwl_op_mode_dvm_stop,
2147 .rx = iwl_rx_dispatch,
2148 .queue_full = iwl_stop_sw_queue,
2149 .queue_not_full = iwl_wake_sw_queue,
2150 .hw_rf_kill = iwl_set_hw_rfkill_state,
2151 .free_skb = iwl_free_skb,
2152 .nic_error = iwl_nic_error,
2153 .cmd_queue_full = iwl_cmd_queue_full,
2154 .nic_config = iwl_nic_config,
2155 .wimax_active = iwl_wimax_active,
2156};
2157
2158
2159
2160
2161
2162
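/*
 * Module entry point: register the rate control algorithm and the DVM
 * op_mode with the shared iwlwifi core.
 */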
2163static int __init iwl_init(void)
2164{
2165
2166 int ret;
2167
2168 ret = iwlagn_rate_control_register();
2169 if (ret) {
2170 pr_err("Unable to register rate control algorithm: %d\n", ret);
2171 return ret;
2172 }
2173
2174 ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
2175 if (ret) {
2176 pr_err("Unable to register op_mode: %d\n", ret);
2177 iwlagn_rate_control_unregister();
2178 }
2179
2180 return ret;
2181}
2182module_init(iwl_init);
2183
2184static void __exit iwl_exit(void)
2185{
2186 iwl_opmode_deregister("iwldvm");
2187 iwlagn_rate_control_unregister();
2188}
2189module_exit(iwl_exit);
2190