/******************************************************************************
 *
 * Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux
 *
 * Copyright(c) 2003-2011 Intel Corporation
 *
 * Contact: <ilw@linux.intel.com>
 *
 *****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/ieee80211_radiotap.h>
#include <net/mac80211.h>

#include <asm/div64.h>

#define DRV_NAME "iwl3945"

#include "iwl-fh.h"
#include "iwl-3945-fh.h"
#include "iwl-commands.h"
#include "iwl-sta.h"
#include "iwl-3945.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-dev.h"
#include "iwl-spectrum.h"

#define DRV_DESCRIPTION	\
"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION  IWLWIFI_VERSION VD "s"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR	"<ilw@linux.intel.com>"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

struct iwl_mod_params iwl3945_mod_params = {
	.sw_crypto = 1,
	.restart_fw = 1,
	.disable_hw_scan = 1,
};
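
/*
 * iwl3945_get_antenna_flags - map the "antenna" module parameter to RXON flags
 *
 * Returns 0 (diversity) or a "diversity disabled" mask naming antenna A or B.
 * The EEPROM antenna_switch_type swaps which physical connector is treated
 * as "main" vs. "aux".
 */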
__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;

	switch (iwl3945_mod_params.antenna) {
	case IWL_ANTENNA_DIVERSITY:
		return 0;

	case IWL_ANTENNA_MAIN:
		if (eeprom->antenna_switch_type)
			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;

	case IWL_ANTENNA_AUX:
		if (eeprom->antenna_switch_type)
			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
	}

	IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
		iwl3945_mod_params.antenna);

	return 0;
}
135
136static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
137 struct ieee80211_key_conf *keyconf,
138 u8 sta_id)
139{
140 unsigned long flags;
141 __le16 key_flags = 0;
142 int ret;
143
144 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
145 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
146
147 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
148 key_flags |= STA_KEY_MULTICAST_MSK;
149
150 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
151 keyconf->hw_key_idx = keyconf->keyidx;
152 key_flags &= ~STA_KEY_FLG_INVALID;
153
154 spin_lock_irqsave(&priv->sta_lock, flags);
155 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
156 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
157 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
158 keyconf->keylen);
159
160 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
161 keyconf->keylen);
162
163 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
164 == STA_KEY_FLG_NO_ENC)
165 priv->stations[sta_id].sta.key.key_offset =
166 iwl_legacy_get_free_ucode_key_index(priv);
167
168
169
170 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
171 "no space for a new key");
172
173 priv->stations[sta_id].sta.key.key_flags = key_flags;
174 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
175 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
176
177 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
178
179 ret = iwl_legacy_send_add_sta(priv,
180 &priv->stations[sta_id].sta, CMD_ASYNC);
181
182 spin_unlock_irqrestore(&priv->sta_lock, flags);
183
184 return ret;
185}
186
187static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
188 struct ieee80211_key_conf *keyconf,
189 u8 sta_id)
190{
191 return -EOPNOTSUPP;
192}
193
194static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
195 struct ieee80211_key_conf *keyconf,
196 u8 sta_id)
197{
198 return -EOPNOTSUPP;
199}
200
201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
202{
203 unsigned long flags;
204 struct iwl_legacy_addsta_cmd sta_cmd;
205
206 spin_lock_irqsave(&priv->sta_lock, flags);
207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
208 memset(&priv->stations[sta_id].sta.key, 0,
209 sizeof(struct iwl4965_keyinfo));
210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
214 spin_unlock_irqrestore(&priv->sta_lock, flags);
215
216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
217 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
218}
219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
221 struct ieee80211_key_conf *keyconf, u8 sta_id)
222{
223 int ret = 0;
224
225 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
226
227 switch (keyconf->cipher) {
228 case WLAN_CIPHER_SUITE_CCMP:
229 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
230 break;
231 case WLAN_CIPHER_SUITE_TKIP:
232 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
233 break;
234 case WLAN_CIPHER_SUITE_WEP40:
235 case WLAN_CIPHER_SUITE_WEP104:
236 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
237 break;
238 default:
239 IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
240 keyconf->cipher);
241 ret = -EINVAL;
242 }
243
244 IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
245 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
246 sta_id, ret);
247
248 return ret;
249}
250
251static int iwl3945_remove_static_key(struct iwl_priv *priv)
252{
253 int ret = -EOPNOTSUPP;
254
255 return ret;
256}
257
258static int iwl3945_set_static_key(struct iwl_priv *priv,
259 struct ieee80211_key_conf *key)
260{
261 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
262 key->cipher == WLAN_CIPHER_SUITE_WEP104)
263 return -EOPNOTSUPP;
264
265 IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
266 return -EINVAL;
267}
268
269static void iwl3945_clear_free_frames(struct iwl_priv *priv)
270{
271 struct list_head *element;
272
273 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
274 priv->frames_count);
275
276 while (!list_empty(&priv->free_frames)) {
277 element = priv->free_frames.next;
278 list_del(element);
279 kfree(list_entry(element, struct iwl3945_frame, list));
280 priv->frames_count--;
281 }
282
283 if (priv->frames_count) {
284 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
285 priv->frames_count);
286 priv->frames_count = 0;
287 }
288}
289
290static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
291{
292 struct iwl3945_frame *frame;
293 struct list_head *element;
294 if (list_empty(&priv->free_frames)) {
295 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
296 if (!frame) {
297 IWL_ERR(priv, "Could not allocate frame!\n");
298 return NULL;
299 }
300
301 priv->frames_count++;
302 return frame;
303 }
304
305 element = priv->free_frames.next;
306 list_del(element);
307 return list_entry(element, struct iwl3945_frame, list);
308}
309
310static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
311{
312 memset(frame, 0, sizeof(*frame));
313 list_add(&frame->list, &priv->free_frames);
314}
315
316unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
317 struct ieee80211_hdr *hdr,
318 int left)
319{
320
321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0;
323
324 if (priv->beacon_skb->len > left)
325 return 0;
326
327 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
328
329 return priv->beacon_skb->len;
330}
331
332static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
333{
334 struct iwl3945_frame *frame;
335 unsigned int frame_size;
336 int rc;
337 u8 rate;
338
339 frame = iwl3945_get_free_frame(priv);
340
341 if (!frame) {
342 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
343 "command.\n");
344 return -ENOMEM;
345 }
346
347 rate = iwl_legacy_get_lowest_plcp(priv,
348 &priv->contexts[IWL_RXON_CTX_BSS]);
349
350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
351
352 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
353 &frame->u.cmd[0]);
354
355 iwl3945_free_frame(priv, frame);
356
357 return rc;
358}
359
360static void iwl3945_unset_hw_params(struct iwl_priv *priv)
361{
362 if (priv->_3945.shared_virt)
363 dma_free_coherent(&priv->pci_dev->dev,
364 sizeof(struct iwl3945_shared),
365 priv->_3945.shared_virt,
366 priv->_3945.shared_phys);
367}
368
369static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
370 struct ieee80211_tx_info *info,
371 struct iwl_device_cmd *cmd,
372 struct sk_buff *skb_frag,
373 int sta_id)
374{
375 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
376 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
377
378 tx_cmd->sec_ctl = 0;
379
380 switch (keyinfo->cipher) {
381 case WLAN_CIPHER_SUITE_CCMP:
382 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
383 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
384 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
385 break;
386
387 case WLAN_CIPHER_SUITE_TKIP:
388 break;
389
390 case WLAN_CIPHER_SUITE_WEP104:
391 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
393 case WLAN_CIPHER_SUITE_WEP40:
394 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
395 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
396
397 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
398
399 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
400 "with key %d\n", info->control.hw_key->hw_key_idx);
401 break;
402
403 default:
404 IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
405 break;
406 }
407}
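
/*
 * iwl3945_build_tx_cmd_basic - fill in the rate-independent parts of the
 * 3945 Tx command: ACK/sequence-control flags, station id, TID, protection
 * flags and the power-management frame timeout.
 */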
412static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
413 struct iwl_device_cmd *cmd,
414 struct ieee80211_tx_info *info,
415 struct ieee80211_hdr *hdr, u8 std_id)
416{
417 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
418 __le32 tx_flags = tx_cmd->tx_flags;
419 __le16 fc = hdr->frame_control;
420
421 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
423 tx_flags |= TX_CMD_FLG_ACK_MSK;
424 if (ieee80211_is_mgmt(fc))
425 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
426 if (ieee80211_is_probe_resp(fc) &&
427 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
428 tx_flags |= TX_CMD_FLG_TSF_MSK;
429 } else {
430 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
431 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
432 }
433
434 tx_cmd->sta_id = std_id;
435 if (ieee80211_has_morefrags(fc))
436 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
437
438 if (ieee80211_is_data_qos(fc)) {
439 u8 *qc = ieee80211_get_qos_ctl(hdr);
440 tx_cmd->tid_tspec = qc[0] & 0xf;
441 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
442 } else {
443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
444 }
445
446 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
447
448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
449 if (ieee80211_is_mgmt(fc)) {
450 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
451 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
452 else
453 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
454 } else {
455 tx_cmd->timeout.pm_frame_timeout = 0;
456 }
457
458 tx_cmd->driver_txop = 0;
459 tx_cmd->tx_flags = tx_flags;
460 tx_cmd->next_frame_len = 0;
461}
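
/*
 * iwl3945_tx_skb - start a REPLY_TX command for one frame
 *
 * Builds the Tx command in the queue's command buffer, DMA-maps the
 * command+802.11 header and the remaining payload as two TFD entries,
 * then advances the queue write pointer.  Returns 0 on success or -1
 * if the frame was dropped.
 */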
466static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
467{
468 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
469 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
470 struct iwl3945_tx_cmd *tx_cmd;
471 struct iwl_tx_queue *txq = NULL;
472 struct iwl_queue *q = NULL;
473 struct iwl_device_cmd *out_cmd;
474 struct iwl_cmd_meta *out_meta;
475 dma_addr_t phys_addr;
476 dma_addr_t txcmd_phys;
477 int txq_id = skb_get_queue_mapping(skb);
478 u16 len, idx, hdr_len;
479 u8 id;
480 u8 unicast;
481 u8 sta_id;
482 u8 tid = 0;
483 __le16 fc;
484 u8 wait_write_ptr = 0;
485 unsigned long flags;
486
487 spin_lock_irqsave(&priv->lock, flags);
488 if (iwl_legacy_is_rfkill(priv)) {
489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
490 goto drop_unlock;
491 }
492
493 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
494 IWL_ERR(priv, "ERROR: No TX rate available.\n");
495 goto drop_unlock;
496 }
497
498 unicast = !is_multicast_ether_addr(hdr->addr1);
499 id = 0;
500
501 fc = hdr->frame_control;
502
503#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
504 if (ieee80211_is_auth(fc))
505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
506 else if (ieee80211_is_assoc_req(fc))
507 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
508 else if (ieee80211_is_reassoc_req(fc))
509 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
510#endif
511
512 spin_unlock_irqrestore(&priv->lock, flags);
513
514 hdr_len = ieee80211_hdrlen(fc);
515
516
517 sta_id = iwl_legacy_sta_id_or_broadcast(
518 priv, &priv->contexts[IWL_RXON_CTX_BSS],
519 info->control.sta);
520 if (sta_id == IWL_INVALID_STATION) {
521 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
522 hdr->addr1);
523 goto drop;
524 }
525
526 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
527
528 if (ieee80211_is_data_qos(fc)) {
529 u8 *qc = ieee80211_get_qos_ctl(hdr);
530 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
531 if (unlikely(tid >= MAX_TID_COUNT))
532 goto drop;
533 }
534
535
536 txq = &priv->txq[txq_id];
537 q = &txq->q;
538
539 if ((iwl_legacy_queue_space(q) < q->high_mark))
540 goto drop;
541
542 spin_lock_irqsave(&priv->lock, flags);
543
544 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
545
546
547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
548 txq->txb[q->write_ptr].skb = skb;
549 txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
550
551
552 out_cmd = txq->cmd[idx];
553 out_meta = &txq->meta[idx];
554 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
555 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
556 memset(tx_cmd, 0, sizeof(*tx_cmd));
557
558
559
560
561
562
563
564 out_cmd->hdr.cmd = REPLY_TX;
565 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
566 INDEX_TO_SEQ(q->write_ptr)));
567
568
569 memcpy(tx_cmd->hdr, hdr, hdr_len);
570
571
572 if (info->control.hw_key)
573 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
574
575
576 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
577
578
579 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
580
581
582 len = (u16)skb->len;
583 tx_cmd->len = cpu_to_le16(len);
584
585 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
586 iwl_legacy_update_stats(priv, true, fc, len);
587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
589
590 if (!ieee80211_has_morefrags(hdr->frame_control)) {
591 txq->need_update = 1;
592 } else {
593 wait_write_ptr = 1;
594 txq->need_update = 0;
595 }
596
597 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
598 le16_to_cpu(out_cmd->hdr.sequence));
599 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
600 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
601 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
602 ieee80211_hdrlen(fc));
603
604
605
606
607
608
609
610
611
612
613 len = sizeof(struct iwl3945_tx_cmd) +
614 sizeof(struct iwl_cmd_header) + hdr_len;
615 len = (len + 3) & ~3;
616
617
618
619 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
620 len, PCI_DMA_TODEVICE);
621
622
623 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
624 dma_unmap_len_set(out_meta, len, len);
625
626
627
628 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
629 txcmd_phys, len, 1, 0);
630
631
632
633
634 len = skb->len - hdr_len;
635 if (len) {
636 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
637 len, PCI_DMA_TODEVICE);
638 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
639 phys_addr, len,
640 0, U32_PAD(len));
641 }
642
643
644
645 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
646 iwl_legacy_txq_update_write_ptr(priv, txq);
647 spin_unlock_irqrestore(&priv->lock, flags);
648
649 if ((iwl_legacy_queue_space(q) < q->high_mark)
650 && priv->mac80211_registered) {
651 if (wait_write_ptr) {
652 spin_lock_irqsave(&priv->lock, flags);
653 txq->need_update = 1;
654 iwl_legacy_txq_update_write_ptr(priv, txq);
655 spin_unlock_irqrestore(&priv->lock, flags);
656 }
657
658 iwl_legacy_stop_queue(priv, txq);
659 }
660
661 return 0;
662
663drop_unlock:
664 spin_unlock_irqrestore(&priv->lock, flags);
665drop:
666 return -1;
667}
668
669static int iwl3945_get_measurement(struct iwl_priv *priv,
670 struct ieee80211_measurement_params *params,
671 u8 type)
672{
673 struct iwl_spectrum_cmd spectrum;
674 struct iwl_rx_packet *pkt;
675 struct iwl_host_cmd cmd = {
676 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
677 .data = (void *)&spectrum,
678 .flags = CMD_WANT_SKB,
679 };
680 u32 add_time = le64_to_cpu(params->start_time);
681 int rc;
682 int spectrum_resp_status;
683 int duration = le16_to_cpu(params->duration);
684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
685
686 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
687 add_time = iwl_legacy_usecs_to_beacons(priv,
688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
689 le16_to_cpu(ctx->timing.beacon_interval));
690
691 memset(&spectrum, 0, sizeof(spectrum));
692
693 spectrum.channel_count = cpu_to_le16(1);
694 spectrum.flags =
695 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
696 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
697 cmd.len = sizeof(spectrum);
698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
699
700 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
701 spectrum.start_time =
702 iwl_legacy_add_beacon_time(priv,
703 priv->_3945.last_beacon_time, add_time,
704 le16_to_cpu(ctx->timing.beacon_interval));
705 else
706 spectrum.start_time = 0;
707
708 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
709 spectrum.channels[0].channel = params->channel;
710 spectrum.channels[0].type = type;
711 if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
712 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
714
715 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
716 if (rc)
717 return rc;
718
719 pkt = (struct iwl_rx_packet *)cmd.reply_page;
720 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
721 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
722 rc = -EIO;
723 }
724
725 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
726 switch (spectrum_resp_status) {
727 case 0:
728 if (pkt->u.spectrum.id != 0xff) {
729 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
730 pkt->u.spectrum.id);
731 priv->measurement_status &= ~MEASUREMENT_READY;
732 }
733 priv->measurement_status |= MEASUREMENT_ACTIVE;
734 rc = 0;
735 break;
736
737 case 1:
738 rc = -EAGAIN;
739 break;
740 }
741
742 iwl_legacy_free_pages(priv, cmd.reply_page);
743
744 return rc;
745}
746
747static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
748 struct iwl_rx_mem_buffer *rxb)
749{
750 struct iwl_rx_packet *pkt = rxb_addr(rxb);
751 struct iwl_alive_resp *palive;
752 struct delayed_work *pwork;
753
754 palive = &pkt->u.alive_frame;
755
756 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
757 "0x%01X 0x%01X\n",
758 palive->is_valid, palive->ver_type,
759 palive->ver_subtype);
760
761 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
762 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
763 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
764 sizeof(struct iwl_alive_resp));
765 pwork = &priv->init_alive_start;
766 } else {
767 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
768 memcpy(&priv->card_alive, &pkt->u.alive_frame,
769 sizeof(struct iwl_alive_resp));
770 pwork = &priv->alive_start;
771 iwl3945_disable_events(priv);
772 }
773
774
775
776 if (palive->is_valid == UCODE_VALID_OK)
777 queue_delayed_work(priv->workqueue, pwork,
778 msecs_to_jiffies(5));
779 else
780 IWL_WARN(priv, "uCode did not respond OK.\n");
781}
782
783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
784 struct iwl_rx_mem_buffer *rxb)
785{
786#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
787 struct iwl_rx_packet *pkt = rxb_addr(rxb);
788#endif
789
790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
791}
792
793static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
794 struct iwl_rx_mem_buffer *rxb)
795{
796 struct iwl_rx_packet *pkt = rxb_addr(rxb);
797 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
798#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
799 u8 rate = beacon->beacon_notify_hdr.rate;
800
801 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
802 "tsf %d %d rate %d\n",
803 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
804 beacon->beacon_notify_hdr.failure_frame,
805 le32_to_cpu(beacon->ibss_mgr_status),
806 le32_to_cpu(beacon->high_tsf),
807 le32_to_cpu(beacon->low_tsf), rate);
808#endif
809
810 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
811
812}
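
/* Handle CARD_STATE_NOTIFICATION: track HW rfkill changes reported by uCode */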
816static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
817 struct iwl_rx_mem_buffer *rxb)
818{
819 struct iwl_rx_packet *pkt = rxb_addr(rxb);
820 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
821 unsigned long status = priv->status;
822
823 IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
824 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
825 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
826
827 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
828 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
829
830 if (flags & HW_CARD_DISABLED)
831 set_bit(STATUS_RF_KILL_HW, &priv->status);
832 else
833 clear_bit(STATUS_RF_KILL_HW, &priv->status);
834
835
836 iwl_legacy_scan_cancel(priv);
837
838 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
839 test_bit(STATUS_RF_KILL_HW, &priv->status)))
840 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
841 test_bit(STATUS_RF_KILL_HW, &priv->status));
842 else
843 wake_up(&priv->wait_command_queue);
844}
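
/*
 * iwl3945_setup_rx_handlers - register a callback for each Rx notification
 * the driver cares about; notifications without a handler are only logged.
 */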
855static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
856{
857 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
858 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
859 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
860 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
861 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
862 iwl_legacy_rx_spectrum_measure_notif;
863 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
864 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
865 iwl_legacy_rx_pm_debug_statistics_notif;
866 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
867
868
869
870
871
872
873 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
874 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
875
876 iwl_legacy_setup_rx_scan_handlers(priv);
877 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
878
879
880 iwl3945_hw_rx_handler_setup(priv);
881}
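
/*
 * Rx buffer management (summary of the functions below):
 *
 *   iwl3945_rx_allocate()      - allocate pages, DMA-map them and move the
 *                                buffers from rx_used to the rx_free list
 *   iwl3945_rx_queue_restock() - move buffers from rx_free into the receive
 *                                buffer descriptor (bd) circular buffer and
 *                                tell the device about the new write pointer
 *   iwl3945_rx_replenish()     - workqueue version (GFP_KERNEL)
 *   iwl3945_rx_replenish_now() - atomic version used from the irq tasklet
 *   iwl3945_rx_handle()        - process filled buffers and recycle them
 *
 * On 3945 a receive buffer descriptor is simply the 32-bit DMA address of
 * the buffer, which iwl3945_dma_addr2rbd_ptr() converts to little endian.
 */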
950static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
951 dma_addr_t dma_addr)
952{
953 return cpu_to_le32((u32)dma_addr);
954}
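
/*
 * iwl3945_rx_queue_restock - refill the hardware Rx ring from rx_free
 *
 * Moves buffers onto the bd ring while space remains, schedules the
 * replenish work if the free count drops to the low watermark, and
 * updates the device write pointer once enough buffers were added.
 */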
967static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
968{
969 struct iwl_rx_queue *rxq = &priv->rxq;
970 struct list_head *element;
971 struct iwl_rx_mem_buffer *rxb;
972 unsigned long flags;
973 int write;
974
975 spin_lock_irqsave(&rxq->lock, flags);
976 write = rxq->write & ~0x7;
977 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
978
979 element = rxq->rx_free.next;
980 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
981 list_del(element);
982
983
984 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
985 rxq->queue[rxq->write] = rxb;
986 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
987 rxq->free_count--;
988 }
989 spin_unlock_irqrestore(&rxq->lock, flags);
990
991
992 if (rxq->free_count <= RX_LOW_WATERMARK)
993 queue_work(priv->workqueue, &priv->rx_replenish);
994
995
996
997
998 if ((rxq->write_actual != (rxq->write & ~0x7))
999 || (abs(rxq->write - rxq->read) > 7)) {
1000 spin_lock_irqsave(&rxq->lock, flags);
1001 rxq->need_update = 1;
1002 spin_unlock_irqrestore(&rxq->lock, flags);
1003 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1004 }
1005}
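
/*
 * iwl3945_rx_allocate - allocate and DMA-map pages for empty Rx buffers
 *
 * Moves each newly backed buffer from rx_used to rx_free.  @priority is
 * the GFP mask: GFP_KERNEL from the workqueue path, GFP_ATOMIC from the
 * irq tasklet path.
 */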
1015static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1016{
1017 struct iwl_rx_queue *rxq = &priv->rxq;
1018 struct list_head *element;
1019 struct iwl_rx_mem_buffer *rxb;
1020 struct page *page;
1021 unsigned long flags;
1022 gfp_t gfp_mask = priority;
1023
1024 while (1) {
1025 spin_lock_irqsave(&rxq->lock, flags);
1026
1027 if (list_empty(&rxq->rx_used)) {
1028 spin_unlock_irqrestore(&rxq->lock, flags);
1029 return;
1030 }
1031 spin_unlock_irqrestore(&rxq->lock, flags);
1032
1033 if (rxq->free_count > RX_LOW_WATERMARK)
1034 gfp_mask |= __GFP_NOWARN;
1035
1036 if (priv->hw_params.rx_page_order > 0)
1037 gfp_mask |= __GFP_COMP;
1038
1039
1040 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1041 if (!page) {
1042 if (net_ratelimit())
1043 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1044 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1045 net_ratelimit())
1046 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1047 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
1048 rxq->free_count);
1049
1050
1051
1052 break;
1053 }
1054
1055 spin_lock_irqsave(&rxq->lock, flags);
1056 if (list_empty(&rxq->rx_used)) {
1057 spin_unlock_irqrestore(&rxq->lock, flags);
1058 __free_pages(page, priv->hw_params.rx_page_order);
1059 return;
1060 }
1061 element = rxq->rx_used.next;
1062 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1063 list_del(element);
1064 spin_unlock_irqrestore(&rxq->lock, flags);
1065
1066 rxb->page = page;
1067
1068 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1069 PAGE_SIZE << priv->hw_params.rx_page_order,
1070 PCI_DMA_FROMDEVICE);
1071
1072 spin_lock_irqsave(&rxq->lock, flags);
1073
1074 list_add_tail(&rxb->list, &rxq->rx_free);
1075 rxq->free_count++;
1076 priv->alloc_rxb_page++;
1077
1078 spin_unlock_irqrestore(&rxq->lock, flags);
1079 }
1080}
1081
1082void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1083{
1084 unsigned long flags;
1085 int i;
1086 spin_lock_irqsave(&rxq->lock, flags);
1087 INIT_LIST_HEAD(&rxq->rx_free);
1088 INIT_LIST_HEAD(&rxq->rx_used);
1089
1090 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1091
1092
1093 if (rxq->pool[i].page != NULL) {
1094 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1095 PAGE_SIZE << priv->hw_params.rx_page_order,
1096 PCI_DMA_FROMDEVICE);
1097 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1098 rxq->pool[i].page = NULL;
1099 }
1100 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1101 }
1102
1103
1104
1105 rxq->read = rxq->write = 0;
1106 rxq->write_actual = 0;
1107 rxq->free_count = 0;
1108 spin_unlock_irqrestore(&rxq->lock, flags);
1109}
1110
1111void iwl3945_rx_replenish(void *data)
1112{
1113 struct iwl_priv *priv = data;
1114 unsigned long flags;
1115
1116 iwl3945_rx_allocate(priv, GFP_KERNEL);
1117
1118 spin_lock_irqsave(&priv->lock, flags);
1119 iwl3945_rx_queue_restock(priv);
1120 spin_unlock_irqrestore(&priv->lock, flags);
1121}
1122
1123static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
1124{
1125 iwl3945_rx_allocate(priv, GFP_ATOMIC);
1126
1127 iwl3945_rx_queue_restock(priv);
1128}
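
/*
 * iwl3945_rx_queue_free - unmap and free every pooled Rx page, then free
 * the bd ring and the Rx status area.
 */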
1136static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1137{
1138 int i;
1139 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1140 if (rxq->pool[i].page != NULL) {
1141 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1142 PAGE_SIZE << priv->hw_params.rx_page_order,
1143 PCI_DMA_FROMDEVICE);
1144 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1145 rxq->pool[i].page = NULL;
1146 }
1147 }
1148
1149 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1150 rxq->bd_dma);
1151 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1152 rxq->rb_stts, rxq->rb_stts_dma);
1153 rxq->bd = NULL;
1154 rxq->rb_stts = NULL;
1155}
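
/*
 * ratio2dB[x] ~= 20 * log10(x) for x = 1..99; used to convert a linear
 * signal/noise ratio into dB (ratios are treated as voltage ratios).
 */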
static u8 ratio2dB[100] = {
/*	 0   1   2   3   4   5   6   7   8   9 */
	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19,
	20, 21, 22, 22, 23, 23, 24, 25, 26, 26,
	26, 26, 26, 27, 27, 28, 28, 28, 29, 29,
	29, 30, 30, 30, 31, 31, 31, 31, 32, 32,
	32, 32, 32, 33, 33, 33, 33, 33, 34, 34,
	34, 34, 34, 34, 35, 35, 35, 35, 35, 35,
	36, 36, 36, 36, 36, 36, 36, 37, 37, 37,
	37, 37, 37, 37, 37, 38, 38, 38, 38, 38,
	38, 38, 38, 38, 38, 39, 39, 39, 39, 39,
	39, 39, 39, 39, 39, 40, 40, 40, 40, 40
};
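
/*
 * iwl3945_calc_db_from_ratio - convert a linear ratio into dB
 *
 * Uses ratio2dB[] directly for ratios below 100, adds 20 dB for ratios
 * of 100..999 (one decade), and caps the result at 60 dB.  For example,
 * a ratio of 31 yields ratio2dB[31] = 30 dB (20 * log10(31) ~= 29.8).
 */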
1176int iwl3945_calc_db_from_ratio(int sig_ratio)
1177{
1178
1179 if (sig_ratio >= 1000)
1180 return 60;
1181
1182
1183
1184 if (sig_ratio >= 100)
1185 return 20 + (int)ratio2dB[sig_ratio/10];
1186
1187
1188 if (sig_ratio < 1)
1189 return 0;
1190
1191
1192 return (int)ratio2dB[sig_ratio];
1193}
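
/*
 * iwl3945_rx_handle - main entry point for processing uCode responses
 *
 * Walks the Rx queue from the last-read index up to the index the device
 * reports in rb_stts->closed_rb_num, dispatches each packet through
 * priv->rx_handlers[], lets the command machinery reclaim command
 * responses, and recycles or re-pools the backing pages.
 */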
1202static void iwl3945_rx_handle(struct iwl_priv *priv)
1203{
1204 struct iwl_rx_mem_buffer *rxb;
1205 struct iwl_rx_packet *pkt;
1206 struct iwl_rx_queue *rxq = &priv->rxq;
1207 u32 r, i;
1208 int reclaim;
1209 unsigned long flags;
1210 u8 fill_rx = 0;
1211 u32 count = 8;
1212 int total_empty = 0;
1213
1214
1215
1216 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1217 i = rxq->read;
1218
1219
1220 total_empty = r - rxq->write_actual;
1221 if (total_empty < 0)
1222 total_empty += RX_QUEUE_SIZE;
1223
1224 if (total_empty > (RX_QUEUE_SIZE / 2))
1225 fill_rx = 1;
1226
1227 if (i == r)
1228 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1229
1230 while (i != r) {
1231 int len;
1232
1233 rxb = rxq->queue[i];
1234
1235
1236
1237
1238 BUG_ON(rxb == NULL);
1239
1240 rxq->queue[i] = NULL;
1241
1242 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1243 PAGE_SIZE << priv->hw_params.rx_page_order,
1244 PCI_DMA_FROMDEVICE);
1245 pkt = rxb_addr(rxb);
1246
1247 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1248 len += sizeof(u32);
1249 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1250
1251
1252
1253
1254
1255
1256
1257 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1258 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1259 (pkt->hdr.cmd != REPLY_TX);
1260
1261
1262
1263
1264 if (priv->rx_handlers[pkt->hdr.cmd]) {
1265 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1266 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1267 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1268 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1269 } else {
1270
1271 IWL_DEBUG_RX(priv,
1272 "r %d i %d No handler needed for %s, 0x%02x\n",
1273 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1274 pkt->hdr.cmd);
1275 }
1276
1277
1278
1279
1280
1281
1282
1283
1284 if (reclaim) {
1285
1286
1287
1288 if (rxb->page)
1289 iwl_legacy_tx_cmd_complete(priv, rxb);
1290 else
1291 IWL_WARN(priv, "Claim null rxb?\n");
1292 }
1293
1294
1295
1296
1297 spin_lock_irqsave(&rxq->lock, flags);
1298 if (rxb->page != NULL) {
1299 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1300 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1301 PCI_DMA_FROMDEVICE);
1302 list_add_tail(&rxb->list, &rxq->rx_free);
1303 rxq->free_count++;
1304 } else
1305 list_add_tail(&rxb->list, &rxq->rx_used);
1306
1307 spin_unlock_irqrestore(&rxq->lock, flags);
1308
1309 i = (i + 1) & RX_QUEUE_MASK;
1310
1311
1312 if (fill_rx) {
1313 count++;
1314 if (count >= 8) {
1315 rxq->read = i;
1316 iwl3945_rx_replenish_now(priv);
1317 count = 0;
1318 }
1319 }
1320 }
1321
1322
1323 rxq->read = i;
1324 if (fill_rx)
1325 iwl3945_rx_replenish_now(priv);
1326 else
1327 iwl3945_rx_queue_restock(priv);
1328}
1329
1330
1331static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
1332{
1333
1334 synchronize_irq(priv->pci_dev->irq);
1335 tasklet_kill(&priv->irq_tasklet);
1336}
1337
1338static const char *iwl3945_desc_lookup(int i)
1339{
1340 switch (i) {
1341 case 1:
1342 return "FAIL";
1343 case 2:
1344 return "BAD_PARAM";
1345 case 3:
1346 return "BAD_CHECKSUM";
1347 case 4:
1348 return "NMI_INTERRUPT";
1349 case 5:
1350 return "SYSASSERT";
1351 case 6:
1352 return "FATAL_ERROR";
1353 }
1354
1355 return "UNKNOWN";
1356}
1357
1358#define ERROR_START_OFFSET (1 * sizeof(u32))
1359#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1360
1361void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1362{
1363 u32 i;
1364 u32 desc, time, count, base, data1;
1365 u32 blink1, blink2, ilink1, ilink2;
1366
1367 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1368
1369 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1370 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1371 return;
1372 }
1373
1374
1375 count = iwl_legacy_read_targ_mem(priv, base);
1376
1377 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1378 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1379 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1380 priv->status, count);
1381 }
1382
1383 IWL_ERR(priv, "Desc Time asrtPC blink2 "
1384 "ilink1 nmiPC Line\n");
1385 for (i = ERROR_START_OFFSET;
1386 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1387 i += ERROR_ELEM_SIZE) {
1388 desc = iwl_legacy_read_targ_mem(priv, base + i);
1389 time =
1390 iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1391 blink1 =
1392 iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1393 blink2 =
1394 iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1395 ilink1 =
1396 iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1397 ilink2 =
1398 iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1399 data1 =
1400 iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1401
1402 IWL_ERR(priv,
1403 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1404 iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
1405 ilink1, ilink2, data1);
1406 trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
1407 0, blink1, blink2, ilink1, ilink2);
1408 }
1409}
1410
1411static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1412{
1413 u32 inta, handled = 0;
1414 u32 inta_fh;
1415 unsigned long flags;
1416#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1417 u32 inta_mask;
1418#endif
1419
1420 spin_lock_irqsave(&priv->lock, flags);
1421
1422
1423
1424
1425 inta = iwl_read32(priv, CSR_INT);
1426 iwl_write32(priv, CSR_INT, inta);
1427
1428
1429
1430
1431 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1432 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1433
1434#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1435 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
1436
1437 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1438 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1439 inta, inta_mask, inta_fh);
1440 }
1441#endif
1442
1443 spin_unlock_irqrestore(&priv->lock, flags);
1444
1445
1446
1447
1448
1449 if (inta_fh & CSR39_FH_INT_RX_MASK)
1450 inta |= CSR_INT_BIT_FH_RX;
1451 if (inta_fh & CSR39_FH_INT_TX_MASK)
1452 inta |= CSR_INT_BIT_FH_TX;
1453
1454
1455 if (inta & CSR_INT_BIT_HW_ERR) {
1456 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
1457
1458
1459 iwl_legacy_disable_interrupts(priv);
1460
1461 priv->isr_stats.hw++;
1462 iwl_legacy_irq_handle_error(priv);
1463
1464 handled |= CSR_INT_BIT_HW_ERR;
1465
1466 return;
1467 }
1468
1469#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1470 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1471
1472 if (inta & CSR_INT_BIT_SCD) {
1473 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
1474 "the frame/frames.\n");
1475 priv->isr_stats.sch++;
1476 }
1477
1478
1479 if (inta & CSR_INT_BIT_ALIVE) {
1480 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
1481 priv->isr_stats.alive++;
1482 }
1483 }
1484#endif
1485
1486 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1487
1488
1489 if (inta & CSR_INT_BIT_SW_ERR) {
1490 IWL_ERR(priv, "Microcode SW error detected. "
1491 "Restarting 0x%X.\n", inta);
1492 priv->isr_stats.sw++;
1493 iwl_legacy_irq_handle_error(priv);
1494 handled |= CSR_INT_BIT_SW_ERR;
1495 }
1496
1497
1498 if (inta & CSR_INT_BIT_WAKEUP) {
1499 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1500 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1501 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
1502 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
1503 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
1504 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
1505 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
1506 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
1507
1508 priv->isr_stats.wakeup++;
1509 handled |= CSR_INT_BIT_WAKEUP;
1510 }
1511
1512
1513
1514
1515 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1516 iwl3945_rx_handle(priv);
1517 priv->isr_stats.rx++;
1518 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1519 }
1520
1521 if (inta & CSR_INT_BIT_FH_TX) {
1522 IWL_DEBUG_ISR(priv, "Tx interrupt\n");
1523 priv->isr_stats.tx++;
1524
1525 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
1526 iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
1527 (FH39_SRVC_CHNL), 0x0);
1528 handled |= CSR_INT_BIT_FH_TX;
1529 }
1530
1531 if (inta & ~handled) {
1532 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1533 priv->isr_stats.unhandled++;
1534 }
1535
1536 if (inta & ~priv->inta_mask) {
1537 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1538 inta & ~priv->inta_mask);
1539 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1540 }
1541
1542
1543
1544 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1545 iwl_legacy_enable_interrupts(priv);
1546
1547#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1548 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1549 inta = iwl_read32(priv, CSR_INT);
1550 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1551 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1552 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1553 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1554 }
1555#endif
1556}
1557
1558static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1559 enum ieee80211_band band,
1560 u8 is_active, u8 n_probes,
1561 struct iwl3945_scan_channel *scan_ch,
1562 struct ieee80211_vif *vif)
1563{
1564 struct ieee80211_channel *chan;
1565 const struct ieee80211_supported_band *sband;
1566 const struct iwl_channel_info *ch_info;
1567 u16 passive_dwell = 0;
1568 u16 active_dwell = 0;
1569 int added, i;
1570
1571 sband = iwl_get_hw_mode(priv, band);
1572 if (!sband)
1573 return 0;
1574
1575 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
1576 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1577
1578 if (passive_dwell <= active_dwell)
1579 passive_dwell = active_dwell + 1;
1580
1581 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1582 chan = priv->scan_request->channels[i];
1583
1584 if (chan->band != band)
1585 continue;
1586
1587 scan_ch->channel = chan->hw_value;
1588
1589 ch_info = iwl_legacy_get_channel_info(priv, band,
1590 scan_ch->channel);
1591 if (!iwl_legacy_is_channel_valid(ch_info)) {
1592 IWL_DEBUG_SCAN(priv,
1593 "Channel %d is INVALID for this band.\n",
1594 scan_ch->channel);
1595 continue;
1596 }
1597
1598 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1599 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1600
1601
1602
1603 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
1604 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1605 scan_ch->type = 0;
1606 if (IWL_UCODE_API(priv->ucode_ver) == 1)
1607 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
1608 } else {
1609 scan_ch->type = 1;
1610 }
1611
1612
1613
1614
1615
1616 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
1617 if (n_probes)
1618 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1619 } else {
1620
1621
1622 if ((scan_ch->type & 1) && n_probes)
1623 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1624 }
1625
1626
1627 scan_ch->tpc.dsp_atten = 110;
1628
1629
1630
1631 if (band == IEEE80211_BAND_5GHZ)
1632 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1633 else {
1634 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1635
1636
1637
1638
1639 }
1640
1641 IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
1642 scan_ch->channel,
1643 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
1644 (scan_ch->type & 1) ?
1645 active_dwell : passive_dwell);
1646
1647 scan_ch++;
1648 added++;
1649 }
1650
1651 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1652 return added;
1653}
1654
1655static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1656 struct ieee80211_rate *rates)
1657{
1658 int i;
1659
1660 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1661 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1662 rates[i].hw_value = i;
1663 rates[i].hw_value_short = i;
1664 rates[i].flags = 0;
1665 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
1666
1667
1668
1669 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
1670 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1671 }
1672 }
1673}
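
/* Free the DMA-coherent buffers that hold the uCode images */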
1681static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
1682{
1683 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1684 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1685 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1686 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1687 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1688 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1689}
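
/*
 * iwl3945_verify_inst_full - compare the full instruction image in SRAM
 * against the host copy via the HBUS target-memory read port; gives up
 * after 20 mismatches.
 */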
1695static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
1696{
1697 u32 val;
1698 u32 save_len = len;
1699 int rc = 0;
1700 u32 errcnt;
1701
1702 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1703
1704 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1705 IWL39_RTC_INST_LOWER_BOUND);
1706
1707 errcnt = 0;
1708 for (; len > 0; len -= sizeof(u32), image++) {
1709
1710
1711
1712 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1713 if (val != le32_to_cpu(*image)) {
1714 IWL_ERR(priv, "uCode INST section is invalid at "
1715 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1716 save_len - len, val, le32_to_cpu(*image));
1717 rc = -EIO;
1718 errcnt++;
1719 if (errcnt >= 20)
1720 break;
1721 }
1722 }
1723
1724
1725 if (!errcnt)
1726 IWL_DEBUG_INFO(priv,
1727 "ucode image in INSTRUCTION memory is good\n");
1728
1729 return rc;
1730}
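
/*
 * iwl3945_verify_inst_sparse - spot-check the instruction image in SRAM
 * against the host copy (one word every 100 bytes); bails out quickly so
 * it can be used to identify which image is currently loaded.
 */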
1738static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1739{
1740 u32 val;
1741 int rc = 0;
1742 u32 errcnt = 0;
1743 u32 i;
1744
1745 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1746
1747 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1748
1749
1750
1751 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1752 i + IWL39_RTC_INST_LOWER_BOUND);
1753 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1754 if (val != le32_to_cpu(*image)) {
1755#if 0
1756 IWL_ERR(priv, "uCode INST section is invalid at "
1757 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1758 i, val, *image);
1759#endif
1760 rc = -EIO;
1761 errcnt++;
1762 if (errcnt >= 3)
1763 break;
1764 }
1765 }
1766
1767 return rc;
1768}
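
/*
 * iwl3945_verify_ucode - determine which uCode image is loaded in SRAM
 * (bootstrap, initialize or runtime) and verify it against the host copy.
 */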
1775static int iwl3945_verify_ucode(struct iwl_priv *priv)
1776{
1777 __le32 *image;
1778 u32 len;
1779 int rc = 0;
1780
1781
1782 image = (__le32 *)priv->ucode_boot.v_addr;
1783 len = priv->ucode_boot.len;
1784 rc = iwl3945_verify_inst_sparse(priv, image, len);
1785 if (rc == 0) {
1786 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
1787 return 0;
1788 }
1789
1790
1791 image = (__le32 *)priv->ucode_init.v_addr;
1792 len = priv->ucode_init.len;
1793 rc = iwl3945_verify_inst_sparse(priv, image, len);
1794 if (rc == 0) {
1795 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
1796 return 0;
1797 }
1798
1799
1800 image = (__le32 *)priv->ucode_code.v_addr;
1801 len = priv->ucode_code.len;
1802 rc = iwl3945_verify_inst_sparse(priv, image, len);
1803 if (rc == 0) {
1804 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
1805 return 0;
1806 }
1807
1808 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1809
1810
1811
1812
1813 image = (__le32 *)priv->ucode_boot.v_addr;
1814 len = priv->ucode_boot.len;
1815 rc = iwl3945_verify_inst_full(priv, image, len);
1816
1817 return rc;
1818}
1819
1820static void iwl3945_nic_start(struct iwl_priv *priv)
1821{
1822
1823 iwl_write32(priv, CSR_RESET, 0);
1824}
1825
1826#define IWL3945_UCODE_GET(item) \
1827static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
1828{ \
1829 return le32_to_cpu(ucode->v1.item); \
1830}
1831
1832static u32 iwl3945_ucode_get_header_size(u32 api_ver)
1833{
1834 return 24;
1835}
1836
1837static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
1838{
1839 return (u8 *) ucode->v1.data;
1840}
1841
1842IWL3945_UCODE_GET(inst_size);
1843IWL3945_UCODE_GET(data_size);
1844IWL3945_UCODE_GET(init_size);
1845IWL3945_UCODE_GET(init_data_size);
1846IWL3945_UCODE_GET(boot_size);
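
/*
 * iwl3945_read_ucode - load the firmware file and stage it for the device
 *
 * Requests the newest supported "<fw_name_pre><api>.ucode" file, validates
 * the header and section sizes, allocates DMA buffers and copies the
 * instruction, data, init and bootstrap images into them (nothing is
 * loaded into the device here).
 */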
1853static int iwl3945_read_ucode(struct iwl_priv *priv)
1854{
1855 const struct iwl_ucode_header *ucode;
1856 int ret = -EINVAL, index;
1857 const struct firmware *ucode_raw;
1858
1859 const char *name_pre = priv->cfg->fw_name_pre;
1860 const unsigned int api_max = priv->cfg->ucode_api_max;
1861 const unsigned int api_min = priv->cfg->ucode_api_min;
1862 char buf[25];
1863 u8 *src;
1864 size_t len;
1865 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1866
1867
1868
1869 for (index = api_max; index >= api_min; index--) {
1870 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
1871 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
1872 if (ret < 0) {
1873 IWL_ERR(priv, "%s firmware file req failed: %d\n",
1874 buf, ret);
1875 if (ret == -ENOENT)
1876 continue;
1877 else
1878 goto error;
1879 } else {
1880 if (index < api_max)
1881 IWL_ERR(priv, "Loaded firmware %s, "
1882 "which is deprecated. "
1883 " Please use API v%u instead.\n",
1884 buf, api_max);
1885 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
1886 "(%zd bytes) from disk\n",
1887 buf, ucode_raw->size);
1888 break;
1889 }
1890 }
1891
1892 if (ret < 0)
1893 goto error;
1894
1895
1896 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
1897 IWL_ERR(priv, "File size way too small!\n");
1898 ret = -EINVAL;
1899 goto err_release;
1900 }
1901
1902
1903 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1904
1905 priv->ucode_ver = le32_to_cpu(ucode->ver);
1906 api_ver = IWL_UCODE_API(priv->ucode_ver);
1907 inst_size = iwl3945_ucode_get_inst_size(ucode);
1908 data_size = iwl3945_ucode_get_data_size(ucode);
1909 init_size = iwl3945_ucode_get_init_size(ucode);
1910 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
1911 boot_size = iwl3945_ucode_get_boot_size(ucode);
1912 src = iwl3945_ucode_get_data(ucode);
1913
1914
1915
1916
1917
1918 if (api_ver < api_min || api_ver > api_max) {
1919 IWL_ERR(priv, "Driver unable to support your firmware API. "
1920 "Driver supports v%u, firmware is v%u.\n",
1921 api_max, api_ver);
1922 priv->ucode_ver = 0;
1923 ret = -EINVAL;
1924 goto err_release;
1925 }
1926 if (api_ver != api_max)
1927 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
1928 "got %u. New firmware can be obtained "
1929 "from http://www.intellinuxwireless.org.\n",
1930 api_max, api_ver);
1931
1932 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1933 IWL_UCODE_MAJOR(priv->ucode_ver),
1934 IWL_UCODE_MINOR(priv->ucode_ver),
1935 IWL_UCODE_API(priv->ucode_ver),
1936 IWL_UCODE_SERIAL(priv->ucode_ver));
1937
1938 snprintf(priv->hw->wiphy->fw_version,
1939 sizeof(priv->hw->wiphy->fw_version),
1940 "%u.%u.%u.%u",
1941 IWL_UCODE_MAJOR(priv->ucode_ver),
1942 IWL_UCODE_MINOR(priv->ucode_ver),
1943 IWL_UCODE_API(priv->ucode_ver),
1944 IWL_UCODE_SERIAL(priv->ucode_ver));
1945
1946 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1947 priv->ucode_ver);
1948 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
1949 inst_size);
1950 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
1951 data_size);
1952 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
1953 init_size);
1954 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
1955 init_data_size);
1956 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
1957 boot_size);
1958
1959
1960
1961 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
1962 inst_size + data_size + init_size +
1963 init_data_size + boot_size) {
1964
1965 IWL_DEBUG_INFO(priv,
1966 "uCode file size %zd does not match expected size\n",
1967 ucode_raw->size);
1968 ret = -EINVAL;
1969 goto err_release;
1970 }
1971
1972
1973 if (inst_size > IWL39_MAX_INST_SIZE) {
1974 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
1975 inst_size);
1976 ret = -EINVAL;
1977 goto err_release;
1978 }
1979
1980 if (data_size > IWL39_MAX_DATA_SIZE) {
1981 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
1982 data_size);
1983 ret = -EINVAL;
1984 goto err_release;
1985 }
1986 if (init_size > IWL39_MAX_INST_SIZE) {
1987 IWL_DEBUG_INFO(priv,
1988 "uCode init instr len %d too large to fit in\n",
1989 init_size);
1990 ret = -EINVAL;
1991 goto err_release;
1992 }
1993 if (init_data_size > IWL39_MAX_DATA_SIZE) {
1994 IWL_DEBUG_INFO(priv,
1995 "uCode init data len %d too large to fit in\n",
1996 init_data_size);
1997 ret = -EINVAL;
1998 goto err_release;
1999 }
2000 if (boot_size > IWL39_MAX_BSM_SIZE) {
2001 IWL_DEBUG_INFO(priv,
2002 "uCode boot instr len %d too large to fit in\n",
2003 boot_size);
2004 ret = -EINVAL;
2005 goto err_release;
2006 }
2007
2008
2009
2010
2011
2012
2013 priv->ucode_code.len = inst_size;
2014 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2015
2016 priv->ucode_data.len = data_size;
2017 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2018
2019 priv->ucode_data_backup.len = data_size;
2020 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2021
2022 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2023 !priv->ucode_data_backup.v_addr)
2024 goto err_pci_alloc;
2025
2026
2027 if (init_size && init_data_size) {
2028 priv->ucode_init.len = init_size;
2029 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2030
2031 priv->ucode_init_data.len = init_data_size;
2032 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2033
2034 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2035 goto err_pci_alloc;
2036 }
2037
2038
2039 if (boot_size) {
2040 priv->ucode_boot.len = boot_size;
2041 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2042
2043 if (!priv->ucode_boot.v_addr)
2044 goto err_pci_alloc;
2045 }
2046
2047
2048
2049
2050 len = inst_size;
2051 IWL_DEBUG_INFO(priv,
2052 "Copying (but not loading) uCode instr len %zd\n", len);
2053 memcpy(priv->ucode_code.v_addr, src, len);
2054 src += len;
2055
2056 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2057 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
2058
2059
2060
2061 len = data_size;
2062 IWL_DEBUG_INFO(priv,
2063 "Copying (but not loading) uCode data len %zd\n", len);
2064 memcpy(priv->ucode_data.v_addr, src, len);
2065 memcpy(priv->ucode_data_backup.v_addr, src, len);
2066 src += len;
2067
2068
2069 if (init_size) {
2070 len = init_size;
2071 IWL_DEBUG_INFO(priv,
2072 "Copying (but not loading) init instr len %zd\n", len);
2073 memcpy(priv->ucode_init.v_addr, src, len);
2074 src += len;
2075 }
2076
2077
2078 if (init_data_size) {
2079 len = init_data_size;
2080 IWL_DEBUG_INFO(priv,
2081 "Copying (but not loading) init data len %zd\n", len);
2082 memcpy(priv->ucode_init_data.v_addr, src, len);
2083 src += len;
2084 }
2085
2086
2087 len = boot_size;
2088 IWL_DEBUG_INFO(priv,
2089 "Copying (but not loading) boot instr len %zd\n", len);
2090 memcpy(priv->ucode_boot.v_addr, src, len);
2091
2092
2093 release_firmware(ucode_raw);
2094 return 0;
2095
2096 err_pci_alloc:
2097 IWL_ERR(priv, "failed to allocate pci memory\n");
2098 ret = -ENOMEM;
2099 iwl3945_dealloc_ucode_pci(priv);
2100
2101 err_release:
2102 release_firmware(ucode_raw);
2103
2104 error:
2105 return ret;
2106}
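
/*
 * iwl3945_set_ucode_ptrs - point the bootstrap state machine (BSM) at the
 * runtime instruction and (backup) data images in DRAM and request that
 * the instruction image be loaded into SRAM.
 */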
2118static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2119{
2120 dma_addr_t pinst;
2121 dma_addr_t pdata;
2122
2123
2124 pinst = priv->ucode_code.p_addr;
2125 pdata = priv->ucode_data_backup.p_addr;
2126
2127
2128 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2129 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2130 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2131 priv->ucode_data.len);
2132
2133
2134
2135 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2136 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2137
2138 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
2139
2140 return 0;
2141}
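
/*
 * iwl3945_init_alive_start - handle the ALIVE notification from the
 * "initialize" uCode: verify the image and hand the device the pointers
 * to the runtime image; on any failure, queue a restart.
 */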
2150static void iwl3945_init_alive_start(struct iwl_priv *priv)
2151{
2152
2153 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
2154
2155
2156 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
2157 goto restart;
2158 }
2159
2160
2161
2162
2163 if (iwl3945_verify_ucode(priv)) {
2164
2165
2166 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
2167 goto restart;
2168 }
2169
2170
2171
2172
2173 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2174 if (iwl3945_set_ucode_ptrs(priv)) {
2175
2176
2177 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
2178 goto restart;
2179 }
2180 return;
2181
2182 restart:
2183 queue_work(priv->workqueue, &priv->restart);
2184}
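
/*
 * iwl3945_alive_start - handle the ALIVE notification from the runtime
 * uCode: verify the image, read the rfkill state, wait for thermal
 * calibration, then mark the driver ready and commit the RXON config.
 */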
2191static void iwl3945_alive_start(struct iwl_priv *priv)
2192{
2193 int thermal_spin = 0;
2194 u32 rfkill;
2195 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2196
2197 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2198
2199 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2200
2201
2202 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2203 goto restart;
2204 }
2205
2206
2207
2208
2209 if (iwl3945_verify_ucode(priv)) {
2210
2211
2212 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2213 goto restart;
2214 }
2215
2216 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2217 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2218
2219 if (rfkill & 0x1) {
2220 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2221
2222
2223 while (iwl3945_hw_get_temperature(priv) == 0) {
2224 thermal_spin++;
2225 udelay(10);
2226 }
2227
2228 if (thermal_spin)
2229 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
2230 thermal_spin * 10);
2231 } else
2232 set_bit(STATUS_RF_KILL_HW, &priv->status);
2233
2234
2235 set_bit(STATUS_ALIVE, &priv->status);
2236
2237
2238 iwl_legacy_setup_watchdog(priv);
2239
2240 if (iwl_legacy_is_rfkill(priv))
2241 return;
2242
2243 ieee80211_wake_queues(priv->hw);
2244
2245 priv->active_rate = IWL_RATES_MASK_3945;
2246
2247 iwl_legacy_power_update_mode(priv, true);
2248
2249 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2250 struct iwl3945_rxon_cmd *active_rxon =
2251 (struct iwl3945_rxon_cmd *)(&ctx->active);
2252
2253 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2254 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2255 } else {
2256
2257 iwl_legacy_connection_init_rx_config(priv, ctx);
2258 }
2259
2260
2261 iwl_legacy_send_bt_config(priv);
2262
2263 set_bit(STATUS_READY, &priv->status);
2264
2265
2266 iwl3945_commit_rxon(priv, ctx);
2267
2268 iwl3945_reg_txpower_periodic(priv);
2269
2270 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2271 wake_up(&priv->wait_command_queue);
2272
2273 return;
2274
2275 restart:
2276 queue_work(priv->workqueue, &priv->restart);
2277}
2278
2279static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2280
2281static void __iwl3945_down(struct iwl_priv *priv)
2282{
2283 unsigned long flags;
2284 int exit_pending;
2285
2286 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2287
2288 iwl_legacy_scan_cancel_timeout(priv, 200);
2289
2290 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2291
2292
2293
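	/* Stop the TX-queue watchdog; STATUS_EXIT_PENDING is already set,
	 * so it cannot be re-armed behind our back */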
2294 del_timer_sync(&priv->watchdog);
2295
2296
2297 iwl_legacy_clear_ucode_stations(priv, NULL);
2298 iwl_legacy_dealloc_bcast_stations(priv);
2299 iwl_legacy_clear_driver_stations(priv);
2300
2301
2302 wake_up_all(&priv->wait_command_queue);
2303
2304
2305
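	/* Wipe out the EXIT_PENDING bit again if we are not actually
	 * unloading the module */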
2306 if (!exit_pending)
2307 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2308
2309
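	/* stop and reset the on-board processor */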
2310 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2311
2312
2313 spin_lock_irqsave(&priv->lock, flags);
2314 iwl_legacy_disable_interrupts(priv);
2315 spin_unlock_irqrestore(&priv->lock, flags);
2316 iwl3945_synchronize_irq(priv);
2317
2318 if (priv->mac80211_registered)
2319 ieee80211_stop_queues(priv->hw);
2320
2321
2322
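	/* If we have not previously called iwl3945_init(), keep only the
	 * RF-kill, geo-configured and exit-pending bits and return */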
2323 if (!iwl_legacy_is_init(priv)) {
2324 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2325 STATUS_RF_KILL_HW |
2326 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2327 STATUS_GEO_CONFIGURED |
2328 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2329 STATUS_EXIT_PENDING;
2330 goto exit;
2331 }
2332
2333
2334
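	/* ...otherwise keep only the RF-kill, geo-configured, firmware-error
	 * and exit-pending bits and continue taking the NIC down */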
2335 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2336 STATUS_RF_KILL_HW |
2337 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2338 STATUS_GEO_CONFIGURED |
2339 test_bit(STATUS_FW_ERROR, &priv->status) <<
2340 STATUS_FW_ERROR |
2341 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2342 STATUS_EXIT_PENDING;
2343
2344 iwl3945_hw_txq_ctx_stop(priv);
2345 iwl3945_hw_rxq_stop(priv);
2346
2347
2348 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2349 udelay(5);
2350
2351
2352 iwl_legacy_apm_stop(priv);
2353
2354 exit:
2355 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2356
2357 if (priv->beacon_skb)
2358 dev_kfree_skb(priv->beacon_skb);
2359 priv->beacon_skb = NULL;
2360
2361
2362 iwl3945_clear_free_frames(priv);
2363}
2364
2365static void iwl3945_down(struct iwl_priv *priv)
2366{
2367 mutex_lock(&priv->mutex);
2368 __iwl3945_down(priv);
2369 mutex_unlock(&priv->mutex);
2370
2371 iwl3945_cancel_deferred_work(priv);
2372}
2373
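/* How many times to retry the uCode load before giving up */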
2374#define MAX_HW_RESTARTS 5
2375
2376static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2377{
2378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2379 unsigned long flags;
2380 u8 sta_id;
2381
2382 spin_lock_irqsave(&priv->sta_lock, flags);
2383 sta_id = iwl_legacy_prep_station(priv, ctx,
2384 iwlegacy_bcast_addr, false, NULL);
2385 if (sta_id == IWL_INVALID_STATION) {
2386 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2387 spin_unlock_irqrestore(&priv->sta_lock, flags);
2388
2389 return -EINVAL;
2390 }
2391
2392 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
2393 priv->stations[sta_id].used |= IWL_STA_BCAST;
2394 spin_unlock_irqrestore(&priv->sta_lock, flags);
2395
2396 return 0;
2397}
2398
2399static int __iwl3945_up(struct iwl_priv *priv)
2400{
2401 int rc, i;
2402
2403 rc = iwl3945_alloc_bcast_station(priv);
2404 if (rc)
2405 return rc;
2406
2407 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2408 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2409 return -EIO;
2410 }
2411
2412 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2413 IWL_ERR(priv, "ucode not available for device bring up\n");
2414 return -EIO;
2415 }
2416
2417
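	/* If the platform's RF-kill switch is NOT set to KILL */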
2418 if (iwl_read32(priv, CSR_GP_CNTRL) &
2419 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2420 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2421 else {
2422 set_bit(STATUS_RF_KILL_HW, &priv->status);
2423 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2424 return -ENODEV;
2425 }
2426
2427 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2428
2429 rc = iwl3945_hw_nic_init(priv);
2430 if (rc) {
		IWL_ERR(priv, "Unable to init nic\n");
2432 return rc;
2433 }
2434
2435
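	/* make sure rfkill handshake bits are cleared */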
2436 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2437 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2438 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2439
2440
2441 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2442 iwl_legacy_enable_interrupts(priv);
2443
2444
2445 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2446 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2447
2448
2449
2450
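	/* Refresh the backup copy of the runtime data image; this backup is
	 * what the bootstrap state machine points the device at (see
	 * iwl3945_set_ucode_ptrs) */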
2451 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2452 priv->ucode_data.len);
2453
2454
2455 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
2456 return 0;
2457
2458 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2459
2460
2461
2462
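		/* Load the bootstrap program into the device's internal
		 * memory; it will then fetch the "initialize" uCode */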
2463 rc = priv->cfg->ops->lib->load_ucode(priv);
2464
2465 if (rc) {
2466 IWL_ERR(priv,
2467 "Unable to set up bootstrap uCode: %d\n", rc);
2468 continue;
2469 }
2470
2471
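		/* start card; "initialize" will load the runtime uCode */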
2472 iwl3945_nic_start(priv);
2473
2474 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2475
2476 return 0;
2477 }
2478
2479 set_bit(STATUS_EXIT_PENDING, &priv->status);
2480 __iwl3945_down(priv);
2481 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2482
2483
2484
2485 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2486 return -EIO;
2487}
2488
2489
2490
2491
2492
2493
2494
2495
2496static void iwl3945_bg_init_alive_start(struct work_struct *data)
2497{
2498 struct iwl_priv *priv =
2499 container_of(data, struct iwl_priv, init_alive_start.work);
2500
2501 mutex_lock(&priv->mutex);
2502 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2503 goto out;
2504
2505 iwl3945_init_alive_start(priv);
2506out:
2507 mutex_unlock(&priv->mutex);
2508}
2509
2510static void iwl3945_bg_alive_start(struct work_struct *data)
2511{
2512 struct iwl_priv *priv =
2513 container_of(data, struct iwl_priv, alive_start.work);
2514
2515 mutex_lock(&priv->mutex);
2516 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2517 goto out;
2518
2519 iwl3945_alive_start(priv);
2520out:
2521 mutex_unlock(&priv->mutex);
2522}
2523
2524
2525
2526
2527
2528
2529
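/*
 * Poll the hardware RF-kill switch state via CSR_GP_CNTRL every two seconds
 * while the interface is down, and report any change to mac80211.
 */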
2530static void iwl3945_rfkill_poll(struct work_struct *data)
2531{
2532 struct iwl_priv *priv =
2533 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2534 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2535 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2536 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2537
2538 if (new_rfkill != old_rfkill) {
2539 if (new_rfkill)
2540 set_bit(STATUS_RF_KILL_HW, &priv->status);
2541 else
2542 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2543
2544 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2545
2546 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2547 new_rfkill ? "disable radio" : "enable radio");
2548 }
2549
2550
2551
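	/* Keep polling regardless of the current state so that a later
	 * toggle of the switch is still noticed */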
2552 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2553 round_jiffies_relative(2 * HZ));
2554
2555}
2556
2557int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2558{
2559 struct iwl_host_cmd cmd = {
2560 .id = REPLY_SCAN_CMD,
2561 .len = sizeof(struct iwl3945_scan_cmd),
2562 .flags = CMD_SIZE_HUGE,
2563 };
2564 struct iwl3945_scan_cmd *scan;
2565 u8 n_probes = 0;
2566 enum ieee80211_band band;
2567 bool is_active = false;
2568 int ret;
2569 u16 len;
2570
2571 lockdep_assert_held(&priv->mutex);
2572
2573 if (!priv->scan_cmd) {
2574 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2575 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2576 if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
2578 return -ENOMEM;
2579 }
2580 }
2581 scan = priv->scan_cmd;
2582 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
2583
2584 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2585 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2586
2587 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2588 u16 interval;
2589 u32 extra;
2590 u32 suspend_time = 100;
2591 u32 scan_suspend_time = 100;
2592
2593 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2594
2595 interval = vif->bss_conf.beacon_int;
2596
2597 scan->suspend_time = 0;
2598 scan->max_out_time = cpu_to_le32(200 * 1024);
2599 if (!interval)
2600 interval = suspend_time;
2601
2602
2603
2604
2605
2606
2607
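		/*
		 * Encode suspend_time: bits 24-31 carry the number of whole
		 * beacon intervals to skip, bits 0-19 the remainder in usec
		 * (TU * 1024); the 0xFF0FFFFF mask keeps bits 20-23 clear.
		 */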
2608 extra = (suspend_time / interval) << 24;
2609 scan_suspend_time = 0xFF0FFFFF &
2610 (extra | ((suspend_time % interval) * 1024));
2611
2612 scan->suspend_time = cpu_to_le32(scan_suspend_time);
2613 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
2614 scan_suspend_time, interval);
2615 }
2616
2617 if (priv->scan_request->n_ssids) {
2618 int i, p = 0;
2619 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
2620 for (i = 0; i < priv->scan_request->n_ssids; i++) {
2621
2622 if (!priv->scan_request->ssids[i].ssid_len)
2623 continue;
2624 scan->direct_scan[p].id = WLAN_EID_SSID;
2625 scan->direct_scan[p].len =
2626 priv->scan_request->ssids[i].ssid_len;
2627 memcpy(scan->direct_scan[p].ssid,
2628 priv->scan_request->ssids[i].ssid,
2629 priv->scan_request->ssids[i].ssid_len);
2630 n_probes++;
2631 p++;
2632 }
2633 is_active = true;
2634 } else
2635 IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
2636
2637
2638
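	/* TX command used for the probe requests sent on each scanned channel */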
2639 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2640 scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2641 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2642
2643
2644
2645 switch (priv->scan_band) {
2646 case IEEE80211_BAND_2GHZ:
2647 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2648 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
2649 band = IEEE80211_BAND_2GHZ;
2650 break;
2651 case IEEE80211_BAND_5GHZ:
2652 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
2653 band = IEEE80211_BAND_5GHZ;
2654 break;
2655 default:
2656 IWL_WARN(priv, "Invalid scan band\n");
2657 return -EIO;
2658 }
2659
2660
2661
2662
2663
2664
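	/* Only set the "good CRC" threshold for active scans; leave it
	 * disabled for purely passive scans */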
2665 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2666 IWL_GOOD_CRC_TH_DISABLED;
2667
2668 len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
2669 vif->addr, priv->scan_request->ie,
2670 priv->scan_request->ie_len,
2671 IWL_MAX_SCAN_SIZE - sizeof(*scan));
2672 scan->tx_cmd.len = cpu_to_le16(len);
2673
2674
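	/* select Rx antenna(s) */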
2675 scan->flags |= iwl3945_get_antenna_flags(priv);
2676
2677 scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
2678 (void *)&scan->data[len], vif);
2679 if (scan->channel_count == 0) {
2680 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
2681 return -EIO;
2682 }
2683
2684 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
2685 scan->channel_count * sizeof(struct iwl3945_scan_channel);
2686 cmd.data = scan;
2687 scan->len = cpu_to_le16(cmd.len);
2688
2689 set_bit(STATUS_SCAN_HW, &priv->status);
2690 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
2691 if (ret)
2692 clear_bit(STATUS_SCAN_HW, &priv->status);
2693 return ret;
2694}
2695
2696void iwl3945_post_scan(struct iwl_priv *priv)
2697{
2698 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2699
2700
2701
2702
2703
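	/*
	 * An RXON change may have been deferred while the scan was running;
	 * commit it now if the staging config differs from the active one.
	 */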
2704 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2705 iwl3945_commit_rxon(priv, ctx);
2706}
2707
2708static void iwl3945_bg_restart(struct work_struct *data)
2709{
2710 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2711
2712 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2713 return;
2714
2715 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2716 struct iwl_rxon_context *ctx;
2717 mutex_lock(&priv->mutex);
2718 for_each_context(priv, ctx)
2719 ctx->vif = NULL;
2720 priv->is_open = 0;
2721 mutex_unlock(&priv->mutex);
2722 iwl3945_down(priv);
2723 ieee80211_restart_hw(priv->hw);
2724 } else {
2725 iwl3945_down(priv);
2726
2727 mutex_lock(&priv->mutex);
2728 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2729 mutex_unlock(&priv->mutex);
2730 return;
2731 }
2732
2733 __iwl3945_up(priv);
2734 mutex_unlock(&priv->mutex);
2735 }
2736}
2737
2738static void iwl3945_bg_rx_replenish(struct work_struct *data)
2739{
2740 struct iwl_priv *priv =
2741 container_of(data, struct iwl_priv, rx_replenish);
2742
2743 mutex_lock(&priv->mutex);
2744 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2745 goto out;
2746
2747 iwl3945_rx_replenish(priv);
2748out:
2749 mutex_unlock(&priv->mutex);
2750}
2751
2752void iwl3945_post_associate(struct iwl_priv *priv)
2753{
2754 int rc = 0;
2755 struct ieee80211_conf *conf = NULL;
2756 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2757
2758 if (!ctx->vif || !priv->is_open)
2759 return;
2760
2761 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2762 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
2763
2764 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2765 return;
2766
2767 iwl_legacy_scan_cancel_timeout(priv, 200);
2768
2769 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
2770
2771 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2772 iwl3945_commit_rxon(priv, ctx);
2773
2774 rc = iwl_legacy_send_rxon_timing(priv, ctx);
2775 if (rc)
2776 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2777 "Attempting to continue.\n");
2778
2779 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2780
2781 ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
2782
2783 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2784 ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
2785
2786 if (ctx->vif->bss_conf.use_short_preamble)
2787 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2788 else
2789 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2790
2791 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2792 if (ctx->vif->bss_conf.use_short_slot)
2793 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2794 else
2795 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2796 }
2797
2798 iwl3945_commit_rxon(priv, ctx);
2799
2800 switch (ctx->vif->type) {
2801 case NL80211_IFTYPE_STATION:
2802 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
2803 break;
2804 case NL80211_IFTYPE_ADHOC:
2805 iwl3945_send_beacon_cmd(priv);
2806 break;
2807 default:
2808 IWL_ERR(priv, "%s Should not be called in %d mode\n",
2809 __func__, ctx->vif->type);
2810 break;
2811 }
2812}
2813
2814
2815
2816
2817
2818
2819
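/* How long mac80211 start waits for the runtime uCode to report ALIVE */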
2820#define UCODE_READY_TIMEOUT (2 * HZ)
2821
2822static int iwl3945_mac_start(struct ieee80211_hw *hw)
2823{
2824 struct iwl_priv *priv = hw->priv;
2825 int ret;
2826
2827 IWL_DEBUG_MAC80211(priv, "enter\n");
2828
2829
2830 mutex_lock(&priv->mutex);
2831
2832
2833
2834
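	/* Read the uCode from disk only on the first start; later starts
	 * reuse the cached image */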
2835 if (!priv->ucode_code.len) {
2836 ret = iwl3945_read_ucode(priv);
2837 if (ret) {
2838 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
2839 mutex_unlock(&priv->mutex);
2840 goto out_release_irq;
2841 }
2842 }
2843
2844 ret = __iwl3945_up(priv);
2845
2846 mutex_unlock(&priv->mutex);
2847
2848 if (ret)
2849 goto out_release_irq;
2850
2851 IWL_DEBUG_INFO(priv, "Start UP work.\n");
2852
2853
2854
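	/* Wait for the uCode to report ALIVE (STATUS_READY) before
	 * returning control to mac80211 */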
2855 ret = wait_event_timeout(priv->wait_command_queue,
2856 test_bit(STATUS_READY, &priv->status),
2857 UCODE_READY_TIMEOUT);
2858 if (!ret) {
2859 if (!test_bit(STATUS_READY, &priv->status)) {
2860 IWL_ERR(priv,
2861 "Wait for START_ALIVE timeout after %dms.\n",
2862 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2863 ret = -ETIMEDOUT;
2864 goto out_release_irq;
2865 }
2866 }
2867
2868
2869
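	/* uCode is up and running; stop polling the RF-kill switch */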
2870 cancel_delayed_work(&priv->_3945.rfkill_poll);
2871
2872 priv->is_open = 1;
2873 IWL_DEBUG_MAC80211(priv, "leave\n");
2874 return 0;
2875
2876out_release_irq:
2877 priv->is_open = 0;
2878 IWL_DEBUG_MAC80211(priv, "leave - failed\n");
2879 return ret;
2880}
2881
2882static void iwl3945_mac_stop(struct ieee80211_hw *hw)
2883{
2884 struct iwl_priv *priv = hw->priv;
2885
2886 IWL_DEBUG_MAC80211(priv, "enter\n");
2887
2888 if (!priv->is_open) {
2889 IWL_DEBUG_MAC80211(priv, "leave - skip\n");
2890 return;
2891 }
2892
2893 priv->is_open = 0;
2894
2895 iwl3945_down(priv);
2896
2897 flush_workqueue(priv->workqueue);
2898
2899
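	/* The interface is down again; resume polling the RF-kill switch */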
2900 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2901 round_jiffies_relative(2 * HZ));
2902
2903 IWL_DEBUG_MAC80211(priv, "leave\n");
2904}
2905
2906static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2907{
2908 struct iwl_priv *priv = hw->priv;
2909
2910 IWL_DEBUG_MAC80211(priv, "enter\n");
2911
2912 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2913 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2914
2915 if (iwl3945_tx_skb(priv, skb))
2916 dev_kfree_skb_any(skb);
2917
2918 IWL_DEBUG_MAC80211(priv, "leave\n");
2919}
2920
2921void iwl3945_config_ap(struct iwl_priv *priv)
2922{
2923 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2924 struct ieee80211_vif *vif = ctx->vif;
2925 int rc = 0;
2926
2927 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2928 return;
2929
2930
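	/* The following should be done only at AP/IBSS bring up */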
2931 if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
2932
2933
2934 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2935 iwl3945_commit_rxon(priv, ctx);
2936
2937
2938 rc = iwl_legacy_send_rxon_timing(priv, ctx);
2939 if (rc)
2940 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2941 "Attempting to continue.\n");
2942
2943 ctx->staging.assoc_id = 0;
2944
2945 if (vif->bss_conf.use_short_preamble)
2946 ctx->staging.flags |=
2947 RXON_FLG_SHORT_PREAMBLE_MSK;
2948 else
2949 ctx->staging.flags &=
2950 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2951
2952 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2953 if (vif->bss_conf.use_short_slot)
2954 ctx->staging.flags |=
2955 RXON_FLG_SHORT_SLOT_MSK;
2956 else
2957 ctx->staging.flags &=
2958 ~RXON_FLG_SHORT_SLOT_MSK;
2959 }
2960
2961 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2962 iwl3945_commit_rxon(priv, ctx);
2963 }
2964 iwl3945_send_beacon_cmd(priv);
2965}
2966
2967static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2968 struct ieee80211_vif *vif,
2969 struct ieee80211_sta *sta,
2970 struct ieee80211_key_conf *key)
2971{
2972 struct iwl_priv *priv = hw->priv;
2973 int ret = 0;
2974 u8 sta_id = IWL_INVALID_STATION;
2975 u8 static_key;
2976
2977 IWL_DEBUG_MAC80211(priv, "enter\n");
2978
2979 if (iwl3945_mod_params.sw_crypto) {
2980 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2981 return -EOPNOTSUPP;
2982 }
2983
2984
2985
2986
2987
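	/*
	 * To support IBSS RSN, don't program group keys in IBSS; the
	 * hardware will then not attempt to decrypt the frames.
	 */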
2988 if (vif->type == NL80211_IFTYPE_ADHOC &&
2989 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
2990 return -EOPNOTSUPP;
2991
2992 static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
2993
2994 if (!static_key) {
2995 sta_id = iwl_legacy_sta_id_or_broadcast(
2996 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
2997 if (sta_id == IWL_INVALID_STATION)
2998 return -EINVAL;
2999 }
3000
3001 mutex_lock(&priv->mutex);
3002 iwl_legacy_scan_cancel_timeout(priv, 100);
3003
3004 switch (cmd) {
3005 case SET_KEY:
3006 if (static_key)
3007 ret = iwl3945_set_static_key(priv, key);
3008 else
3009 ret = iwl3945_set_dynamic_key(priv, key, sta_id);
3010 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3011 break;
3012 case DISABLE_KEY:
3013 if (static_key)
3014 ret = iwl3945_remove_static_key(priv);
3015 else
3016 ret = iwl3945_clear_sta_key_info(priv, sta_id);
3017 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3018 break;
3019 default:
3020 ret = -EINVAL;
3021 }
3022
3023 mutex_unlock(&priv->mutex);
3024 IWL_DEBUG_MAC80211(priv, "leave\n");
3025
3026 return ret;
3027}
3028
3029static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3030 struct ieee80211_vif *vif,
3031 struct ieee80211_sta *sta)
3032{
3033 struct iwl_priv *priv = hw->priv;
3034 struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3035 int ret;
3036 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3037 u8 sta_id;
3038
3039 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3040 sta->addr);
3041 mutex_lock(&priv->mutex);
3042 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3043 sta->addr);
3044 sta_priv->common.sta_id = IWL_INVALID_STATION;
3045
3046
3047 ret = iwl_legacy_add_station_common(priv,
3048 &priv->contexts[IWL_RXON_CTX_BSS],
3049 sta->addr, is_ap, sta, &sta_id);
3050 if (ret) {
3051 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3052 sta->addr, ret);
3053
3054 mutex_unlock(&priv->mutex);
3055 return ret;
3056 }
3057
3058 sta_priv->common.sta_id = sta_id;
3059
3060
3061 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3062 sta->addr);
3063 iwl3945_rs_rate_init(priv, sta, sta_id);
3064 mutex_unlock(&priv->mutex);
3065
3066 return 0;
3067}
3068
3069static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3070 unsigned int changed_flags,
3071 unsigned int *total_flags,
3072 u64 multicast)
3073{
3074 struct iwl_priv *priv = hw->priv;
3075 __le32 filter_or = 0, filter_nand = 0;
3076 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3077
3078#define CHK(test, flag) do { \
3079 if (*total_flags & (test)) \
3080 filter_or |= (flag); \
3081 else \
3082 filter_nand |= (flag); \
3083 } while (0)
3084
3085 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3086 changed_flags, *total_flags);
3087
3088 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3089 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3090 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3091
3092#undef CHK
3093
3094 mutex_lock(&priv->mutex);
3095
3096 ctx->staging.filter_flags &= ~filter_nand;
3097 ctx->staging.filter_flags |= filter_or;
3098
3099
3100
3101
3102
3103
3104
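	/*
	 * Don't commit the RXON here; a scan may be in progress and the
	 * updated filter flags will go out with the next RXON commit.
	 */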
3105 mutex_unlock(&priv->mutex);
3106
3107
3108
3109
3110
3111
3112
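	/*
	 * Report back which filter flags we actually honour; FIF_ALLMULTI is
	 * always claimed because no multicast filter is programmed into the
	 * device here.
	 */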
3113 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3114 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3115}
3116
3117
3118
3119
3120
3121
3122
3123
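/*****************************************************************************
 *
 * sysfs attributes
 *
 *****************************************************************************/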
3124#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137static ssize_t iwl3945_show_debug_level(struct device *d,
3138 struct device_attribute *attr, char *buf)
3139{
3140 struct iwl_priv *priv = dev_get_drvdata(d);
3141 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3142}
3143static ssize_t iwl3945_store_debug_level(struct device *d,
3144 struct device_attribute *attr,
3145 const char *buf, size_t count)
3146{
3147 struct iwl_priv *priv = dev_get_drvdata(d);
3148 unsigned long val;
3149 int ret;
3150
3151 ret = strict_strtoul(buf, 0, &val);
3152 if (ret)
3153 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3154 else {
3155 priv->debug_level = val;
3156 if (iwl_legacy_alloc_traffic_mem(priv))
3157 IWL_ERR(priv,
3158 "Not enough memory to generate traffic log\n");
3159 }
3160 return strnlen(buf, count);
3161}
3162
3163static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3164 iwl3945_show_debug_level, iwl3945_store_debug_level);
3165
3166#endif
3167
3168static ssize_t iwl3945_show_temperature(struct device *d,
3169 struct device_attribute *attr, char *buf)
3170{
3171 struct iwl_priv *priv = dev_get_drvdata(d);
3172
3173 if (!iwl_legacy_is_alive(priv))
3174 return -EAGAIN;
3175
3176 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3177}
3178
3179static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3180
3181static ssize_t iwl3945_show_tx_power(struct device *d,
3182 struct device_attribute *attr, char *buf)
3183{
3184 struct iwl_priv *priv = dev_get_drvdata(d);
3185 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3186}
3187
3188static ssize_t iwl3945_store_tx_power(struct device *d,
3189 struct device_attribute *attr,
3190 const char *buf, size_t count)
3191{
3192 struct iwl_priv *priv = dev_get_drvdata(d);
3193 char *p = (char *)buf;
3194 u32 val;
3195
3196 val = simple_strtoul(p, &p, 10);
3197 if (p == buf)
		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
3199 else
3200 iwl3945_hw_reg_set_txpower(priv, val);
3201
3202 return count;
3203}
3204
3205static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3206
3207static ssize_t iwl3945_show_flags(struct device *d,
3208 struct device_attribute *attr, char *buf)
3209{
3210 struct iwl_priv *priv = dev_get_drvdata(d);
3211 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3212
	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
3214}
3215
3216static ssize_t iwl3945_store_flags(struct device *d,
3217 struct device_attribute *attr,
3218 const char *buf, size_t count)
3219{
3220 struct iwl_priv *priv = dev_get_drvdata(d);
3221 u32 flags = simple_strtoul(buf, NULL, 0);
3222 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3223
3224 mutex_lock(&priv->mutex);
3225 if (le32_to_cpu(ctx->staging.flags) != flags) {
3226
3227 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3228 IWL_WARN(priv, "Could not cancel scan.\n");
3229 else {
3230 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3231 flags);
3232 ctx->staging.flags = cpu_to_le32(flags);
3233 iwl3945_commit_rxon(priv, ctx);
3234 }
3235 }
3236 mutex_unlock(&priv->mutex);
3237
3238 return count;
3239}
3240
3241static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3242
3243static ssize_t iwl3945_show_filter_flags(struct device *d,
3244 struct device_attribute *attr, char *buf)
3245{
3246 struct iwl_priv *priv = dev_get_drvdata(d);
3247 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3248
3249 return sprintf(buf, "0x%04X\n",
3250 le32_to_cpu(ctx->active.filter_flags));
3251}
3252
3253static ssize_t iwl3945_store_filter_flags(struct device *d,
3254 struct device_attribute *attr,
3255 const char *buf, size_t count)
3256{
3257 struct iwl_priv *priv = dev_get_drvdata(d);
3258 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3259 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3260
3261 mutex_lock(&priv->mutex);
3262 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3263
3264 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3265 IWL_WARN(priv, "Could not cancel scan.\n");
3266 else {
3267 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3268 "0x%04X\n", filter_flags);
3269 ctx->staging.filter_flags =
3270 cpu_to_le32(filter_flags);
3271 iwl3945_commit_rxon(priv, ctx);
3272 }
3273 }
3274 mutex_unlock(&priv->mutex);
3275
3276 return count;
3277}
3278
3279static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
3280 iwl3945_store_filter_flags);
3281
3282static ssize_t iwl3945_show_measurement(struct device *d,
3283 struct device_attribute *attr, char *buf)
3284{
3285 struct iwl_priv *priv = dev_get_drvdata(d);
3286 struct iwl_spectrum_notification measure_report;
3287 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3288 u8 *data = (u8 *)&measure_report;
3289 unsigned long flags;
3290
3291 spin_lock_irqsave(&priv->lock, flags);
3292 if (!(priv->measurement_status & MEASUREMENT_READY)) {
3293 spin_unlock_irqrestore(&priv->lock, flags);
3294 return 0;
3295 }
3296 memcpy(&measure_report, &priv->measure_report, size);
3297 priv->measurement_status = 0;
3298 spin_unlock_irqrestore(&priv->lock, flags);
3299
3300 while (size && (PAGE_SIZE - len)) {
3301 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3302 PAGE_SIZE - len, 1);
3303 len = strlen(buf);
3304 if (PAGE_SIZE - len)
3305 buf[len++] = '\n';
3306
3307 ofs += 16;
3308 size -= min(size, 16U);
3309 }
3310
3311 return len;
3312}
3313
3314static ssize_t iwl3945_store_measurement(struct device *d,
3315 struct device_attribute *attr,
3316 const char *buf, size_t count)
3317{
3318 struct iwl_priv *priv = dev_get_drvdata(d);
3319 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3320 struct ieee80211_measurement_params params = {
3321 .channel = le16_to_cpu(ctx->active.channel),
3322 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3323 .duration = cpu_to_le16(1),
3324 };
3325 u8 type = IWL_MEASURE_BASIC;
3326 u8 buffer[32];
3327 u8 channel;
3328
3329 if (count) {
		size_t len = min(sizeof(buffer) - 1, count);
		char *p = buffer;

		strncpy(buffer, buf, len);
		buffer[len] = '\0';
3332 channel = simple_strtoul(p, NULL, 0);
3333 if (channel)
3334 params.channel = channel;
3335
3336 p = buffer;
3337 while (*p && *p != ' ')
3338 p++;
3339 if (*p)
3340 type = simple_strtoul(p + 1, NULL, 0);
3341 }
3342
3343 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
3344 "channel %d (for '%s')\n", type, params.channel, buf);
	iwl3945_get_measurement(priv, &params, type);
3346
3347 return count;
3348}
3349
3350static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3351 iwl3945_show_measurement, iwl3945_store_measurement);
3352
3353static ssize_t iwl3945_store_retry_rate(struct device *d,
3354 struct device_attribute *attr,
3355 const char *buf, size_t count)
3356{
3357 struct iwl_priv *priv = dev_get_drvdata(d);
3358
3359 priv->retry_rate = simple_strtoul(buf, NULL, 0);
3360 if (priv->retry_rate <= 0)
3361 priv->retry_rate = 1;
3362
3363 return count;
3364}
3365
3366static ssize_t iwl3945_show_retry_rate(struct device *d,
3367 struct device_attribute *attr, char *buf)
3368{
3369 struct iwl_priv *priv = dev_get_drvdata(d);
3370 return sprintf(buf, "%d", priv->retry_rate);
3371}
3372
3373static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3374 iwl3945_store_retry_rate);
3375
3376
3377static ssize_t iwl3945_show_channels(struct device *d,
3378 struct device_attribute *attr, char *buf)
3379{
3380
3381 return 0;
3382}
3383
3384static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3385
3386static ssize_t iwl3945_show_antenna(struct device *d,
3387 struct device_attribute *attr, char *buf)
3388{
3389 struct iwl_priv *priv = dev_get_drvdata(d);
3390
3391 if (!iwl_legacy_is_alive(priv))
3392 return -EAGAIN;
3393
3394 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3395}
3396
3397static ssize_t iwl3945_store_antenna(struct device *d,
3398 struct device_attribute *attr,
3399 const char *buf, size_t count)
3400{
3401 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
3402 int ant;
3403
3404 if (count == 0)
3405 return 0;
3406
3407 if (sscanf(buf, "%1i", &ant) != 1) {
3408 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
3409 return count;
3410 }
3411
3412 if ((ant >= 0) && (ant <= 2)) {
3413 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
3414 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
3415 } else
3416 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
3417
3418
3419 return count;
3420}
3421
3422static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3423
3424static ssize_t iwl3945_show_status(struct device *d,
3425 struct device_attribute *attr, char *buf)
3426{
3427 struct iwl_priv *priv = dev_get_drvdata(d);
3428 if (!iwl_legacy_is_alive(priv))
3429 return -EAGAIN;
3430 return sprintf(buf, "0x%08x\n", (int)priv->status);
3431}
3432
3433static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3434
3435static ssize_t iwl3945_dump_error_log(struct device *d,
3436 struct device_attribute *attr,
3437 const char *buf, size_t count)
3438{
3439 struct iwl_priv *priv = dev_get_drvdata(d);
3440 char *p = (char *)buf;
3441
3442 if (p[0] == '1')
3443 iwl3945_dump_nic_error_log(priv);
3444
3445 return strnlen(buf, count);
3446}
3447
3448static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3449
3450
3451
3452
3453
3454
3455
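/*****************************************************************************
 *
 * driver setup and teardown
 *
 *****************************************************************************/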
3456static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3457{
3458 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3459
3460 init_waitqueue_head(&priv->wait_command_queue);
3461
3462 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3463 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3464 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3465 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3466 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3467
3468 iwl_legacy_setup_scan_deferred_work(priv);
3469
3470 iwl3945_hw_setup_deferred_work(priv);
3471
3472 init_timer(&priv->watchdog);
3473 priv->watchdog.data = (unsigned long)priv;
3474 priv->watchdog.function = iwl_legacy_bg_watchdog;
3475
3476 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3477 iwl3945_irq_tasklet, (unsigned long)priv);
3478}
3479
3480static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3481{
3482 iwl3945_hw_cancel_deferred_work(priv);
3483
3484 cancel_delayed_work_sync(&priv->init_alive_start);
3485 cancel_delayed_work(&priv->alive_start);
3486
3487 iwl_legacy_cancel_scan_deferred_work(priv);
3488}
3489
3490static struct attribute *iwl3945_sysfs_entries[] = {
3491 &dev_attr_antenna.attr,
3492 &dev_attr_channels.attr,
3493 &dev_attr_dump_errors.attr,
3494 &dev_attr_flags.attr,
3495 &dev_attr_filter_flags.attr,
3496 &dev_attr_measurement.attr,
3497 &dev_attr_retry_rate.attr,
3498 &dev_attr_status.attr,
3499 &dev_attr_temperature.attr,
3500 &dev_attr_tx_power.attr,
3501#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3502 &dev_attr_debug_level.attr,
3503#endif
3504 NULL
3505};
3506
3507static struct attribute_group iwl3945_attribute_group = {
3508 .name = NULL,
3509 .attrs = iwl3945_sysfs_entries,
3510};
3511
3512struct ieee80211_ops iwl3945_hw_ops = {
3513 .tx = iwl3945_mac_tx,
3514 .start = iwl3945_mac_start,
3515 .stop = iwl3945_mac_stop,
3516 .add_interface = iwl_legacy_mac_add_interface,
3517 .remove_interface = iwl_legacy_mac_remove_interface,
3518 .change_interface = iwl_legacy_mac_change_interface,
3519 .config = iwl_legacy_mac_config,
3520 .configure_filter = iwl3945_configure_filter,
3521 .set_key = iwl3945_mac_set_key,
3522 .conf_tx = iwl_legacy_mac_conf_tx,
3523 .reset_tsf = iwl_legacy_mac_reset_tsf,
3524 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3525 .hw_scan = iwl_legacy_mac_hw_scan,
3526 .sta_add = iwl3945_mac_sta_add,
3527 .sta_remove = iwl_legacy_mac_sta_remove,
3528 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
3529};
3530
3531static int iwl3945_init_drv(struct iwl_priv *priv)
3532{
3533 int ret;
3534 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
3535
3536 priv->retry_rate = 1;
3537 priv->beacon_skb = NULL;
3538
3539 spin_lock_init(&priv->sta_lock);
3540 spin_lock_init(&priv->hcmd_lock);
3541
3542 INIT_LIST_HEAD(&priv->free_frames);
3543
3544 mutex_init(&priv->mutex);
3545
3546 priv->ieee_channels = NULL;
3547 priv->ieee_rates = NULL;
3548 priv->band = IEEE80211_BAND_2GHZ;
3549
3550 priv->iw_mode = NL80211_IFTYPE_STATION;
3551 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3552
3553
3554 priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3555
3556 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3557 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
3558 eeprom->version);
3559 ret = -EINVAL;
3560 goto err;
3561 }
3562 ret = iwl_legacy_init_channel_map(priv);
3563 if (ret) {
3564 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3565 goto err;
3566 }
3567
3568
3569 if (iwl3945_txpower_set_from_eeprom(priv)) {
3570 ret = -EIO;
3571 goto err_free_channel_map;
3572 }
3573
3574 ret = iwl_legacy_init_geos(priv);
3575 if (ret) {
3576 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3577 goto err_free_channel_map;
3578 }
3579 iwl3945_init_hw_rates(priv, priv->ieee_rates);
3580
3581 return 0;
3582
3583err_free_channel_map:
3584 iwl_legacy_free_channel_map(priv);
3585err:
3586 return ret;
3587}
3588
3589#define IWL3945_MAX_PROBE_REQUEST 200
3590
3591static int iwl3945_setup_mac(struct iwl_priv *priv)
3592{
3593 int ret;
3594 struct ieee80211_hw *hw = priv->hw;
3595
3596 hw->rate_control_algorithm = "iwl-3945-rs";
3597 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
3598 hw->vif_data_size = sizeof(struct iwl_vif_priv);
3599
3600
3601 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3602 IEEE80211_HW_SPECTRUM_MGMT;
3603
3604 hw->wiphy->interface_modes =
3605 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3606
3607 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3608 WIPHY_FLAG_DISABLE_BEACON_HINTS |
3609 WIPHY_FLAG_IBSS_RSN;
3610
3611 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3612
3613 hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
3614
3615
3616 hw->queues = 4;
3617
3618 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
3619 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3620 &priv->bands[IEEE80211_BAND_2GHZ];
3621
3622 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
3623 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3624 &priv->bands[IEEE80211_BAND_5GHZ];
3625
3626 iwl_legacy_leds_init(priv);
3627
3628 ret = ieee80211_register_hw(priv->hw);
3629 if (ret) {
3630 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
3631 return ret;
3632 }
3633 priv->mac80211_registered = 1;
3634
3635 return 0;
3636}
3637
3638static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3639{
3640 int err = 0, i;
3641 struct iwl_priv *priv;
3642 struct ieee80211_hw *hw;
3643 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3644 struct iwl3945_eeprom *eeprom;
3645 unsigned long flags;
3646
3647
3648
3649
3650
3651
3652
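	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */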
3653 hw = iwl_legacy_alloc_all(cfg);
3654 if (hw == NULL) {
3655 pr_err("Can not allocate network device\n");
3656 err = -ENOMEM;
3657 goto out;
3658 }
3659 priv = hw->priv;
3660 SET_IEEE80211_DEV(hw, &pdev->dev);
3661
3662 priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
3663
3664
3665 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3666
3667 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3668 priv->contexts[i].ctxid = i;
3669
3670 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3671 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3672 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3673 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3674 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3675 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3676 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3677 BIT(NL80211_IFTYPE_STATION) |
3678 BIT(NL80211_IFTYPE_ADHOC);
3679 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3680 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3681 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3682
3683
3684
3685
3686
3687 if (iwl3945_mod_params.disable_hw_scan) {
3688 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
3689 iwl3945_hw_ops.hw_scan = NULL;
3690 }
3691
3692 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3693 priv->cfg = cfg;
3694 priv->pci_dev = pdev;
3695 priv->inta_mask = CSR_INI_SET_MASK;
3696
3697 if (iwl_legacy_alloc_traffic_mem(priv))
3698 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3699
3700
3701
3702
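	/* Disable ASPM (L0s/L1) and clock power management before enabling
	 * the device */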
3703 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3704 PCIE_LINK_STATE_CLKPM);
3705
3706 if (pci_enable_device(pdev)) {
3707 err = -ENODEV;
3708 goto out_ieee80211_free_hw;
3709 }
3710
3711 pci_set_master(pdev);
3712
3713 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3714 if (!err)
3715 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3716 if (err) {
3717 IWL_WARN(priv, "No suitable DMA available.\n");
3718 goto out_pci_disable_device;
3719 }
3720
3721 pci_set_drvdata(pdev, priv);
3722 err = pci_request_regions(pdev, DRV_NAME);
3723 if (err)
3724 goto out_pci_disable_device;
3725
3726
3727
3728
3729 priv->hw_base = pci_iomap(pdev, 0, 0);
3730 if (!priv->hw_base) {
3731 err = -ENODEV;
3732 goto out_pci_release_regions;
3733 }
3734
3735 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3736 (unsigned long long) pci_resource_len(pdev, 0));
3737 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3738
3739
3740
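	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */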
3741 pci_write_config_byte(pdev, 0x41, 0x00);
3742
3743
3744
3745
3746 spin_lock_init(&priv->reg_lock);
3747 spin_lock_init(&priv->lock);
3748
3749
3750
3751
3752
3753
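	/* Stop and reset the on-board processor in case it was left in an
	 * unknown state (e.g. by a kdump kernel taking over) */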
3754 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3755
3756
3757
3758
3759
3760
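	/* Read the EEPROM */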
3761 err = iwl_legacy_eeprom_init(priv);
3762 if (err) {
3763 IWL_ERR(priv, "Unable to init EEPROM\n");
3764 goto out_iounmap;
3765 }
3766
3767 eeprom = (struct iwl3945_eeprom *)priv->eeprom;
3768 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
3769 SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
3770
3771
3772
3773
3774
3775 if (iwl3945_hw_set_hw_params(priv)) {
3776 IWL_ERR(priv, "failed to set hw settings\n");
3777 goto out_eeprom_free;
3778 }
3779
3780
3781
3782
3783
3784 err = iwl3945_init_drv(priv);
3785 if (err) {
3786 IWL_ERR(priv, "initializing driver failed\n");
3787 goto out_unset_hw_params;
3788 }
3789
3790 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
3791 priv->cfg->name);
3792
3793
3794
3795
3796
3797 spin_lock_irqsave(&priv->lock, flags);
3798 iwl_legacy_disable_interrupts(priv);
3799 spin_unlock_irqrestore(&priv->lock, flags);
3800
3801 pci_enable_msi(priv->pci_dev);
3802
3803 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3804 IRQF_SHARED, DRV_NAME, priv);
3805 if (err) {
3806 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3807 goto out_disable_msi;
3808 }
3809
3810 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
3811 if (err) {
3812 IWL_ERR(priv, "failed to create sysfs device attributes\n");
3813 goto out_release_irq;
3814 }
3815
3816 iwl_legacy_set_rxon_channel(priv,
3817 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
3818 &priv->contexts[IWL_RXON_CTX_BSS]);
3819 iwl3945_setup_deferred_work(priv);
3820 iwl3945_setup_rx_handlers(priv);
3821 iwl_legacy_power_initialize(priv);
3822
3823
3824
3825
3826
3827 iwl_legacy_enable_interrupts(priv);
3828
3829 err = iwl3945_setup_mac(priv);
3830 if (err)
3831 goto out_remove_sysfs;
3832
3833 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
3834 if (err)
3835 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
3836
3837
3838 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
3839 2 * HZ);
3840
3841 return 0;
3842
3843 out_remove_sysfs:
3844 destroy_workqueue(priv->workqueue);
3845 priv->workqueue = NULL;
3846 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
3847 out_release_irq:
3848 free_irq(priv->pci_dev->irq, priv);
3849 out_disable_msi:
3850 pci_disable_msi(priv->pci_dev);
3851 iwl_legacy_free_geos(priv);
3852 iwl_legacy_free_channel_map(priv);
3853 out_unset_hw_params:
3854 iwl3945_unset_hw_params(priv);
3855 out_eeprom_free:
3856 iwl_legacy_eeprom_free(priv);
3857 out_iounmap:
3858 pci_iounmap(pdev, priv->hw_base);
3859 out_pci_release_regions:
3860 pci_release_regions(pdev);
3861 out_pci_disable_device:
3862 pci_set_drvdata(pdev, NULL);
3863 pci_disable_device(pdev);
3864 out_ieee80211_free_hw:
3865 iwl_legacy_free_traffic_mem(priv);
3866 ieee80211_free_hw(priv->hw);
3867 out:
3868 return err;
3869}
3870
3871static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
3872{
3873 struct iwl_priv *priv = pci_get_drvdata(pdev);
3874 unsigned long flags;
3875
3876 if (!priv)
3877 return;
3878
3879 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3880
3881 iwl_legacy_dbgfs_unregister(priv);
3882
3883 set_bit(STATUS_EXIT_PENDING, &priv->status);
3884
3885 iwl_legacy_leds_exit(priv);
3886
3887 if (priv->mac80211_registered) {
3888 ieee80211_unregister_hw(priv->hw);
3889 priv->mac80211_registered = 0;
3890 } else {
3891 iwl3945_down(priv);
3892 }
3893
3894
3895
3896
3897
3898
3899
3900
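	/*
	 * Make sure the device is reset to low power before unloading;
	 * this may be redundant with iwl3945_down(), but some paths reach
	 * here without it, and the call is inexpensive.
	 */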
3901 iwl_legacy_apm_stop(priv);
3902
3903
3904
3905
3906 spin_lock_irqsave(&priv->lock, flags);
3907 iwl_legacy_disable_interrupts(priv);
3908 spin_unlock_irqrestore(&priv->lock, flags);
3909
3910 iwl3945_synchronize_irq(priv);
3911
3912 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
3913
3914 cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
3915
3916 iwl3945_dealloc_ucode_pci(priv);
3917
3918 if (priv->rxq.bd)
3919 iwl3945_rx_queue_free(priv, &priv->rxq);
3920 iwl3945_hw_txq_ctx_free(priv);
3921
3922 iwl3945_unset_hw_params(priv);
3923
3924
3925 flush_workqueue(priv->workqueue);
3926
3927
3928
3929
3930 destroy_workqueue(priv->workqueue);
3931 priv->workqueue = NULL;
3932 iwl_legacy_free_traffic_mem(priv);
3933
3934 free_irq(pdev->irq, priv);
3935 pci_disable_msi(pdev);
3936
3937 pci_iounmap(pdev, priv->hw_base);
3938 pci_release_regions(pdev);
3939 pci_disable_device(pdev);
3940 pci_set_drvdata(pdev, NULL);
3941
3942 iwl_legacy_free_channel_map(priv);
3943 iwl_legacy_free_geos(priv);
3944 kfree(priv->scan_cmd);
3945 if (priv->beacon_skb)
3946 dev_kfree_skb(priv->beacon_skb);
3947
3948 ieee80211_free_hw(priv->hw);
3949}
3950
3951
3952
3953
3954
3955
3956
3957
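/*****************************************************************************
 *
 * driver and module entries
 *
 *****************************************************************************/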
3958static struct pci_driver iwl3945_driver = {
3959 .name = DRV_NAME,
3960 .id_table = iwl3945_hw_card_ids,
3961 .probe = iwl3945_pci_probe,
3962 .remove = __devexit_p(iwl3945_pci_remove),
3963 .driver.pm = IWL_LEGACY_PM_OPS,
3964};
3965
3966static int __init iwl3945_init(void)
3967{
3968
3969 int ret;
3970 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3971 pr_info(DRV_COPYRIGHT "\n");
3972
3973 ret = iwl3945_rate_control_register();
3974 if (ret) {
3975 pr_err("Unable to register rate control algorithm: %d\n", ret);
3976 return ret;
3977 }
3978
3979 ret = pci_register_driver(&iwl3945_driver);
3980 if (ret) {
3981 pr_err("Unable to initialize PCI module\n");
3982 goto error_register;
3983 }
3984
3985 return ret;
3986
3987error_register:
3988 iwl3945_rate_control_unregister();
3989 return ret;
3990}
3991
3992static void __exit iwl3945_exit(void)
3993{
3994 pci_unregister_driver(&iwl3945_driver);
3995 iwl3945_rate_control_unregister();
3996}
3997
3998MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
3999
4000module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4001MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4002module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto,
		"use software crypto (default 1 [software])");
4005module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4006 int, S_IRUGO);
4007MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
4008#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
4009module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
4010MODULE_PARM_DESC(debug, "debug output mask");
4011#endif
4012module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4013MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4014
4015module_exit(iwl3945_exit);
4016module_init(iwl3945_init);
4017