/*
 * atl2.c - Atheros(R) L2 Fast Ethernet driver
 *
 * Copyright (c) 2007 Atheros Corporation.
 *
 * This driver is distributed under the terms of the GNU General Public
 * License; see the MODULE_LICENSE() declaration below.
 */
23#include <linux/atomic.h>
24#include <linux/crc32.h>
25#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/hardirq.h>
29#include <linux/if_vlan.h>
30#include <linux/in.h>
31#include <linux/interrupt.h>
32#include <linux/ip.h>
33#include <linux/irqflags.h>
34#include <linux/irqreturn.h>
35#include <linux/mii.h>
36#include <linux/net.h>
37#include <linux/netdevice.h>
38#include <linux/pci.h>
39#include <linux/pci_ids.h>
40#include <linux/pm.h>
41#include <linux/skbuff.h>
42#include <linux/slab.h>
43#include <linux/spinlock.h>
44#include <linux/string.h>
45#include <linux/tcp.h>
46#include <linux/timer.h>
47#include <linux/types.h>
48#include <linux/workqueue.h>
49
50#include "atl2.h"
51
52#define ATL2_DRV_VERSION "2.2.3"
53
54static const char atl2_driver_name[] = "atl2";
55static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
56static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
57static const char atl2_driver_version[] = ATL2_DRV_VERSION;
58
59MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
60MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
61MODULE_LICENSE("GPL");
62MODULE_VERSION(ATL2_DRV_VERSION);
63
64
65
66
67static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
68 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
69
70 {0,}
71};
72MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
73
74static void atl2_set_ethtool_ops(struct net_device *netdev);
75
76static void atl2_check_options(struct atl2_adapter *adapter);
77
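/**
 * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
 * @adapter: board private structure to initialize
 *
 * Fields are initialized from the PCI device information and the OS
 * network device settings (MTU size); default MAC timing parameters are
 * set and the adapter is marked down.
 */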
86static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
87{
88 struct atl2_hw *hw = &adapter->hw;
89 struct pci_dev *pdev = adapter->pdev;
90
91
92 hw->vendor_id = pdev->vendor;
93 hw->device_id = pdev->device;
94 hw->subsystem_vendor_id = pdev->subsystem_vendor;
95 hw->subsystem_id = pdev->subsystem_device;
96 hw->revision_id = pdev->revision;
97
98 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
99
100 adapter->wol = 0;
101 adapter->ict = 50000;
102 adapter->link_speed = SPEED_0;
103 adapter->link_duplex = FULL_DUPLEX;
104
105 hw->phy_configured = false;
106 hw->preamble_len = 7;
107 hw->ipgt = 0x60;
108 hw->min_ifg = 0x50;
109 hw->ipgr1 = 0x40;
110 hw->ipgr2 = 0x60;
111 hw->retry_buf = 2;
112 hw->max_retry = 0xf;
113 hw->lcol = 0x37;
114 hw->jam_ipg = 7;
115 hw->fc_rxd_hi = 0;
116 hw->fc_rxd_lo = 0;
117 hw->max_frame_size = adapter->netdev->mtu;
118
119 spin_lock_init(&adapter->stats_lock);
120
121 set_bit(__ATL2_DOWN, &adapter->flags);
122
123 return 0;
124}
125
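/**
 * atl2_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The ndo_set_rx_mode entry point is called whenever the multicast list
 * or the interface flags change.  It sets the promiscuous/all-multicast
 * bits in MAC_CTRL, clears the 64-bit multicast hash table and then sets
 * one hash bit per configured multicast address.
 */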
135static void atl2_set_multi(struct net_device *netdev)
136{
137 struct atl2_adapter *adapter = netdev_priv(netdev);
138 struct atl2_hw *hw = &adapter->hw;
139 struct netdev_hw_addr *ha;
140 u32 rctl;
141 u32 hash_value;
142
143
144 rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
145
146 if (netdev->flags & IFF_PROMISC) {
147 rctl |= MAC_CTRL_PROMIS_EN;
148 } else if (netdev->flags & IFF_ALLMULTI) {
149 rctl |= MAC_CTRL_MC_ALL_EN;
150 rctl &= ~MAC_CTRL_PROMIS_EN;
151 } else
152 rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
153
154 ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
155
156
157 ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
158 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
159
160
161 netdev_for_each_mc_addr(ha, netdev) {
162 hash_value = atl2_hash_mc_addr(hw, ha->addr);
163 atl2_hash_set(hw, hash_value);
164 }
165}
166
167static void init_ring_ptrs(struct atl2_adapter *adapter)
168{
169
170 adapter->txd_write_ptr = 0;
171 atomic_set(&adapter->txd_read_ptr, 0);
172
173 adapter->rxd_read_ptr = 0;
174 adapter->rxd_write_ptr = 0;
175
176 atomic_set(&adapter->txs_write_ptr, 0);
177 adapter->txs_next_clear = 0;
178}
179
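/**
 * atl2_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Programs the station MAC address, descriptor ring base addresses and
 * sizes, inter-packet gap, half-duplex and interrupt-moderation timers,
 * MTU, flow-control thresholds and the DMA engines.
 *
 * Returns 1 if the PHY reports link-down at the end of configuration,
 * 0 otherwise.
 */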
186static int atl2_configure(struct atl2_adapter *adapter)
187{
188 struct atl2_hw *hw = &adapter->hw;
189 u32 value;
190
191
192 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
193
194
195 value = (((u32)hw->mac_addr[2]) << 24) |
196 (((u32)hw->mac_addr[3]) << 16) |
197 (((u32)hw->mac_addr[4]) << 8) |
198 (((u32)hw->mac_addr[5]));
199 ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
200 value = (((u32)hw->mac_addr[0]) << 8) |
201 (((u32)hw->mac_addr[1]));
202 ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
203
204
205 ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
206 (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
207
208
209 ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
210 (u32)(adapter->txd_dma & 0x00000000ffffffffULL));
211 ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
212 (u32)(adapter->txs_dma & 0x00000000ffffffffULL));
213 ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
214 (u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
215
216
217 ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
218 ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
219 ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size);
220
221
222
223
224
225
226
227
228 value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
229 MAC_IPG_IFG_IPGT_SHIFT) |
230 (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
231 MAC_IPG_IFG_MIFG_SHIFT) |
232 (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
233 MAC_IPG_IFG_IPGR1_SHIFT)|
234 (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
235 MAC_IPG_IFG_IPGR2_SHIFT);
236 ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
237
238
239 value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
240 (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
241 MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
242 MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
243 (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
244 (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
245 MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
246 ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
247
248
249 ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
250 ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
251
252
253 ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
254
255
256 ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
257 ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
258
259
260 ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
261
262
263 ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
264 ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
265
266
267 ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
268 ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
269
270
271 ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
272 ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
273
274 value = ATL2_READ_REG(&adapter->hw, REG_ISR);
275 if ((value & ISR_PHY_LINKDOWN) != 0)
276 value = 1;
277 else
278 value = 0;
279
280
281 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
282 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
283 return value;
284}
285
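/**
 * atl2_setup_ring_resources - allocate Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Allocates a single coherent DMA block that carries the TxD ring, the
 * TxS status ring and the RxD ring.  TxD and TxS are aligned to 8 bytes;
 * the RxD ring is placed 8 bytes before a 128-byte boundary.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */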
292static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
293{
294 struct pci_dev *pdev = adapter->pdev;
295 int size;
296 u8 offset = 0;
297
298
299 adapter->ring_size = size =
300 adapter->txd_ring_size * 1 + 7 +
301 adapter->txs_ring_size * 4 + 7 +
302 adapter->rxd_ring_size * 1536 + 127;
303
304 adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
305 &adapter->ring_dma);
306 if (!adapter->ring_vir_addr)
307 return -ENOMEM;
308 memset(adapter->ring_vir_addr, 0, adapter->ring_size);
309
310
	adapter->txd_dma = adapter->ring_dma;
312 offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
313 adapter->txd_dma += offset;
314 adapter->txd_ring = adapter->ring_vir_addr + offset;
315
316
317 adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
318 offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
319 adapter->txs_dma += offset;
320 adapter->txs_ring = (struct tx_pkt_status *)
321 (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
322
323
324 adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
325 offset = (adapter->rxd_dma & 127) ?
326 (128 - (adapter->rxd_dma & 127)) : 0;
327 if (offset > 7)
328 offset -= 8;
329 else
330 offset += (128 - 8);
331
332 adapter->rxd_dma += offset;
333 adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
334 (adapter->txs_ring_size * 4 + offset));
335
336
337
338
339
340 return 0;
341}
342
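/**
 * atl2_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */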
347static inline void atl2_irq_enable(struct atl2_adapter *adapter)
348{
349 ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
350 ATL2_WRITE_FLUSH(&adapter->hw);
351}
352
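/**
 * atl2_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */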
357static inline void atl2_irq_disable(struct atl2_adapter *adapter)
358{
359 ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
360 ATL2_WRITE_FLUSH(&adapter->hw);
361 synchronize_irq(adapter->pdev->irq);
362}
363
364static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
365{
366 if (features & NETIF_F_HW_VLAN_RX) {
367
368 *ctrl |= MAC_CTRL_RMV_VLAN;
369 } else {
370
371 *ctrl &= ~MAC_CTRL_RMV_VLAN;
372 }
373}
374
375static void atl2_vlan_mode(struct net_device *netdev,
376 netdev_features_t features)
377{
378 struct atl2_adapter *adapter = netdev_priv(netdev);
379 u32 ctrl;
380
381 atl2_irq_disable(adapter);
382
383 ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
384 __atl2_vlan_mode(features, &ctrl);
385 ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
386
387 atl2_irq_enable(adapter);
388}
389
390static void atl2_restore_vlan(struct atl2_adapter *adapter)
391{
392 atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
393}
394
395static netdev_features_t atl2_fix_features(struct net_device *netdev,
396 netdev_features_t features)
397{
398
399
400
401
402 if (features & NETIF_F_HW_VLAN_RX)
403 features |= NETIF_F_HW_VLAN_TX;
404 else
405 features &= ~NETIF_F_HW_VLAN_TX;
406
407 return features;
408}
409
410static int atl2_set_features(struct net_device *netdev,
411 netdev_features_t features)
412{
413 netdev_features_t changed = netdev->features ^ features;
414
415 if (changed & NETIF_F_HW_VLAN_RX)
416 atl2_vlan_mode(netdev, features);
417
418 return 0;
419}
420
421static void atl2_intr_rx(struct atl2_adapter *adapter)
422{
423 struct net_device *netdev = adapter->netdev;
424 struct rx_desc *rxd;
425 struct sk_buff *skb;
426
427 do {
428 rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
429 if (!rxd->status.update)
430 break;
431
432
433 rxd->status.update = 0;
434
435 if (rxd->status.ok && rxd->status.pkt_size >= 60) {
436 int rx_size = (int)(rxd->status.pkt_size - 4);
437
438 skb = netdev_alloc_skb_ip_align(netdev, rx_size);
439 if (NULL == skb) {
440 printk(KERN_WARNING
441 "%s: Mem squeeze, deferring packet.\n",
442 netdev->name);
443
444
445
446
447 netdev->stats.rx_dropped++;
448 break;
449 }
450 memcpy(skb->data, rxd->packet, rx_size);
451 skb_put(skb, rx_size);
452 skb->protocol = eth_type_trans(skb, netdev);
453 if (rxd->status.vlan) {
454 u16 vlan_tag = (rxd->status.vtag>>4) |
455 ((rxd->status.vtag&7) << 13) |
456 ((rxd->status.vtag&8) << 9);
457
458 __vlan_hwaccel_put_tag(skb, vlan_tag);
459 }
460 netif_rx(skb);
461 netdev->stats.rx_bytes += rx_size;
462 netdev->stats.rx_packets++;
463 } else {
464 netdev->stats.rx_errors++;
465
466 if (rxd->status.ok && rxd->status.pkt_size <= 60)
467 netdev->stats.rx_length_errors++;
468 if (rxd->status.mcast)
469 netdev->stats.multicast++;
470 if (rxd->status.crc)
471 netdev->stats.rx_crc_errors++;
472 if (rxd->status.align)
473 netdev->stats.rx_frame_errors++;
474 }
475
476
477 if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
478 adapter->rxd_write_ptr = 0;
479 } while (1);
480
481
482 adapter->rxd_read_ptr = adapter->rxd_write_ptr;
483 ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
484}
485
486static void atl2_intr_tx(struct atl2_adapter *adapter)
487{
488 struct net_device *netdev = adapter->netdev;
489 u32 txd_read_ptr;
490 u32 txs_write_ptr;
491 struct tx_pkt_status *txs;
492 struct tx_pkt_header *txph;
493 int free_hole = 0;
494
495 do {
496 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
497 txs = adapter->txs_ring + txs_write_ptr;
498 if (!txs->update)
499 break;
500
501 free_hole = 1;
502 txs->update = 0;
503
504 if (++txs_write_ptr == adapter->txs_ring_size)
505 txs_write_ptr = 0;
506 atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
507
508 txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
509 txph = (struct tx_pkt_header *)
510 (((u8 *)adapter->txd_ring) + txd_read_ptr);
511
512 if (txph->pkt_size != txs->pkt_size) {
513 struct tx_pkt_status *old_txs = txs;
514 printk(KERN_WARNING
515 "%s: txs packet size not consistent with txd"
516 " txd_:0x%08x, txs_:0x%08x!\n",
517 adapter->netdev->name,
518 *(u32 *)txph, *(u32 *)txs);
519 printk(KERN_WARNING
520 "txd read ptr: 0x%x\n",
521 txd_read_ptr);
522 txs = adapter->txs_ring + txs_write_ptr;
523 printk(KERN_WARNING
524 "txs-behind:0x%08x\n",
525 *(u32 *)txs);
526 if (txs_write_ptr < 2) {
527 txs = adapter->txs_ring +
528 (adapter->txs_ring_size +
529 txs_write_ptr - 2);
530 } else {
531 txs = adapter->txs_ring + (txs_write_ptr - 2);
532 }
533 printk(KERN_WARNING
534 "txs-before:0x%08x\n",
535 *(u32 *)txs);
536 txs = old_txs;
537 }
538
539
540 txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
541 if (txd_read_ptr >= adapter->txd_ring_size)
542 txd_read_ptr -= adapter->txd_ring_size;
543
544 atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
545
546
547 if (txs->ok) {
548 netdev->stats.tx_bytes += txs->pkt_size;
549 netdev->stats.tx_packets++;
		} else
			netdev->stats.tx_errors++;
553
554 if (txs->defer)
555 netdev->stats.collisions++;
556 if (txs->abort_col)
557 netdev->stats.tx_aborted_errors++;
558 if (txs->late_col)
559 netdev->stats.tx_window_errors++;
560 if (txs->underun)
561 netdev->stats.tx_fifo_errors++;
562 } while (1);
563
564 if (free_hole) {
565 if (netif_queue_stopped(adapter->netdev) &&
566 netif_carrier_ok(adapter->netdev))
567 netif_wake_queue(adapter->netdev);
568 }
569}
570
571static void atl2_check_for_link(struct atl2_adapter *adapter)
572{
573 struct net_device *netdev = adapter->netdev;
574 u16 phy_data = 0;
575
576 spin_lock(&adapter->stats_lock);
577 atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
578 atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
579 spin_unlock(&adapter->stats_lock);
580
581
582 if (!(phy_data & BMSR_LSTATUS)) {
583 if (netif_carrier_ok(netdev)) {
584 printk(KERN_INFO "%s: %s NIC Link is Down\n",
585 atl2_driver_name, netdev->name);
586 adapter->link_speed = SPEED_0;
587 netif_carrier_off(netdev);
588 netif_stop_queue(netdev);
589 }
590 }
591 schedule_work(&adapter->link_chg_task);
592}
593
594static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
595{
596 u16 phy_data;
597 spin_lock(&adapter->stats_lock);
598 atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
599 spin_unlock(&adapter->stats_lock);
600}
601
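/**
 * atl2_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Acknowledges and disables further interrupts, schedules the reset task
 * on link-down or DMA time-out events, and otherwise services Tx
 * completions and Rx packets directly in interrupt context (this driver
 * does not use NAPI).
 */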
608static irqreturn_t atl2_intr(int irq, void *data)
609{
610 struct atl2_adapter *adapter = netdev_priv(data);
611 struct atl2_hw *hw = &adapter->hw;
612 u32 status;
613
614 status = ATL2_READ_REG(hw, REG_ISR);
615 if (0 == status)
616 return IRQ_NONE;
617
618
619 if (status & ISR_PHY)
620 atl2_clear_phy_int(adapter);
621
622
623 ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
624
625
626 if (status & ISR_PHY_LINKDOWN) {
627 if (netif_running(adapter->netdev)) {
628 ATL2_WRITE_REG(hw, REG_ISR, 0);
629 ATL2_WRITE_REG(hw, REG_IMR, 0);
630 ATL2_WRITE_FLUSH(hw);
631 schedule_work(&adapter->reset_task);
632 return IRQ_HANDLED;
633 }
634 }
635
636
637 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
638 ATL2_WRITE_REG(hw, REG_ISR, 0);
639 ATL2_WRITE_REG(hw, REG_IMR, 0);
640 ATL2_WRITE_FLUSH(hw);
641 schedule_work(&adapter->reset_task);
642 return IRQ_HANDLED;
643 }
644
645
646 if (status & (ISR_PHY | ISR_MANUAL)) {
647 adapter->netdev->stats.tx_carrier_errors++;
648 atl2_check_for_link(adapter);
649 }
650
651
652 if (status & ISR_TX_EVENT)
653 atl2_intr_tx(adapter);
654
655
656 if (status & ISR_RX_EVENT)
657 atl2_intr_rx(adapter);
658
659
660 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
661 return IRQ_HANDLED;
662}
663
664static int atl2_request_irq(struct atl2_adapter *adapter)
665{
666 struct net_device *netdev = adapter->netdev;
667 int flags, err = 0;
668
669 flags = IRQF_SHARED;
670 adapter->have_msi = true;
671 err = pci_enable_msi(adapter->pdev);
672 if (err)
673 adapter->have_msi = false;
674
675 if (adapter->have_msi)
676 flags &= ~IRQF_SHARED;
677
678 return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name,
679 netdev);
680}
681
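/**
 * atl2_free_ring_resources - Free Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Releases the coherent DMA block allocated by atl2_setup_ring_resources().
 */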
688static void atl2_free_ring_resources(struct atl2_adapter *adapter)
689{
690 struct pci_dev *pdev = adapter->pdev;
691 pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
692 adapter->ring_dma);
693}
694
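/**
 * atl2_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made active
 * by the system (IP address assignment, device up, etc.).  Descriptor ring
 * resources are allocated, the hardware and Rx mode are configured, the
 * interrupt handler is registered and the watchdog timer is started.
 *
 * Returns 0 on success, a negative errno on failure.
 */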
707static int atl2_open(struct net_device *netdev)
708{
709 struct atl2_adapter *adapter = netdev_priv(netdev);
710 int err;
711 u32 val;
712
713
714 if (test_bit(__ATL2_TESTING, &adapter->flags))
715 return -EBUSY;
716
717
718 err = atl2_setup_ring_resources(adapter);
719 if (err)
720 return err;
721
722 err = atl2_init_hw(&adapter->hw);
723 if (err) {
724 err = -EIO;
725 goto err_init_hw;
726 }
727
728
729 atl2_set_multi(netdev);
730 init_ring_ptrs(adapter);
731
732 atl2_restore_vlan(adapter);
733
734 if (atl2_configure(adapter)) {
735 err = -EIO;
736 goto err_config;
737 }
738
739 err = atl2_request_irq(adapter);
740 if (err)
741 goto err_req_irq;
742
743 clear_bit(__ATL2_DOWN, &adapter->flags);
744
745 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ));
746
747 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
748 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
749 val | MASTER_CTRL_MANUAL_INT);
750
751 atl2_irq_enable(adapter);
752
753 return 0;
754
755err_init_hw:
756err_req_irq:
757err_config:
758 atl2_free_ring_resources(adapter);
759 atl2_reset_hw(&adapter->hw);
760
761 return err;
762}
763
764static void atl2_down(struct atl2_adapter *adapter)
765{
766 struct net_device *netdev = adapter->netdev;
767
768
769
770 set_bit(__ATL2_DOWN, &adapter->flags);
771
772 netif_tx_disable(netdev);
773
774
775 atl2_reset_hw(&adapter->hw);
776 msleep(1);
777
778 atl2_irq_disable(adapter);
779
780 del_timer_sync(&adapter->watchdog_timer);
781 del_timer_sync(&adapter->phy_config_timer);
782 clear_bit(0, &adapter->cfg_phy);
783
784 netif_carrier_off(netdev);
785 adapter->link_speed = SPEED_0;
786 adapter->link_duplex = -1;
787}
788
789static void atl2_free_irq(struct atl2_adapter *adapter)
790{
791 struct net_device *netdev = adapter->netdev;
792
793 free_irq(adapter->pdev->irq, netdev);
794
795#ifdef CONFIG_PCI_MSI
796 if (adapter->have_msi)
797 pci_disable_msi(adapter->pdev);
798#endif
799}
800
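/**
 * atl2_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated by
 * the OS.  The adapter is brought down and its IRQ and descriptor ring
 * resources are released.
 *
 * Returns 0; this is not allowed to fail.
 */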
812static int atl2_close(struct net_device *netdev)
813{
814 struct atl2_adapter *adapter = netdev_priv(netdev);
815
816 WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
817
818 atl2_down(adapter);
819 atl2_free_irq(adapter);
820 atl2_free_ring_resources(adapter);
821
822 return 0;
823}
824
825static inline int TxsFreeUnit(struct atl2_adapter *adapter)
826{
827 u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
828
829 return (adapter->txs_next_clear >= txs_write_ptr) ?
830 (int) (adapter->txs_ring_size - adapter->txs_next_clear +
831 txs_write_ptr - 1) :
832 (int) (txs_write_ptr - adapter->txs_next_clear - 1);
833}
834
835static inline int TxdFreeBytes(struct atl2_adapter *adapter)
836{
837 u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
838
839 return (adapter->txd_write_ptr >= txd_read_ptr) ?
840 (int) (adapter->txd_ring_size - adapter->txd_write_ptr +
841 txd_read_ptr - 1) :
842 (int) (txd_read_ptr - adapter->txd_write_ptr - 1);
843}
844
845static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
846 struct net_device *netdev)
847{
848 struct atl2_adapter *adapter = netdev_priv(netdev);
849 struct tx_pkt_header *txph;
850 u32 offset, copy_len;
851 int txs_unused;
852 int txbuf_unused;
853
854 if (test_bit(__ATL2_DOWN, &adapter->flags)) {
855 dev_kfree_skb_any(skb);
856 return NETDEV_TX_OK;
857 }
858
859 if (unlikely(skb->len <= 0)) {
860 dev_kfree_skb_any(skb);
861 return NETDEV_TX_OK;
862 }
863
864 txs_unused = TxsFreeUnit(adapter);
865 txbuf_unused = TxdFreeBytes(adapter);
866
867 if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
868 txs_unused < 1) {
869
870 netif_stop_queue(netdev);
871 return NETDEV_TX_BUSY;
872 }
873
874 offset = adapter->txd_write_ptr;
875
876 txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);
877
878 *(u32 *)txph = 0;
879 txph->pkt_size = skb->len;
880
881 offset += 4;
882 if (offset >= adapter->txd_ring_size)
883 offset -= adapter->txd_ring_size;
884 copy_len = adapter->txd_ring_size - offset;
885 if (copy_len >= skb->len) {
886 memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
887 offset += ((u32)(skb->len + 3) & ~3);
888 } else {
889 memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
890 memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
891 skb->len-copy_len);
892 offset = ((u32)(skb->len-copy_len + 3) & ~3);
893 }
894#ifdef NETIF_F_HW_VLAN_TX
895 if (vlan_tx_tag_present(skb)) {
896 u16 vlan_tag = vlan_tx_tag_get(skb);
897 vlan_tag = (vlan_tag << 4) |
898 (vlan_tag >> 13) |
899 ((vlan_tag >> 9) & 0x8);
900 txph->ins_vlan = 1;
901 txph->vlan = vlan_tag;
902 }
903#endif
904 if (offset >= adapter->txd_ring_size)
905 offset -= adapter->txd_ring_size;
906 adapter->txd_write_ptr = offset;
907
908
909 adapter->txs_ring[adapter->txs_next_clear].update = 0;
910 if (++adapter->txs_next_clear == adapter->txs_ring_size)
911 adapter->txs_next_clear = 0;
912
913 ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
914 (adapter->txd_write_ptr >> 2));
915
916 mmiowb();
917 dev_kfree_skb_any(skb);
918 return NETDEV_TX_OK;
919}
920
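/**
 * atl2_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, -EINVAL if the requested MTU is out of range.
 */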
928static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
929{
930 struct atl2_adapter *adapter = netdev_priv(netdev);
931 struct atl2_hw *hw = &adapter->hw;
932
933 if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
934 return -EINVAL;
935
936
937 if (hw->max_frame_size != new_mtu) {
938 netdev->mtu = new_mtu;
939 ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
940 VLAN_SIZE + ETHERNET_FCS_SIZE);
941 }
942
943 return 0;
944}
945
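/**
 * atl2_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to a struct sockaddr holding the new address
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address or
 * -EBUSY if the interface is currently running.
 */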
953static int atl2_set_mac(struct net_device *netdev, void *p)
954{
955 struct atl2_adapter *adapter = netdev_priv(netdev);
956 struct sockaddr *addr = p;
957
958 if (!is_valid_ether_addr(addr->sa_data))
959 return -EADDRNOTAVAIL;
960
961 if (netif_running(netdev))
962 return -EBUSY;
963
964 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
965 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
966
967 atl2_set_mac_addr(&adapter->hw);
968
969 return 0;
970}
971
972
973
974
975
976
977
978static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
979{
980 struct atl2_adapter *adapter = netdev_priv(netdev);
981 struct mii_ioctl_data *data = if_mii(ifr);
982 unsigned long flags;
983
984 switch (cmd) {
985 case SIOCGMIIPHY:
986 data->phy_id = 0;
987 break;
988 case SIOCGMIIREG:
989 spin_lock_irqsave(&adapter->stats_lock, flags);
990 if (atl2_read_phy_reg(&adapter->hw,
991 data->reg_num & 0x1F, &data->val_out)) {
992 spin_unlock_irqrestore(&adapter->stats_lock, flags);
993 return -EIO;
994 }
995 spin_unlock_irqrestore(&adapter->stats_lock, flags);
996 break;
997 case SIOCSMIIREG:
998 if (data->reg_num & ~(0x1F))
999 return -EFAULT;
1000 spin_lock_irqsave(&adapter->stats_lock, flags);
1001 if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
1002 data->val_in)) {
1003 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1004 return -EIO;
1005 }
1006 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1007 break;
1008 default:
1009 return -EOPNOTSUPP;
1010 }
1011 return 0;
1012}
1013
1014
1015
1016
1017
1018
1019
1020static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1021{
1022 switch (cmd) {
1023 case SIOCGMIIPHY:
1024 case SIOCGMIIREG:
1025 case SIOCSMIIREG:
1026 return atl2_mii_ioctl(netdev, ifr, cmd);
1027#ifdef ETHTOOL_OPS_COMPAT
1028 case SIOCETHTOOL:
1029 return ethtool_ioctl(ifr);
1030#endif
1031 default:
1032 return -EOPNOTSUPP;
1033 }
1034}
1035
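/**
 * atl2_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * Schedules the reset task so the adapter is reinitialised outside
 * interrupt context.
 */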
1040static void atl2_tx_timeout(struct net_device *netdev)
1041{
1042 struct atl2_adapter *adapter = netdev_priv(netdev);
1043
1044
1045 schedule_work(&adapter->reset_task);
1046}
1047
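/**
 * atl2_watchdog - Timer Call-back
 * @data: pointer to the adapter structure cast to unsigned long
 *
 * Accumulates the hardware Rx-overflow drop counters into the netdev
 * statistics and re-arms itself every 4 seconds while the interface
 * is up.
 */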
1052static void atl2_watchdog(unsigned long data)
1053{
1054 struct atl2_adapter *adapter = (struct atl2_adapter *) data;
1055
1056 if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
1057 u32 drop_rxd, drop_rxs;
1058 unsigned long flags;
1059
1060 spin_lock_irqsave(&adapter->stats_lock, flags);
1061 drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
1062 drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
1063 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1064
1065 adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs;
1066
1067
1068 mod_timer(&adapter->watchdog_timer,
1069 round_jiffies(jiffies + 4 * HZ));
1070 }
1071}
1072
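/**
 * atl2_phy_config - Timer Call-back
 * @data: pointer to the adapter structure cast to unsigned long
 *
 * Restarts PHY auto-negotiation with the currently advertised
 * capabilities and clears the cfg_phy flag.
 */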
1077static void atl2_phy_config(unsigned long data)
1078{
1079 struct atl2_adapter *adapter = (struct atl2_adapter *) data;
1080 struct atl2_hw *hw = &adapter->hw;
1081 unsigned long flags;
1082
1083 spin_lock_irqsave(&adapter->stats_lock, flags);
1084 atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1085 atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
1086 MII_CR_RESTART_AUTO_NEG);
1087 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1088 clear_bit(0, &adapter->cfg_phy);
1089}
1090
1091static int atl2_up(struct atl2_adapter *adapter)
1092{
1093 struct net_device *netdev = adapter->netdev;
1094 int err = 0;
1095 u32 val;
1096
1097
1098
1099 err = atl2_init_hw(&adapter->hw);
1100 if (err) {
1101 err = -EIO;
1102 return err;
1103 }
1104
1105 atl2_set_multi(netdev);
1106 init_ring_ptrs(adapter);
1107
1108 atl2_restore_vlan(adapter);
1109
1110 if (atl2_configure(adapter)) {
1111 err = -EIO;
1112 goto err_up;
1113 }
1114
1115 clear_bit(__ATL2_DOWN, &adapter->flags);
1116
1117 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
1118 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
1119 MASTER_CTRL_MANUAL_INT);
1120
1121 atl2_irq_enable(adapter);
1122
1123err_up:
1124 return err;
1125}
1126
1127static void atl2_reinit_locked(struct atl2_adapter *adapter)
1128{
1129 WARN_ON(in_interrupt());
1130 while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
1131 msleep(1);
1132 atl2_down(adapter);
1133 atl2_up(adapter);
1134 clear_bit(__ATL2_RESETTING, &adapter->flags);
1135}
1136
1137static void atl2_reset_task(struct work_struct *work)
1138{
1139 struct atl2_adapter *adapter;
1140 adapter = container_of(work, struct atl2_adapter, reset_task);
1141
1142 atl2_reinit_locked(adapter);
1143}
1144
1145static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
1146{
1147 u32 value;
1148 struct atl2_hw *hw = &adapter->hw;
1149 struct net_device *netdev = adapter->netdev;
1150
1151
1152 value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
1153
1154
1155 if (FULL_DUPLEX == adapter->link_duplex)
1156 value |= MAC_CTRL_DUPLX;
1157
1158
1159 value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1160
1161
1162 value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1163
1164
1165 value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
1166 MAC_CTRL_PRMLEN_SHIFT);
1167
1168
1169 __atl2_vlan_mode(netdev->features, &value);
1170
1171
1172 value |= MAC_CTRL_BC_EN;
1173 if (netdev->flags & IFF_PROMISC)
1174 value |= MAC_CTRL_PROMIS_EN;
1175 else if (netdev->flags & IFF_ALLMULTI)
1176 value |= MAC_CTRL_MC_ALL_EN;
1177
1178
1179 value |= (((u32)(adapter->hw.retry_buf &
1180 MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);
1181
1182 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1183}
1184
1185static int atl2_check_link(struct atl2_adapter *adapter)
1186{
1187 struct atl2_hw *hw = &adapter->hw;
1188 struct net_device *netdev = adapter->netdev;
1189 int ret_val;
1190 u16 speed, duplex, phy_data;
1191 int reconfig = 0;
1192
1193
1194 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1195 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1196 if (!(phy_data&BMSR_LSTATUS)) {
1197 if (netif_carrier_ok(netdev)) {
1198 u32 value;
1199
1200 value = ATL2_READ_REG(hw, REG_MAC_CTRL);
1201 value &= ~MAC_CTRL_RX_EN;
1202 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1203 adapter->link_speed = SPEED_0;
1204 netif_carrier_off(netdev);
1205 netif_stop_queue(netdev);
1206 }
1207 return 0;
1208 }
1209
1210
1211 ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
1212 if (ret_val)
1213 return ret_val;
1214 switch (hw->MediaType) {
1215 case MEDIA_TYPE_100M_FULL:
1216 if (speed != SPEED_100 || duplex != FULL_DUPLEX)
1217 reconfig = 1;
1218 break;
1219 case MEDIA_TYPE_100M_HALF:
1220 if (speed != SPEED_100 || duplex != HALF_DUPLEX)
1221 reconfig = 1;
1222 break;
1223 case MEDIA_TYPE_10M_FULL:
1224 if (speed != SPEED_10 || duplex != FULL_DUPLEX)
1225 reconfig = 1;
1226 break;
1227 case MEDIA_TYPE_10M_HALF:
1228 if (speed != SPEED_10 || duplex != HALF_DUPLEX)
1229 reconfig = 1;
1230 break;
1231 }
1232
1233 if (reconfig == 0) {
1234 if (adapter->link_speed != speed ||
1235 adapter->link_duplex != duplex) {
1236 adapter->link_speed = speed;
1237 adapter->link_duplex = duplex;
1238 atl2_setup_mac_ctrl(adapter);
1239 printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
1240 atl2_driver_name, netdev->name,
1241 adapter->link_speed,
1242 adapter->link_duplex == FULL_DUPLEX ?
1243 "Full Duplex" : "Half Duplex");
1244 }
1245
1246 if (!netif_carrier_ok(netdev)) {
1247 netif_carrier_on(netdev);
1248 netif_wake_queue(netdev);
1249 }
1250 return 0;
1251 }
1252
1253
1254 if (netif_carrier_ok(netdev)) {
1255 u32 value;
1256
1257 value = ATL2_READ_REG(hw, REG_MAC_CTRL);
1258 value &= ~MAC_CTRL_RX_EN;
1259 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1260
1261 adapter->link_speed = SPEED_0;
1262 netif_carrier_off(netdev);
1263 netif_stop_queue(netdev);
1264 }
1265
1266
1267
1268 if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
1269 if (!test_and_set_bit(0, &adapter->cfg_phy))
1270 mod_timer(&adapter->phy_config_timer,
1271 round_jiffies(jiffies + 5 * HZ));
1272 }
1273
1274 return 0;
1275}
1276
1277
1278
1279
1280
1281static void atl2_link_chg_task(struct work_struct *work)
1282{
1283 struct atl2_adapter *adapter;
1284 unsigned long flags;
1285
1286 adapter = container_of(work, struct atl2_adapter, link_chg_task);
1287
1288 spin_lock_irqsave(&adapter->stats_lock, flags);
1289 atl2_check_link(adapter);
1290 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1291}
1292
1293static void atl2_setup_pcicmd(struct pci_dev *pdev)
1294{
1295 u16 cmd;
1296
1297 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1298
1299 if (cmd & PCI_COMMAND_INTX_DISABLE)
1300 cmd &= ~PCI_COMMAND_INTX_DISABLE;
1301 if (cmd & PCI_COMMAND_IO)
1302 cmd &= ~PCI_COMMAND_IO;
1303 if (0 == (cmd & PCI_COMMAND_MEMORY))
1304 cmd |= PCI_COMMAND_MEMORY;
1305 if (0 == (cmd & PCI_COMMAND_MASTER))
1306 cmd |= PCI_COMMAND_MASTER;
1307 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1308
1309
1310
1311
1312
1313
1314 pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
1315}
1316
1317#ifdef CONFIG_NET_POLL_CONTROLLER
1318static void atl2_poll_controller(struct net_device *netdev)
1319{
1320 disable_irq(netdev->irq);
1321 atl2_intr(netdev->irq, netdev);
1322 enable_irq(netdev->irq);
1323}
1324#endif
1325
1326
1327static const struct net_device_ops atl2_netdev_ops = {
1328 .ndo_open = atl2_open,
1329 .ndo_stop = atl2_close,
1330 .ndo_start_xmit = atl2_xmit_frame,
1331 .ndo_set_rx_mode = atl2_set_multi,
1332 .ndo_validate_addr = eth_validate_addr,
1333 .ndo_set_mac_address = atl2_set_mac,
1334 .ndo_change_mtu = atl2_change_mtu,
1335 .ndo_fix_features = atl2_fix_features,
1336 .ndo_set_features = atl2_set_features,
1337 .ndo_do_ioctl = atl2_ioctl,
1338 .ndo_tx_timeout = atl2_tx_timeout,
1339#ifdef CONFIG_NET_POLL_CONTROLLER
1340 .ndo_poll_controller = atl2_poll_controller,
1341#endif
1342};
1343
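/**
 * atl2_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl2_pci_tbl
 *
 * atl2_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, a negative errno on failure.
 */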
1355static int __devinit atl2_probe(struct pci_dev *pdev,
1356 const struct pci_device_id *ent)
1357{
1358 struct net_device *netdev;
1359 struct atl2_adapter *adapter;
1360 static int cards_found;
1361 unsigned long mmio_start;
1362 int mmio_len;
1363 int err;
1364
1365 cards_found = 0;
1366
1367 err = pci_enable_device(pdev);
1368 if (err)
1369 return err;
1370
1371
1372
1373
1374
1375
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
		err = -EIO;
		goto err_dma;
	}
1381
1382
1383
1384 err = pci_request_regions(pdev, atl2_driver_name);
1385 if (err)
1386 goto err_pci_reg;
1387
1388
1389
1390 pci_set_master(pdev);
1391
1392 err = -ENOMEM;
1393 netdev = alloc_etherdev(sizeof(struct atl2_adapter));
1394 if (!netdev)
1395 goto err_alloc_etherdev;
1396
1397 SET_NETDEV_DEV(netdev, &pdev->dev);
1398
1399 pci_set_drvdata(pdev, netdev);
1400 adapter = netdev_priv(netdev);
1401 adapter->netdev = netdev;
1402 adapter->pdev = pdev;
1403 adapter->hw.back = adapter;
1404
1405 mmio_start = pci_resource_start(pdev, 0x0);
1406 mmio_len = pci_resource_len(pdev, 0x0);
1407
1408 adapter->hw.mem_rang = (u32)mmio_len;
1409 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
1410 if (!adapter->hw.hw_addr) {
1411 err = -EIO;
1412 goto err_ioremap;
1413 }
1414
1415 atl2_setup_pcicmd(pdev);
1416
1417 netdev->netdev_ops = &atl2_netdev_ops;
1418 atl2_set_ethtool_ops(netdev);
1419 netdev->watchdog_timeo = 5 * HZ;
1420 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1421
1422 netdev->mem_start = mmio_start;
1423 netdev->mem_end = mmio_start + mmio_len;
1424 adapter->bd_number = cards_found;
1425 adapter->pci_using_64 = false;
1426
1427
1428 err = atl2_sw_init(adapter);
1429 if (err)
1430 goto err_sw_init;
1431
1432 err = -EIO;
1433
1434 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX;
1435 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
1436
1437
1438 atl2_phy_init(&adapter->hw);
1439
1440
1441
1442
1443 if (atl2_reset_hw(&adapter->hw)) {
1444 err = -EIO;
1445 goto err_reset;
1446 }
1447
1448
1449 atl2_read_mac_addr(&adapter->hw);
1450 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
1451
1452#ifdef ETHTOOL_GPERMADDR
1453 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
1454
1455 if (!is_valid_ether_addr(netdev->perm_addr)) {
1456#else
1457 if (!is_valid_ether_addr(netdev->dev_addr)) {
1458#endif
1459 err = -EIO;
1460 goto err_eeprom;
1461 }
1462
1463 atl2_check_options(adapter);
1464
1465 init_timer(&adapter->watchdog_timer);
1466 adapter->watchdog_timer.function = atl2_watchdog;
1467 adapter->watchdog_timer.data = (unsigned long) adapter;
1468
1469 init_timer(&adapter->phy_config_timer);
1470 adapter->phy_config_timer.function = atl2_phy_config;
1471 adapter->phy_config_timer.data = (unsigned long) adapter;
1472
1473 INIT_WORK(&adapter->reset_task, atl2_reset_task);
1474 INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
1475
1476 strcpy(netdev->name, "eth%d");
1477 err = register_netdev(netdev);
1478 if (err)
1479 goto err_register;
1480
1481
1482 netif_carrier_off(netdev);
1483 netif_stop_queue(netdev);
1484
1485 cards_found++;
1486
1487 return 0;
1488
1489err_reset:
1490err_register:
1491err_sw_init:
1492err_eeprom:
1493 iounmap(adapter->hw.hw_addr);
1494err_ioremap:
1495 free_netdev(netdev);
1496err_alloc_etherdev:
1497 pci_release_regions(pdev);
1498err_pci_reg:
1499err_dma:
1500 pci_disable_device(pdev);
1501 return err;
1502}
1503
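/**
 * atl2_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl2_remove is called by the PCI subsystem to alert the driver that it
 * should release a PCI device.  This could be caused by a Hot-Plug event,
 * or because the driver is going to be removed from memory.
 */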
1515static void __devexit atl2_remove(struct pci_dev *pdev)
1516{
1517 struct net_device *netdev = pci_get_drvdata(pdev);
1518 struct atl2_adapter *adapter = netdev_priv(netdev);
1519
1520
1521
1522 set_bit(__ATL2_DOWN, &adapter->flags);
1523
1524 del_timer_sync(&adapter->watchdog_timer);
1525 del_timer_sync(&adapter->phy_config_timer);
1526 cancel_work_sync(&adapter->reset_task);
1527 cancel_work_sync(&adapter->link_chg_task);
1528
1529 unregister_netdev(netdev);
1530
1531 atl2_force_ps(&adapter->hw);
1532
1533 iounmap(adapter->hw.hw_addr);
1534 pci_release_regions(pdev);
1535
1536 free_netdev(netdev);
1537
1538 pci_disable_device(pdev);
1539}
1540
1541static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
1542{
1543 struct net_device *netdev = pci_get_drvdata(pdev);
1544 struct atl2_adapter *adapter = netdev_priv(netdev);
1545 struct atl2_hw *hw = &adapter->hw;
1546 u16 speed, duplex;
1547 u32 ctrl = 0;
1548 u32 wufc = adapter->wol;
1549
1550#ifdef CONFIG_PM
1551 int retval = 0;
1552#endif
1553
1554 netif_device_detach(netdev);
1555
1556 if (netif_running(netdev)) {
1557 WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
1558 atl2_down(adapter);
1559 }
1560
1561#ifdef CONFIG_PM
1562 retval = pci_save_state(pdev);
1563 if (retval)
1564 return retval;
1565#endif
1566
1567 atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1568 atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1569 if (ctrl & BMSR_LSTATUS)
1570 wufc &= ~ATLX_WUFC_LNKC;
1571
1572 if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
1573 u32 ret_val;
1574
1575 ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
1576 if (ret_val) {
1577 printk(KERN_DEBUG
1578 "%s: get speed&duplex error while suspend\n",
1579 atl2_driver_name);
1580 goto wol_dis;
1581 }
1582
1583 ctrl = 0;
1584
1585
1586 if (wufc & ATLX_WUFC_MAG)
1587 ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
1588
1589
1590 ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
1591
1592
1593 ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
1594 if (FULL_DUPLEX == adapter->link_duplex)
1595 ctrl |= MAC_CTRL_DUPLX;
1596 ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1597 ctrl |= (((u32)adapter->hw.preamble_len &
1598 MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
1599 ctrl |= (((u32)(adapter->hw.retry_buf &
1600 MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
1601 MAC_CTRL_HALF_LEFT_BUF_SHIFT);
1602 if (wufc & ATLX_WUFC_MAG) {
1603
1604 ctrl |= MAC_CTRL_BC_EN;
1605 }
1606
1607 ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);
1608
1609
1610 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1611 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1612 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1613 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1614 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1615 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1616
1617 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
1618 goto suspend_exit;
1619 }
1620
	if (0 == (ctrl & BMSR_LSTATUS) && 0 != (wufc & ATLX_WUFC_LNKC)) {
1622
1623 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
1624 ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
1625 ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);
1626
1627
1628 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1629 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1630 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1631 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1632 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1633 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1634
1635 hw->phy_configured = false;
1636
1637 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
1638
1639 goto suspend_exit;
1640 }
1641
1642wol_dis:
1643
1644 ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);
1645
1646
1647 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1648 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1649 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1650 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1651 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1652 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1653
1654 atl2_force_ps(hw);
1655 hw->phy_configured = false;
1656
1657 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
1658
1659suspend_exit:
1660 if (netif_running(netdev))
1661 atl2_free_irq(adapter);
1662
1663 pci_disable_device(pdev);
1664
1665 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1666
1667 return 0;
1668}
1669
1670#ifdef CONFIG_PM
1671static int atl2_resume(struct pci_dev *pdev)
1672{
1673 struct net_device *netdev = pci_get_drvdata(pdev);
1674 struct atl2_adapter *adapter = netdev_priv(netdev);
1675 u32 err;
1676
1677 pci_set_power_state(pdev, PCI_D0);
1678 pci_restore_state(pdev);
1679
1680 err = pci_enable_device(pdev);
1681 if (err) {
1682 printk(KERN_ERR
1683 "atl2: Cannot enable PCI device from suspend\n");
1684 return err;
1685 }
1686
1687 pci_set_master(pdev);
1688
1689 ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL);
1690
1691 pci_enable_wake(pdev, PCI_D3hot, 0);
1692 pci_enable_wake(pdev, PCI_D3cold, 0);
1693
1694 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
1695
1696 if (netif_running(netdev)) {
1697 err = atl2_request_irq(adapter);
1698 if (err)
1699 return err;
1700 }
1701
1702 atl2_reset_hw(&adapter->hw);
1703
1704 if (netif_running(netdev))
1705 atl2_up(adapter);
1706
1707 netif_device_attach(netdev);
1708
1709 return 0;
1710}
1711#endif
1712
1713static void atl2_shutdown(struct pci_dev *pdev)
1714{
1715 atl2_suspend(pdev, PMSG_SUSPEND);
1716}
1717
1718static struct pci_driver atl2_driver = {
1719 .name = atl2_driver_name,
1720 .id_table = atl2_pci_tbl,
1721 .probe = atl2_probe,
1722 .remove = __devexit_p(atl2_remove),
1723
1724 .suspend = atl2_suspend,
1725#ifdef CONFIG_PM
1726 .resume = atl2_resume,
1727#endif
1728 .shutdown = atl2_shutdown,
1729};
1730
1731
1732
1733
1734
1735
1736
1737static int __init atl2_init_module(void)
1738{
1739 printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
1740 atl2_driver_version);
1741 printk(KERN_INFO "%s\n", atl2_copyright);
1742 return pci_register_driver(&atl2_driver);
1743}
1744module_init(atl2_init_module);
1745
1746
1747
1748
1749
1750
1751
1752static void __exit atl2_exit_module(void)
1753{
1754 pci_unregister_driver(&atl2_driver);
1755}
1756module_exit(atl2_exit_module);
1757
1758static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1759{
1760 struct atl2_adapter *adapter = hw->back;
1761 pci_read_config_word(adapter->pdev, reg, value);
1762}
1763
1764static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1765{
1766 struct atl2_adapter *adapter = hw->back;
1767 pci_write_config_word(adapter->pdev, reg, *value);
1768}
1769
1770static int atl2_get_settings(struct net_device *netdev,
1771 struct ethtool_cmd *ecmd)
1772{
1773 struct atl2_adapter *adapter = netdev_priv(netdev);
1774 struct atl2_hw *hw = &adapter->hw;
1775
1776 ecmd->supported = (SUPPORTED_10baseT_Half |
1777 SUPPORTED_10baseT_Full |
1778 SUPPORTED_100baseT_Half |
1779 SUPPORTED_100baseT_Full |
1780 SUPPORTED_Autoneg |
1781 SUPPORTED_TP);
1782 ecmd->advertising = ADVERTISED_TP;
1783
1784 ecmd->advertising |= ADVERTISED_Autoneg;
1785 ecmd->advertising |= hw->autoneg_advertised;
1786
1787 ecmd->port = PORT_TP;
1788 ecmd->phy_address = 0;
1789 ecmd->transceiver = XCVR_INTERNAL;
1790
1791 if (adapter->link_speed != SPEED_0) {
1792 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
1793 if (adapter->link_duplex == FULL_DUPLEX)
1794 ecmd->duplex = DUPLEX_FULL;
1795 else
1796 ecmd->duplex = DUPLEX_HALF;
1797 } else {
1798 ethtool_cmd_speed_set(ecmd, -1);
1799 ecmd->duplex = -1;
1800 }
1801
1802 ecmd->autoneg = AUTONEG_ENABLE;
1803 return 0;
1804}
1805
1806static int atl2_set_settings(struct net_device *netdev,
1807 struct ethtool_cmd *ecmd)
1808{
1809 struct atl2_adapter *adapter = netdev_priv(netdev);
1810 struct atl2_hw *hw = &adapter->hw;
1811
1812 while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
1813 msleep(1);
1814
1815 if (ecmd->autoneg == AUTONEG_ENABLE) {
1816#define MY_ADV_MASK (ADVERTISE_10_HALF | \
1817 ADVERTISE_10_FULL | \
1818 ADVERTISE_100_HALF| \
1819 ADVERTISE_100_FULL)
1820
1821 if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
1822 hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
1823 hw->autoneg_advertised = MY_ADV_MASK;
1824 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1825 ADVERTISE_100_FULL) {
1826 hw->MediaType = MEDIA_TYPE_100M_FULL;
1827 hw->autoneg_advertised = ADVERTISE_100_FULL;
1828 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1829 ADVERTISE_100_HALF) {
1830 hw->MediaType = MEDIA_TYPE_100M_HALF;
1831 hw->autoneg_advertised = ADVERTISE_100_HALF;
1832 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1833 ADVERTISE_10_FULL) {
1834 hw->MediaType = MEDIA_TYPE_10M_FULL;
1835 hw->autoneg_advertised = ADVERTISE_10_FULL;
1836 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1837 ADVERTISE_10_HALF) {
1838 hw->MediaType = MEDIA_TYPE_10M_HALF;
1839 hw->autoneg_advertised = ADVERTISE_10_HALF;
1840 } else {
1841 clear_bit(__ATL2_RESETTING, &adapter->flags);
1842 return -EINVAL;
1843 }
1844 ecmd->advertising = hw->autoneg_advertised |
1845 ADVERTISED_TP | ADVERTISED_Autoneg;
1846 } else {
1847 clear_bit(__ATL2_RESETTING, &adapter->flags);
1848 return -EINVAL;
1849 }
1850
1851
1852 if (netif_running(adapter->netdev)) {
1853 atl2_down(adapter);
1854 atl2_up(adapter);
1855 } else
1856 atl2_reset_hw(&adapter->hw);
1857
1858 clear_bit(__ATL2_RESETTING, &adapter->flags);
1859 return 0;
1860}
1861
1862static u32 atl2_get_msglevel(struct net_device *netdev)
1863{
1864 return 0;
1865}
1866
1867
1868
1869
1870static void atl2_set_msglevel(struct net_device *netdev, u32 data)
1871{
1872}
1873
1874static int atl2_get_regs_len(struct net_device *netdev)
1875{
1876#define ATL2_REGS_LEN 42
1877 return sizeof(u32) * ATL2_REGS_LEN;
1878}
1879
1880static void atl2_get_regs(struct net_device *netdev,
1881 struct ethtool_regs *regs, void *p)
1882{
1883 struct atl2_adapter *adapter = netdev_priv(netdev);
1884 struct atl2_hw *hw = &adapter->hw;
1885 u32 *regs_buff = p;
1886 u16 phy_data;
1887
1888 memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);
1889
1890 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
1891
1892 regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP);
1893 regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
1894 regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
1895 regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL);
1896 regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
1897 regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL);
1898 regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
1899 regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
1900 regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE);
1901 regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
1902 regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
1903 regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
1904 regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
1905 regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
1906 regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
1907 regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
1908 regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
1909 regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
1910 regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
1911 regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
1912 regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
1913 regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
1914 regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
1915 regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
1916 regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
1917 regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
1918 regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
1919 regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
1920 regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
1921 regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
1922 regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
1923 regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
1924 regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
1925 regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
1926 regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
1927 regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
1928 regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
1929 regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
1930 regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);
1931
1932 atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
1933 regs_buff[40] = (u32)phy_data;
1934 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1935 regs_buff[41] = (u32)phy_data;
1936}
1937
1938static int atl2_get_eeprom_len(struct net_device *netdev)
1939{
1940 struct atl2_adapter *adapter = netdev_priv(netdev);
1941
1942 if (!atl2_check_eeprom_exist(&adapter->hw))
1943 return 512;
1944 else
1945 return 0;
1946}
1947
1948static int atl2_get_eeprom(struct net_device *netdev,
1949 struct ethtool_eeprom *eeprom, u8 *bytes)
1950{
1951 struct atl2_adapter *adapter = netdev_priv(netdev);
1952 struct atl2_hw *hw = &adapter->hw;
1953 u32 *eeprom_buff;
1954 int first_dword, last_dword;
1955 int ret_val = 0;
1956 int i;
1957
1958 if (eeprom->len == 0)
1959 return -EINVAL;
1960
1961 if (atl2_check_eeprom_exist(hw))
1962 return -EINVAL;
1963
1964 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1965
1966 first_dword = eeprom->offset >> 2;
1967 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
1968
1969 eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
1970 GFP_KERNEL);
1971 if (!eeprom_buff)
1972 return -ENOMEM;
1973
1974 for (i = first_dword; i < last_dword; i++) {
1975 if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
1976 ret_val = -EIO;
1977 goto free;
1978 }
1979 }
1980
1981 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
1982 eeprom->len);
1983free:
1984 kfree(eeprom_buff);
1985
1986 return ret_val;
1987}
1988
1989static int atl2_set_eeprom(struct net_device *netdev,
1990 struct ethtool_eeprom *eeprom, u8 *bytes)
1991{
1992 struct atl2_adapter *adapter = netdev_priv(netdev);
1993 struct atl2_hw *hw = &adapter->hw;
1994 u32 *eeprom_buff;
1995 u32 *ptr;
1996 int max_len, first_dword, last_dword, ret_val = 0;
1997 int i;
1998
1999 if (eeprom->len == 0)
2000 return -EOPNOTSUPP;
2001
2002 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
2003 return -EFAULT;
2004
2005 max_len = 512;
2006
2007 first_dword = eeprom->offset >> 2;
2008 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
2009 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
2010 if (!eeprom_buff)
2011 return -ENOMEM;
2012
2013 ptr = eeprom_buff;
2014
2015 if (eeprom->offset & 3) {
2016
2017
2018 if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
2019 ret_val = -EIO;
2020 goto out;
2021 }
2022 ptr++;
2023 }
2024 if (((eeprom->offset + eeprom->len) & 3)) {
2025
2026
2027
2028
2029 if (!atl2_read_eeprom(hw, last_dword * 4,
2030 &(eeprom_buff[last_dword - first_dword]))) {
2031 ret_val = -EIO;
2032 goto out;
2033 }
2034 }
2035
2036
2037 memcpy(ptr, bytes, eeprom->len);
2038
2039 for (i = 0; i < last_dword - first_dword + 1; i++) {
2040 if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) {
2041 ret_val = -EIO;
2042 goto out;
2043 }
2044 }
2045 out:
2046 kfree(eeprom_buff);
2047 return ret_val;
2048}
2049
2050static void atl2_get_drvinfo(struct net_device *netdev,
2051 struct ethtool_drvinfo *drvinfo)
2052{
2053 struct atl2_adapter *adapter = netdev_priv(netdev);
2054
2055 strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
2056 strlcpy(drvinfo->version, atl2_driver_version,
2057 sizeof(drvinfo->version));
2058 strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
2059 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
2060 sizeof(drvinfo->bus_info));
2061 drvinfo->n_stats = 0;
2062 drvinfo->testinfo_len = 0;
2063 drvinfo->regdump_len = atl2_get_regs_len(netdev);
2064 drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
2065}
2066
2067static void atl2_get_wol(struct net_device *netdev,
2068 struct ethtool_wolinfo *wol)
2069{
2070 struct atl2_adapter *adapter = netdev_priv(netdev);
2071
2072 wol->supported = WAKE_MAGIC;
2073 wol->wolopts = 0;
2074
2075 if (adapter->wol & ATLX_WUFC_EX)
2076 wol->wolopts |= WAKE_UCAST;
2077 if (adapter->wol & ATLX_WUFC_MC)
2078 wol->wolopts |= WAKE_MCAST;
2079 if (adapter->wol & ATLX_WUFC_BC)
2080 wol->wolopts |= WAKE_BCAST;
2081 if (adapter->wol & ATLX_WUFC_MAG)
2082 wol->wolopts |= WAKE_MAGIC;
2083 if (adapter->wol & ATLX_WUFC_LNKC)
2084 wol->wolopts |= WAKE_PHY;
2085}
2086
2087static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2088{
2089 struct atl2_adapter *adapter = netdev_priv(netdev);
2090
2091 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
2092 return -EOPNOTSUPP;
2093
2094 if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
2095 return -EOPNOTSUPP;
2096
2097
2098 adapter->wol = 0;
2099
2100 if (wol->wolopts & WAKE_MAGIC)
2101 adapter->wol |= ATLX_WUFC_MAG;
2102 if (wol->wolopts & WAKE_PHY)
2103 adapter->wol |= ATLX_WUFC_LNKC;
2104
2105 return 0;
2106}
2107
2108static int atl2_nway_reset(struct net_device *netdev)
2109{
2110 struct atl2_adapter *adapter = netdev_priv(netdev);
2111 if (netif_running(netdev))
2112 atl2_reinit_locked(adapter);
2113 return 0;
2114}
2115
2116static const struct ethtool_ops atl2_ethtool_ops = {
2117 .get_settings = atl2_get_settings,
2118 .set_settings = atl2_set_settings,
2119 .get_drvinfo = atl2_get_drvinfo,
2120 .get_regs_len = atl2_get_regs_len,
2121 .get_regs = atl2_get_regs,
2122 .get_wol = atl2_get_wol,
2123 .set_wol = atl2_set_wol,
2124 .get_msglevel = atl2_get_msglevel,
2125 .set_msglevel = atl2_set_msglevel,
2126 .nway_reset = atl2_nway_reset,
2127 .get_link = ethtool_op_get_link,
2128 .get_eeprom_len = atl2_get_eeprom_len,
2129 .get_eeprom = atl2_get_eeprom,
2130 .set_eeprom = atl2_set_eeprom,
2131};
2132
2133static void atl2_set_ethtool_ops(struct net_device *netdev)
2134{
2135 SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
2136}
2137
2138#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
2139 (((a) & 0xff00ff00) >> 8))
2140#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
2141#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))
2142
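/*
 * atl2_reset_hw - reset the transmit and receive units
 * @hw: struct containing variables accessed by shared code
 *
 * Makes sure the PCI command register enables I/O, memory and bus
 * mastering, then issues a soft reset via REG_MASTER_CTRL and waits for
 * the device to report idle.
 *
 * Returns 0 on success, or the non-zero idle status if the device does
 * not become idle in time.
 */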
2149static s32 atl2_reset_hw(struct atl2_hw *hw)
2150{
2151 u32 icr;
2152 u16 pci_cfg_cmd_word;
2153 int i;
2154
2155
2156 atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
2157 if ((pci_cfg_cmd_word &
2158 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
2159 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
2160 pci_cfg_cmd_word |=
2161 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
2162 atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
2163 }
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177 ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
2178 wmb();
2179 msleep(1);
2180
2181
2182 for (i = 0; i < 10; i++) {
2183 icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
2184 if (!icr)
2185 break;
2186 msleep(1);
2187 cpu_relax();
2188 }
2189
2190 if (icr)
2191 return icr;
2192
2193 return 0;
2194}
2195
2196#define CUSTOM_SPI_CS_SETUP 2
2197#define CUSTOM_SPI_CLK_HI 2
2198#define CUSTOM_SPI_CLK_LO 2
2199#define CUSTOM_SPI_CS_HOLD 2
2200#define CUSTOM_SPI_CS_HI 3
2201
2202static struct atl2_spi_flash_dev flash_table[] =
2203{
2204
2205{"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 },
2206{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 },
2207{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 },
2208};
2209
2210static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
2211{
2212 int i;
2213 u32 value;
2214
2215 ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
2216 ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);
2217
2218 value = SPI_FLASH_CTRL_WAIT_READY |
2219 (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
2220 SPI_FLASH_CTRL_CS_SETUP_SHIFT |
2221 (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
2222 SPI_FLASH_CTRL_CLK_HI_SHIFT |
2223 (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
2224 SPI_FLASH_CTRL_CLK_LO_SHIFT |
2225 (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
2226 SPI_FLASH_CTRL_CS_HOLD_SHIFT |
2227 (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
2228 SPI_FLASH_CTRL_CS_HI_SHIFT |
2229 (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;
2230
2231 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2232
2233 value |= SPI_FLASH_CTRL_START;
2234
2235 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2236
2237 for (i = 0; i < 10; i++) {
2238 msleep(1);
2239 value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
2240 if (!(value & SPI_FLASH_CTRL_START))
2241 break;
2242 }
2243
2244 if (value & SPI_FLASH_CTRL_START)
2245 return false;
2246
2247 *buf = ATL2_READ_REG(hw, REG_SPI_DATA);
2248
2249 return true;
2250}
2251
2252
2253
2254
2255
2256static int get_permanent_address(struct atl2_hw *hw)
2257{
2258 u32 Addr[2];
2259 u32 i, Control;
2260 u16 Register;
2261 u8 EthAddr[ETH_ALEN];
2262 bool KeyValid;
2263
2264 if (is_valid_ether_addr(hw->perm_mac_addr))
2265 return 0;
2266
2267 Addr[0] = 0;
2268 Addr[1] = 0;
2269
2270 if (!atl2_check_eeprom_exist(hw)) {
2271 Register = 0;
2272 KeyValid = false;
2273
2274
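		/* walk the EEPROM: a dword whose low byte is 0x5A carries a
		 * register offset in its high word, and the following dword
		 * holds that register's value */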
2275 i = 0;
2276 while (1) {
2277 if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
2278 if (KeyValid) {
2279 if (Register == REG_MAC_STA_ADDR)
2280 Addr[0] = Control;
2281 else if (Register ==
2282 (REG_MAC_STA_ADDR + 4))
2283 Addr[1] = Control;
2284 KeyValid = false;
2285 } else if ((Control & 0xff) == 0x5A) {
2286 KeyValid = true;
2287 Register = (u16) (Control >> 16);
2288 } else {
2289
2290 break;
2291 }
2292 } else {
2293 break;
2294 }
2295 i += 4;
2296 }
2297
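		/* reassemble the byte-swapped station address registers into
		 * EthAddr[] in canonical byte-array order */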
2298 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2299 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
2300
2301 if (is_valid_ether_addr(EthAddr)) {
2302 memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
2303 return 0;
2304 }
2305 return 1;
2306 }
2307
2308
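	/* no EEPROM: look for the address in the SPI flash instead */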
2309 Addr[0] = 0;
2310 Addr[1] = 0;
2311 Register = 0;
2312 KeyValid = false;
2313 i = 0;
2314 while (1) {
2315 if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
2316 if (KeyValid) {
2317 if (Register == REG_MAC_STA_ADDR)
2318 Addr[0] = Control;
2319 else if (Register == (REG_MAC_STA_ADDR + 4))
2320 Addr[1] = Control;
2321 KeyValid = false;
2322 } else if ((Control & 0xff) == 0x5A) {
2323 KeyValid = true;
2324 Register = (u16) (Control >> 16);
2325 } else {
2326 break;
2327 }
2328 } else {
2329 break;
2330 }
2331 i += 4;
2332 }
2333
2334 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2335 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
2336 if (is_valid_ether_addr(EthAddr)) {
2337 memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
2338 return 0;
2339 }
2340
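	/* last resort: use whatever the MAC station address registers hold */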
2341 Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
2342 Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
2343 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2344 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
2345
2346 if (is_valid_ether_addr(EthAddr)) {
2347 memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
2348 return 0;
2349 }
2350
2351 return 1;
2352}
2353
/*
 * atl2_read_mac_addr
 *
 * Reads the adapter's permanent MAC address and copies it into the
 * current MAC address; falls back to a fixed vendor address if no
 * valid address can be found.
 */
2359static s32 atl2_read_mac_addr(struct atl2_hw *hw)
2360{
2361 if (get_permanent_address(hw)) {
		/* no valid address found anywhere; fall back to a fixed
		 * vendor test address */
2364 hw->perm_mac_addr[0] = 0x00;
2365 hw->perm_mac_addr[1] = 0x13;
2366 hw->perm_mac_addr[2] = 0x74;
2367 hw->perm_mac_addr[3] = 0x00;
2368 hw->perm_mac_addr[4] = 0x5c;
2369 hw->perm_mac_addr[5] = 0x38;
2370 }
2371
2372 memcpy(hw->mac_addr, hw->perm_mac_addr, ETH_ALEN);
2373
2374 return 0;
2375}
2376
/*
 * atl2_hash_mc_addr - hash a multicast address into the hash table
 *
 * hw - struct containing variables accessed by shared code
 * mc_addr - the multicast address to hash
 *
 * The hash value is the bit-reversed CRC-32 of the address: compute
 * the little-endian CRC and then mirror it MSB-to-LSB.
 */
2390static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
2391{
2392 u32 crc32, value;
2393 int i;
2394
2395 value = 0;
2396 crc32 = ether_crc_le(6, mc_addr);
2397
2398 for (i = 0; i < 32; i++)
2399 value |= (((crc32 >> i) & 1) << (31 - i));
2400
2401 return value;
2402}
2403
/*
 * atl2_hash_set - set the bit in the multicast hash table that
 * corresponds to the given hash value
 *
 * hw - struct containing variables accessed by shared code
 * hash_value - multicast address hash value
 */
2410static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
2411{
2412 u32 hash_bit, hash_reg;
2413 u32 mta;
2414
	/*
	 * The hash table is an array of two 32-bit registers that is
	 * treated as a 64-bit bit array.  Bit 31 of the hash selects
	 * the register and bits 30:26 select the bit within it; read
	 * the register, OR in the new bit and write it back.
	 */
2423 hash_reg = (hash_value >> 31) & 0x1;
2424 hash_bit = (hash_value >> 26) & 0x1F;
2425
2426 mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
2427
2428 mta |= (1 << hash_bit);
2429
2430 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
2431}
2432
/* program the PCIe LTSSM test-mode and DLL TX control registers with
 * their default values */
2436static void atl2_init_pcie(struct atl2_hw *hw)
2437{
2438 u32 value;
2439 value = LTSSM_TEST_MODE_DEF;
2440 ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
2441
2442 value = PCIE_DLL_TX_CTRL1_DEF;
2443 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
2444}
2445
2446static void atl2_init_flash_opcode(struct atl2_hw *hw)
2447{
2448 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
2449 hw->flash_vendor = 0;

	/* program the selected vendor's op-codes into the flash controller */
2452 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
2453 flash_table[hw->flash_vendor].cmdPROGRAM);
2454 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
2455 flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
2456 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
2457 flash_table[hw->flash_vendor].cmdCHIP_ERASE);
2458 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
2459 flash_table[hw->flash_vendor].cmdRDID);
2460 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
2461 flash_table[hw->flash_vendor].cmdWREN);
2462 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
2463 flash_table[hw->flash_vendor].cmdRDSR);
2464 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
2465 flash_table[hw->flash_vendor].cmdWRSR);
2466 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
2467 flash_table[hw->flash_vendor].cmdREAD);
2468}
2469
/*
 * atl2_init_hw - perform basic configuration of the adapter
 *
 * hw - struct containing variables accessed by shared code
 *
 * Assumes the controller has previously been reset and is in a
 * post-reset, uninitialized state.  Clears the multicast hash table,
 * sets up the flash op-codes and initializes the PHY.  The transmit
 * and receive units are left disabled.
 */
2479static s32 atl2_init_hw(struct atl2_hw *hw)
2480{
	s32 ret_val;
2482
2483 atl2_init_pcie(hw);
2484
	/* zero out the multicast hash table */
2487 ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
2488 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
2489
2490 atl2_init_flash_opcode(hw);
2491
2492 ret_val = atl2_phy_init(hw);
2493
2494 return ret_val;
2495}
2496
/*
 * atl2_get_speed_and_duplex - detect the current speed and duplex
 * settings from the PHY-specific status register
 *
 * hw - struct containing variables accessed by shared code
 * speed - speed of the connection
 * duplex - duplex setting of the connection
 */
2504static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
2505 u16 *duplex)
2506{
2507 s32 ret_val;
2508 u16 phy_data;
2509
2510
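	/* the PHY-specific status register reports the resolved link
	 * parameters once speed/duplex resolution has completed */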
2511 ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
2512 if (ret_val)
2513 return ret_val;
2514
2515 if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
2516 return ATLX_ERR_PHY_RES;
2517
2518 switch (phy_data & MII_ATLX_PSSR_SPEED) {
2519 case MII_ATLX_PSSR_100MBS:
2520 *speed = SPEED_100;
2521 break;
2522 case MII_ATLX_PSSR_10MBS:
2523 *speed = SPEED_10;
2524 break;
2525 default:
2526 return ATLX_ERR_PHY_SPEED;
2528 }
2529
2530 if (phy_data & MII_ATLX_PSSR_DPLX)
2531 *duplex = FULL_DUPLEX;
2532 else
2533 *duplex = HALF_DUPLEX;
2534
2535 return 0;
2536}
2537
/*
 * atl2_read_phy_reg - read the value of a PHY register over MDIO
 *
 * hw - struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 */
2543static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
2544{
2545 u32 val;
2546 int i;
2547
2548 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2549 MDIO_START |
2550 MDIO_SUP_PREAMBLE |
2551 MDIO_RW |
2552 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
2553 ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
2554
2555 wmb();
2556
2557 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2558 udelay(2);
2559 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2560 if (!(val & (MDIO_START | MDIO_BUSY)))
2561 break;
2562 wmb();
2563 }
2564 if (!(val & (MDIO_START | MDIO_BUSY))) {
2565 *phy_data = (u16)val;
2566 return 0;
2567 }
2568
2569 return ATLX_ERR_PHY;
2570}
2571
/*
 * atl2_write_phy_reg - write a value to a PHY register over MDIO
 *
 * hw - struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * phy_data - data to write to the PHY
 */
2578static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
2579{
2580 int i;
2581 u32 val;
2582
2583 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
2584 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
2585 MDIO_SUP_PREAMBLE |
2586 MDIO_START |
2587 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
2588 ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
2589
2590 wmb();
2591
2592 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2593 udelay(2);
2594 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2595 if (!(val & (MDIO_START | MDIO_BUSY)))
2596 break;
2597
2598 wmb();
2599 }
2600
2601 if (!(val & (MDIO_START | MDIO_BUSY)))
2602 return 0;
2603
2604 return ATLX_ERR_PHY;
2605}
2606
/*
 * atl2_phy_setup_autoneg_adv - configure the PHY auto-negotiation and
 * flow-control advertisement settings
 *
 * hw - struct containing variables accessed by shared code
 */
2612static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
2613{
2614 s32 ret_val;
2615 s16 mii_autoneg_adv_reg;

	/* start from the default capability mask for the advertisement
	 * register */
2618 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;

	/*
	 * Clear all the 10/100 Mb speed bits in the advertisement
	 * register, then set only the ones that match the configured
	 * media type below.
	 */
2630 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;

	/* advertise the capabilities that match the requested media type */
2634 switch (hw->MediaType) {
2635 case MEDIA_TYPE_AUTO_SENSOR:
2636 mii_autoneg_adv_reg |=
2637 (MII_AR_10T_HD_CAPS |
2638 MII_AR_10T_FD_CAPS |
2639 MII_AR_100TX_HD_CAPS|
2640 MII_AR_100TX_FD_CAPS);
2641 hw->autoneg_advertised =
2642 ADVERTISE_10_HALF |
2643 ADVERTISE_10_FULL |
2644 ADVERTISE_100_HALF|
2645 ADVERTISE_100_FULL;
2646 break;
2647 case MEDIA_TYPE_100M_FULL:
2648 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
2649 hw->autoneg_advertised = ADVERTISE_100_FULL;
2650 break;
2651 case MEDIA_TYPE_100M_HALF:
2652 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
2653 hw->autoneg_advertised = ADVERTISE_100_HALF;
2654 break;
2655 case MEDIA_TYPE_10M_FULL:
2656 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
2657 hw->autoneg_advertised = ADVERTISE_10_FULL;
2658 break;
2659 default:
2660 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
2661 hw->autoneg_advertised = ADVERTISE_10_HALF;
2662 break;
2663 }

	/* always advertise both symmetric and asymmetric flow control */
2666 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
2667
2668 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
2669
2670 ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
2671
2672 if (ret_val)
2673 return ret_val;
2674
2675 return 0;
2676}
2677
/*
 * atl2_phy_commit - reset the PHY and restart auto-negotiation so the
 * new advertisement settings take effect
 *
 * hw - struct containing variables accessed by shared code
 */
2685static s32 atl2_phy_commit(struct atl2_hw *hw)
2686{
2687 s32 ret_val;
2688 u16 phy_data;
2689
2690 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2691 ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
2692 if (ret_val) {
2693 u32 val;
2694 int i;
2695
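		/* the PCIe serdes link may be down; poll the MDIO controller
		 * for up to 25 ms before giving up */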
2696 for (i = 0; i < 25; i++) {
2697 msleep(1);
2698 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2699 if (!(val & (MDIO_START | MDIO_BUSY)))
2700 break;
2701 }
2702
2703 if (0 != (val & (MDIO_START | MDIO_BUSY))) {
			printk(KERN_ERR "atl2: PCIe link down for at least 25 ms!\n");
2705 return ret_val;
2706 }
2707 }
2708 return 0;
2709}
2710
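/*
 * atl2_phy_init - power up the PHY, take it out of power-saving mode,
 * program the auto-negotiation advertisement and restart
 * auto-negotiation.  Does nothing if the PHY is already configured.
 */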
2711static s32 atl2_phy_init(struct atl2_hw *hw)
2712{
2713 s32 ret_val;
2714 u16 phy_val;
2715
2716 if (hw->phy_configured)
2717 return 0;

	/* enable the PHY */
2720 ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
2721 ATL2_WRITE_FLUSH(hw);
2722 msleep(1);

	/* read PHY debug register 0 and clear bit 0x1000 (the power-saving
	 * bit that atl2_force_ps() sets) if it is set */
2725 atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
2726 atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
2727
2728
2729 if (phy_val & 0x1000) {
2730 phy_val &= ~0x1000;
2731 atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
2732 }
2733
2734 msleep(1);

	/* write PHY register 18 to enable the PHY link-change interrupt */
2737 ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
2738 if (ret_val)
2739 return ret_val;

	/* set up the auto-negotiation advertisement */
2742 ret_val = atl2_phy_setup_autoneg_adv(hw);
2743 if (ret_val)
2744 return ret_val;

	/* reset the PHY and restart auto-negotiation */
2747 ret_val = atl2_phy_commit(hw);
2748 if (ret_val)
2749 return ret_val;
2750
2751 hw->phy_configured = true;
2752
2753 return ret_val;
2754}
2755
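/* program hw->mac_addr into the MAC station address registers */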
2756static void atl2_set_mac_addr(struct atl2_hw *hw)
2757{
2758 u32 value;
2759
	/*
	 * e.g. for 00-0B-6A-F6-00-DC: the low dword holds bytes 2-5
	 * (0x6AF600DC) and the high word holds bytes 0-1 (0x000B).
	 */
2762 value = (((u32)hw->mac_addr[2]) << 24) |
2763 (((u32)hw->mac_addr[3]) << 16) |
2764 (((u32)hw->mac_addr[4]) << 8) |
2765 (((u32)hw->mac_addr[5]));
2766 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
2767
2768 value = (((u32)hw->mac_addr[0]) << 8) |
2769 (((u32)hw->mac_addr[1]));
2770 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
2771}
2772
/*
 * atl2_check_eeprom_exist
 *
 * Returns 0 if an EEPROM is attached to the controller, 1 otherwise.
 */
2777static int atl2_check_eeprom_exist(struct atl2_hw *hw)
2778{
2779 u32 value;
2780
2781 value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
2782 if (value & SPI_FLASH_CTRL_EN_VPD) {
2783 value &= ~SPI_FLASH_CTRL_EN_VPD;
2784 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2785 }
2786 value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
2787 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2788}
2789
/* EEPROM writes are not implemented; the stub simply reports success */
2791static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
2792{
2793 return true;
2794}
2795
2796static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
2797{
2798 int i;
2799 u32 Control;
2800
2801 if (Offset & 0x3)
2802 return false;
2803
2804 ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
2805 Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2806 ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);
2807
2808 for (i = 0; i < 10; i++) {
2809 msleep(2);
2810 Control = ATL2_READ_REG(hw, REG_VPD_CAP);
2811 if (Control & VPD_CAP_VPD_FLAG)
2812 break;
2813 }
2814
2815 if (Control & VPD_CAP_VPD_FLAG) {
2816 *pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
2817 return true;
2818 }
2819 return false;
2820}
2821
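/* force the PHY into its power-saving state via the debug registers */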
2822static void atl2_force_ps(struct atl2_hw *hw)
2823{
2824 u16 phy_val;
2825
2826 atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
2827 atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
2828 atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);
2829
2830 atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
2831 atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
2832 atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
2833 atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
2834}
2835
/* This is the only thing that needs to be changed to adjust the
 * maximum number of boards that the driver can manage.
 */
2839#define ATL2_MAX_NIC 4
2840
2841#define OPTION_UNSET -1
2842#define OPTION_DISABLED 0
2843#define OPTION_ENABLED 1
2844
/* All parameters are treated the same, as an integer array of values.
 * This macro just reduces the need to repeat the same declaration code
 * over and over (plus this helps to avoid typo bugs).
 */
2849#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
2850#ifndef module_param_array
2851
/* Module parameters are always initialized to -1 (OPTION_UNSET) so the
 * driver can tell the difference between no user-specified value and a
 * user asking for the default; the real defaults are filled in by
 * atl2_check_options().
 *
 * This fallback is used on kernels that do not provide
 * module_param_array().
 */
2861#define ATL2_PARAM(X, desc) \
2862 static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
2863 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
2864 MODULE_PARM_DESC(X, desc);
2865#else
2866#define ATL2_PARAM(X, desc) \
2867 static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
2868 static unsigned int num_##X; \
2869 module_param_array_named(X, X, int, &num_##X, 0); \
2870 MODULE_PARM_DESC(X, desc);
2871#endif
2872
/*
 * TxMemSize - transmit memory size, in kilobytes
 * Valid Range: 4 - 64
 * Default Value: 8
 */
2878#define ATL2_MIN_TX_MEMSIZE 4
2879#define ATL2_MAX_TX_MEMSIZE 64
2880#define ATL2_DEFAULT_TX_MEMSIZE 8
2881ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");
2882
/*
 * RxMemBlock - number of receive memory blocks
 * Valid Range: 16 - 512
 * Default Value: 64
 */
2888#define ATL2_MIN_RXD_COUNT 16
2889#define ATL2_MAX_RXD_COUNT 512
2890#define ATL2_DEFAULT_RXD_COUNT 64
ATL2_PARAM(RxMemBlock, "Number of receive memory blocks");
2892
/*
 * MediaType - user-specified speed/duplex override
 * Valid Range: MEDIA_TYPE_AUTO_SENSOR through MEDIA_TYPE_10M_HALF
 *  - auto-negotiate at all supported speeds, or force 100 Mbps or
 *    10 Mbps at full or half duplex
 * Default Value: MEDIA_TYPE_AUTO_SENSOR (auto-negotiate)
 */
2905ATL2_PARAM(MediaType, "MediaType Select");
2906
/*
 * IntModTimer - interrupt moderation timer
 * Valid Range: 50 - 65000
 * Default Value: 100
 */
2912#define INT_MOD_DEFAULT_CNT 100
2913#define INT_MOD_MAX_CNT 65000
2914#define INT_MOD_MIN_CNT 50
2915ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");
2916
2917
/*
 * FlashVendor - SPI flash vendor
 * Valid Range: 0 - 2
 *  0 - Atmel
 *  1 - SST
 *  2 - ST
 */
2924ATL2_PARAM(FlashVendor, "SPI Flash Vendor");
2925
2926#define AUTONEG_ADV_DEFAULT 0x2F
2927#define AUTONEG_ADV_MASK 0x2F
2928#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
2929
2930#define FLASH_VENDOR_DEFAULT 0
2931#define FLASH_VENDOR_MIN 0
2932#define FLASH_VENDOR_MAX 2
2933
2934struct atl2_option {
2935 enum { enable_option, range_option, list_option } type;
2936 char *name;
2937 char *err;
2938 int def;
2939 union {
2940 struct {
2941 int min;
2942 int max;
2943 } r;
2944 struct {
2945 int nr;
2946 struct atl2_opt_list { int i; char *str; } *p;
2947 } l;
2948 } arg;
2949};
2950
2951static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
2952{
2953 int i;
2954 struct atl2_opt_list *ent;
2955
2956 if (*value == OPTION_UNSET) {
2957 *value = opt->def;
2958 return 0;
2959 }
2960
2961 switch (opt->type) {
2962 case enable_option:
2963 switch (*value) {
2964 case OPTION_ENABLED:
2965 printk(KERN_INFO "%s Enabled\n", opt->name);
2966 return 0;
2967 break;
2968 case OPTION_DISABLED:
2969 printk(KERN_INFO "%s Disabled\n", opt->name);
2970 return 0;
2971 break;
2972 }
2973 break;
2974 case range_option:
2975 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
2976 printk(KERN_INFO "%s set to %i\n", opt->name, *value);
2977 return 0;
2978 }
2979 break;
2980 case list_option:
2981 for (i = 0; i < opt->arg.l.nr; i++) {
2982 ent = &opt->arg.l.p[i];
2983 if (*value == ent->i) {
2984 if (ent->str[0] != '\0')
2985 printk(KERN_INFO "%s\n", ent->str);
2986 return 0;
2987 }
2988 }
2989 break;
2990 default:
2991 BUG();
2992 }
2993
2994 printk(KERN_INFO "Invalid %s specified (%i) %s\n",
2995 opt->name, *value, opt->err);
2996 *value = opt->def;
2997 return -1;
2998}
2999
/*
 * atl2_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input.  If an invalid value is given, or if no user specified
 * value exists, a default value is used.  The final value is stored
 * in a variable in the adapter structure.
 */
3009static void __devinit atl2_check_options(struct atl2_adapter *adapter)
3010{
3011 int val;
3012 struct atl2_option opt;
3013 int bd = adapter->bd_number;
3014 if (bd >= ATL2_MAX_NIC) {
3015 printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
3016 bd);
3017 printk(KERN_NOTICE "Using defaults for all values\n");
3018#ifndef module_param_array
3019 bd = ATL2_MAX_NIC;
3020#endif
3021 }

	/* Bytes of Transmit Memory */
3024 opt.type = range_option;
3025 opt.name = "Bytes of Transmit Memory";
3026 opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
3027 opt.def = ATL2_DEFAULT_TX_MEMSIZE;
3028 opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
3029 opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
3030#ifdef module_param_array
3031 if (num_TxMemSize > bd) {
3032#endif
3033 val = TxMemSize[bd];
3034 atl2_validate_option(&val, &opt);
3035 adapter->txd_ring_size = ((u32) val) * 1024;
3036#ifdef module_param_array
3037 } else
3038 adapter->txd_ring_size = ((u32)opt.def) * 1024;
3039#endif
3040
3041 adapter->txs_ring_size = adapter->txd_ring_size / 128;
3042 if (adapter->txs_ring_size > 160)
3043 adapter->txs_ring_size = 160;

	/* Number of Receive Memory Blocks */
3046 opt.type = range_option;
	opt.name = "Number of receive memory blocks";
3048 opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
3049 opt.def = ATL2_DEFAULT_RXD_COUNT;
3050 opt.arg.r.min = ATL2_MIN_RXD_COUNT;
3051 opt.arg.r.max = ATL2_MAX_RXD_COUNT;
3052#ifdef module_param_array
3053 if (num_RxMemBlock > bd) {
3054#endif
3055 val = RxMemBlock[bd];
3056 atl2_validate_option(&val, &opt);
3057 adapter->rxd_ring_size = (u32)val;
3058
3059
3060#ifdef module_param_array
3061 } else
3062 adapter->rxd_ring_size = (u32)opt.def;
3063#endif
3064
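	/* derive the flow-control high/low watermarks from the RX ring size */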
3065 adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
3066 adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
3067 (adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
3068 (adapter->rxd_ring_size / 12);

	/* Interrupt Moderation Timer */
3071 opt.type = range_option;
	opt.name = "Interrupt Moderator Timer";
3073 opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
3074 opt.def = INT_MOD_DEFAULT_CNT;
3075 opt.arg.r.min = INT_MOD_MIN_CNT;
3076 opt.arg.r.max = INT_MOD_MAX_CNT;
3077#ifdef module_param_array
3078 if (num_IntModTimer > bd) {
3079#endif
3080 val = IntModTimer[bd];
3081 atl2_validate_option(&val, &opt);
3082 adapter->imt = (u16) val;
3083#ifdef module_param_array
3084 } else
3085 adapter->imt = (u16)(opt.def);
3086#endif
3087
3088 opt.type = range_option;
3089 opt.name = "SPI Flash Vendor";
3090 opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
3091 opt.def = FLASH_VENDOR_DEFAULT;
3092 opt.arg.r.min = FLASH_VENDOR_MIN;
3093 opt.arg.r.max = FLASH_VENDOR_MAX;
3094#ifdef module_param_array
3095 if (num_FlashVendor > bd) {
3096#endif
3097 val = FlashVendor[bd];
3098 atl2_validate_option(&val, &opt);
3099 adapter->hw.flash_vendor = (u8) val;
3100#ifdef module_param_array
3101 } else
3102 adapter->hw.flash_vendor = (u8)(opt.def);
3103#endif
3104
3105 opt.type = range_option;
3106 opt.name = "Speed/Duplex Selection";
3107 opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
3108 opt.def = MEDIA_TYPE_AUTO_SENSOR;
3109 opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
3110 opt.arg.r.max = MEDIA_TYPE_10M_HALF;
3111#ifdef module_param_array
3112 if (num_MediaType > bd) {
3113#endif
3114 val = MediaType[bd];
3115 atl2_validate_option(&val, &opt);
3116 adapter->hw.MediaType = (u16) val;
3117#ifdef module_param_array
3118 } else
3119 adapter->hw.MediaType = (u16)(opt.def);
3120#endif
3121}
3122