1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifdef TC35815_NAPI
26#define DRV_VERSION "1.36-NAPI"
27#else
28#define DRV_VERSION "1.36"
29#endif
30static const char *version = "tc35815.c:v" DRV_VERSION "\n";
31#define MODNAME "tc35815"
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/types.h>
36#include <linux/fcntl.h>
37#include <linux/interrupt.h>
38#include <linux/ioport.h>
39#include <linux/in.h>
40#include <linux/slab.h>
41#include <linux/string.h>
42#include <linux/spinlock.h>
43#include <linux/errno.h>
44#include <linux/init.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/skbuff.h>
48#include <linux/delay.h>
49#include <linux/pci.h>
50#include <linux/mii.h>
51#include <linux/ethtool.h>
52#include <linux/platform_device.h>
53#include <asm/io.h>
54#include <asm/byteorder.h>
55
56
57
58#define GATHER_TXINT
59#define WORKAROUND_LOSTCAR
60#define WORKAROUND_100HALF_PROMISC
61
62
/* Board variants supported by this driver, selected by PCI device ID. */
typedef enum {
	TC35815CF = 0,
	TC35815_NWU,		/* variant with Wake-on-LAN */
	TC35815_TX4939,		/* controller integrated in the TX4939 SoC */
} board_t;
68
69
/* Human-readable board names, indexed by board_t (printed at probe time). */
static const struct {
	const char *name;
} board_info[] __devinitdata = {
	{ "TOSHIBA TC35815CF 10/100BaseTX" },
	{ "TOSHIBA TC35815 with Wake on LAN" },
	{ "TOSHIBA TC35815/TX4939" },
};
77
/* PCI IDs handled by this driver; driver_data carries the board_t index. */
static const struct pci_device_id tc35815_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
	{0,}
};
MODULE_DEVICE_TABLE (pci, tc35815_pci_tbl);
85
86
/*
 * Driver-wide link options, reported in the probe debug printk and used
 * for link setup.  Presumably filled in by module parameters declared
 * elsewhere in this file — not visible in this chunk.
 */
static struct tc35815_options {
	int speed;
	int duplex;
	int doforce;
} options;
92
93
94
95
/*
 * TC35815 register file, memory-mapped at dev->base_addr (PCI BAR 1).
 * Field order and padding mirror the hardware layout exactly — do not
 * reorder or resize.  Accessed only through tc_readl()/tc_writel().
 */
struct tc35815_regs {
	/* DMA engine / frame descriptor registers */
	volatile __u32 DMA_Ctl;
	volatile __u32 TxFrmPtr;	/* bus address of first TxFD to process */
	volatile __u32 TxThrsh;
	volatile __u32 TxPollCtr;
	volatile __u32 BLFrmPtr;	/* buffer list (FrFD) pointer */
	volatile __u32 RxFragSize;
	volatile __u32 Int_En;
	volatile __u32 FDA_Bas;
	volatile __u32 FDA_Lim;
	volatile __u32 Int_Src;
	volatile __u32 unused0[2];	/* hardware gap */
	volatile __u32 PauseCnt;
	volatile __u32 RemPauCnt;
	volatile __u32 TxCtlFrmStat;
	volatile __u32 unused1;		/* hardware gap */
	/* MAC core registers */
	volatile __u32 MAC_Ctl;
	volatile __u32 CAM_Ctl;
	volatile __u32 Tx_Ctl;
	volatile __u32 Tx_Stat;
	volatile __u32 Rx_Ctl;
	volatile __u32 Rx_Stat;
	volatile __u32 MD_Data;		/* MII management data */
	volatile __u32 MD_CA;		/* MII management command/address */
	volatile __u32 CAM_Adr;
	volatile __u32 CAM_Data;
	volatile __u32 CAM_Ena;
	volatile __u32 PROM_Ctl;	/* serial PROM access (MAC address) */
	volatile __u32 PROM_Data;
	volatile __u32 Algn_Cnt;
	volatile __u32 CRC_Cnt;
	volatile __u32 Miss_Cnt;
};
129
130
131
132
133
134#define DMA_RxAlign 0x00c00000
135#define DMA_RxAlign_1 0x00400000
136#define DMA_RxAlign_2 0x00800000
137#define DMA_RxAlign_3 0x00c00000
138#define DMA_M66EnStat 0x00080000
139#define DMA_IntMask 0x00040000
140#define DMA_SWIntReq 0x00020000
141#define DMA_TxWakeUp 0x00010000
142#define DMA_RxBigE 0x00008000
143#define DMA_TxBigE 0x00004000
144#define DMA_TestMode 0x00002000
145#define DMA_PowrMgmnt 0x00001000
146#define DMA_DmBurst_Mask 0x000001fc
147
148
149#define RxFrag_EnPack 0x00008000
150#define RxFrag_MinFragMask 0x00000ffc
151
152
153#define MAC_Link10 0x00008000
154#define MAC_EnMissRoll 0x00002000
155#define MAC_MissRoll 0x00000400
156#define MAC_Loop10 0x00000080
157#define MAC_Conn_Auto 0x00000000
158#define MAC_Conn_10M 0x00000020
159#define MAC_Conn_Mll 0x00000040
160#define MAC_MacLoop 0x00000010
161#define MAC_FullDup 0x00000008
162#define MAC_Reset 0x00000004
163#define MAC_HaltImm 0x00000002
164#define MAC_HaltReq 0x00000001
165
166
167#define PROM_Busy 0x00008000
168#define PROM_Read 0x00004000
169#define PROM_Write 0x00002000
170#define PROM_Erase 0x00006000
171
172
173#define PROM_Addr_Ena 0x00000030
174
175
176
177#define CAM_CompEn 0x00000010
178#define CAM_NegCAM 0x00000008
179
180#define CAM_BroadAcc 0x00000004
181#define CAM_GroupAcc 0x00000002
182#define CAM_StationAcc 0x00000001
183
184
185#define CAM_ENTRY_MAX 21
186#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1)
187#define CAM_Ena_Bit(index) (1<<(index))
188#define CAM_ENTRY_DESTINATION 0
189#define CAM_ENTRY_SOURCE 1
190#define CAM_ENTRY_MACCTL 20
191
192
193#define Tx_En 0x00000001
194#define Tx_TxHalt 0x00000002
195#define Tx_NoPad 0x00000004
196#define Tx_NoCRC 0x00000008
197#define Tx_FBack 0x00000010
198#define Tx_EnUnder 0x00000100
199#define Tx_EnExDefer 0x00000200
200#define Tx_EnLCarr 0x00000400
201#define Tx_EnExColl 0x00000800
202#define Tx_EnLateColl 0x00001000
203#define Tx_EnTxPar 0x00002000
204#define Tx_EnComp 0x00004000
205
206
207#define Tx_TxColl_MASK 0x0000000F
208#define Tx_ExColl 0x00000010
209#define Tx_TXDefer 0x00000020
210#define Tx_Paused 0x00000040
211#define Tx_IntTx 0x00000080
212#define Tx_Under 0x00000100
213#define Tx_Defer 0x00000200
214#define Tx_NCarr 0x00000400
215#define Tx_10Stat 0x00000800
216#define Tx_LateColl 0x00001000
217#define Tx_TxPar 0x00002000
218#define Tx_Comp 0x00004000
219#define Tx_Halted 0x00008000
220#define Tx_SQErr 0x00010000
221
222
223#define Rx_EnGood 0x00004000
224#define Rx_EnRxPar 0x00002000
225#define Rx_EnLongErr 0x00000800
226#define Rx_EnOver 0x00000400
227#define Rx_EnCRCErr 0x00000200
228#define Rx_EnAlign 0x00000100
229#define Rx_IgnoreCRC 0x00000040
230#define Rx_StripCRC 0x00000010
231#define Rx_ShortEn 0x00000008
232#define Rx_LongEn 0x00000004
233#define Rx_RxHalt 0x00000002
234#define Rx_RxEn 0x00000001
235
236
237#define Rx_Halted 0x00008000
238#define Rx_Good 0x00004000
239#define Rx_RxPar 0x00002000
240
241#define Rx_LongErr 0x00000800
242#define Rx_Over 0x00000400
243#define Rx_CRCErr 0x00000200
244#define Rx_Align 0x00000100
245#define Rx_10Stat 0x00000080
246#define Rx_IntRx 0x00000040
247#define Rx_CtlRecd 0x00000020
248
249#define Rx_Stat_Mask 0x0000EFC0
250
251
252#define Int_NRAbtEn 0x00000800
253#define Int_TxCtlCmpEn 0x00000400
254#define Int_DmParErrEn 0x00000200
255#define Int_DParDEn 0x00000100
256#define Int_EarNotEn 0x00000080
257#define Int_DParErrEn 0x00000040
258#define Int_SSysErrEn 0x00000020
259#define Int_RMasAbtEn 0x00000010
260#define Int_RTargAbtEn 0x00000008
261#define Int_STargAbtEn 0x00000004
262#define Int_BLExEn 0x00000002
263#define Int_FDAExEn 0x00000001
264
265
266
267#define Int_NRabt 0x00004000
268#define Int_DmParErrStat 0x00002000
269#define Int_BLEx 0x00001000
270#define Int_FDAEx 0x00000800
271#define Int_IntNRAbt 0x00000400
272#define Int_IntCmp 0x00000200
273#define Int_IntExBD 0x00000100
274#define Int_DmParErr 0x00000080
275#define Int_IntEarNot 0x00000040
276#define Int_SWInt 0x00000020
277#define Int_IntBLEx 0x00000010
278#define Int_IntFDAEx 0x00000008
279#define Int_IntPCI 0x00000004
280#define Int_IntMacRx 0x00000002
281#define Int_IntMacTx 0x00000001
282
283
284#define MD_CA_PreSup 0x00001000
285#define MD_CA_Busy 0x00000800
286#define MD_CA_Wr 0x00000400
287
288
289
290
291
292
293
/*
 * Frame Descriptor header shared by Tx/Rx/FrFD descriptors.  Lives in
 * DMA-coherent memory and is read/written by the chip, hence volatile.
 * All fields are little-endian on the wire (cpu_to_le32/le32_to_cpu).
 */
struct FDesc {
	volatile __u32 FDNext;		/* bus address of next FD (| FD_Next_EOL) */
	volatile __u32 FDSystem;	/* driver cookie; used as tx_skbs[] index */
	volatile __u32 FDStat;
	volatile __u32 FDCtl;		/* FD_CownsFD | BD count | options */
};

/* Buffer Descriptor: one DMA data buffer attached to an FD. */
struct BDesc {
	volatile __u32 BuffData;	/* bus address of the data buffer */
	volatile __u32 BDCtl;		/* BD_CownsBD | id | length */
};
306
307#define FD_ALIGN 16
308
309
310#define FD_FDLength_MASK 0x0000FFFF
311#define FD_BDCnt_MASK 0x001F0000
312#define FD_FrmOpt_MASK 0x7C000000
313#define FD_FrmOpt_BigEndian 0x40000000
314#define FD_FrmOpt_IntTx 0x20000000
315#define FD_FrmOpt_NoCRC 0x10000000
316#define FD_FrmOpt_NoPadding 0x08000000
317#define FD_FrmOpt_Packing 0x04000000
318#define FD_CownsFD 0x80000000
319#define FD_Next_EOL 0x00000001
320#define FD_BDCnt_SHIFT 16
321
322
323#define BD_BuffLength_MASK 0x0000FFFF
324#define BD_RxBDID_MASK 0x00FF0000
325#define BD_RxBDSeqN_MASK 0x7F000000
326#define BD_CownsBD 0x80000000
327#define BD_RxBDID_SHIFT 16
328#define BD_RxBDSeqN_SHIFT 24
329
330
331
332#undef NO_CHECK_CARRIER
333
334#ifdef NO_CHECK_CARRIER
335#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
336 Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
337 Tx_En)
338#else
339#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
340 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
341 Tx_En)
342#endif
343#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
344 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn)
345#define INT_EN_CMD (Int_NRAbtEn | \
346 Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
347 Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \
348 Int_STargAbtEn | \
349 Int_BLExEn | Int_FDAExEn)
350#define DMA_CTL_CMD DMA_BURST_SIZE
351#define HAVE_DMA_RXALIGN(lp) likely((lp)->boardtype != TC35815CF)
352
353
354#define DMA_BURST_SIZE 32
355#define TX_THRESHOLD 1024
356#define TX_THRESHOLD_MAX 1536
357#define TX_THRESHOLD_KEEP_LIMIT 10
358
359
360#ifdef TC35815_USE_PACKEDBUFFER
361#define FD_PAGE_NUM 2
362#define RX_BUF_NUM 8
363#define RX_FD_NUM 250
364#define TX_FD_NUM 128
365#define RX_BUF_SIZE PAGE_SIZE
366#else
367#define FD_PAGE_NUM 4
368#define RX_BUF_NUM 128
369#define RX_FD_NUM 256
370#define TX_FD_NUM 128
371#if RX_CTL_CMD & Rx_LongEn
372#define RX_BUF_SIZE PAGE_SIZE
373#elif RX_CTL_CMD & Rx_StripCRC
374#define RX_BUF_SIZE ALIGN(ETH_FRAME_LEN + 4 + 2, 32)
375#else
376#define RX_BUF_SIZE ALIGN(ETH_FRAME_LEN + 2, 32)
377#endif
378#endif
379#define RX_FD_RESERVE (2 / 2)
380#define NAPI_WEIGHT 16
381
/* Tx descriptor: header plus a single BD (second BD slot unused). */
struct TxFD {
	struct FDesc fd;
	struct BDesc bd;
	struct BDesc unused;
};

/* Rx descriptor: header followed by a chip-determined number of BDs. */
struct RxFD {
	struct FDesc fd;
	struct BDesc bd[0];	/* variable number of BDs */
};

/* Free-buffer list descriptor: one BD per receive buffer. */
struct FrFD {
	struct FDesc fd;
	struct BDesc bd[RX_BUF_NUM];
};
397
398
399#define tc_readl(addr) readl(addr)
400#define tc_writel(d, addr) writel(d, addr)
401
402#define TC35815_TX_TIMEOUT msecs_to_jiffies(400)
403
404
/*
 * States for the link/autonegotiation timer (lp->timer).  The state
 * machine itself is implemented in tc35815_timer(), outside this chunk.
 */
enum tc35815_timer_state {
	arbwait = 0,	/* waiting for autonegotiation arbitration */
	lupwait = 1,	/* waiting for link-up */
	ltrywait = 2,
	asleep = 3,	/* timer idle */
	lcheck = 4,	/* periodic link check */
};
412
413
/* Per-interface private state, allocated with the net_device. */
struct tc35815_local {
	struct pci_dev *pci_dev;

	struct net_device *dev;
	struct napi_struct napi;

	/* statistics */
	struct net_device_stats stats;
	struct {
		int max_tx_qlen;	/* high-water mark of tx queue length */
		int tx_ints;		/* # of tx interrupts */
		int rx_ints;		/* # of rx interrupts */
		int tx_underrun;	/* # of tx underruns */
	} lstats;

	/*
	 * Serializes descriptor-ring state and chip (re)initialization;
	 * taken with irqs disabled (see tc35815_send_packet/tx_timeout).
	 */
	spinlock_t lock;

	int phy_addr;			/* MII address of the PHY found at probe */
	int fullduplex;
	unsigned short saved_lpa;	/* last link-partner ability seen */
	struct timer_list timer;	/* link state machine timer */
	enum tc35815_timer_state timer_state;
	unsigned int timer_ticks;	/* ticks spent in the current state */

	/*
	 * Descriptor memory: one pci_alloc_consistent() region (fd_buf)
	 * carved into RxFDs, TxFDs and the FrFD by tc35815_init_queues().
	 */
	void * fd_buf;			/* CPU address of descriptor region */
	dma_addr_t fd_buf_dma;		/* bus address of the same region */
	struct TxFD *tfd_base;
	unsigned int tfd_start;		/* next TxFD to fill (producer) */
	unsigned int tfd_end;		/* next TxFD to reclaim (consumer) */
	struct RxFD *rfd_base;
	struct RxFD *rfd_limit;
	struct RxFD *rfd_cur;
	struct FrFD *fbl_ptr;		/* free buffer list descriptor */
#ifdef TC35815_USE_PACKEDBUFFER
	unsigned char fbl_curid;
	void * data_buf[RX_BUF_NUM];	/* page-sized packed rx buffers */
	dma_addr_t data_buf_dma[RX_BUF_NUM];
	struct {
		struct sk_buff *skb;
		dma_addr_t skb_dma;
	} tx_skbs[TX_FD_NUM];
#else
	unsigned int fbl_count;		/* # of valid entries in rx_skbs[] */
	struct {
		struct sk_buff *skb;
		dma_addr_t skb_dma;
	} tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
#endif
	struct mii_if_info mii;
	unsigned short mii_id[2];	/* PHY id registers (PHYSID1/2) */
	u32 msg_enable;			/* NETIF_MSG_* mask */
	board_t boardtype;
};
484
485static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
486{
487 return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
488}
489#ifdef DEBUG
490static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
491{
492 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
493}
494#endif
495#ifdef TC35815_USE_PACKEDBUFFER
496static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
497{
498 int i;
499 for (i = 0; i < RX_BUF_NUM; i++) {
500 if (bus >= lp->data_buf_dma[i] &&
501 bus < lp->data_buf_dma[i] + PAGE_SIZE)
502 return (void *)((u8 *)lp->data_buf[i] +
503 (bus - lp->data_buf_dma[i]));
504 }
505 return NULL;
506}
507
508#define TC35815_DMA_SYNC_ONDEMAND
/*
 * Allocate one page-sized rx buffer and its DMA mapping.
 * With TC35815_DMA_SYNC_ONDEMAND the page comes from the page allocator
 * and is streaming-mapped (caller must sync on demand); otherwise a
 * coherent buffer is used.  Returns the CPU pointer and fills
 * *dma_handle, or returns NULL on failure.  GFP_ATOMIC: may be called
 * from non-sleeping context.
 */
static void* alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
{
#ifdef TC35815_DMA_SYNC_ONDEMAND
	void *buf;

	/* pci_map + pci_sync instead of pci_alloc_consistent */
	if ((buf = (void *)__get_free_page(GFP_ATOMIC)) == NULL)
		return NULL;
	*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
				     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(*dma_handle)) {
		free_page((unsigned long)buf);
		return NULL;
	}
	return buf;
#else
	return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle);
#endif
}
528
/* Release a buffer from alloc_rxbuf_page(): unmap (or free coherent) then free the page. */
static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle)
{
#ifdef TC35815_DMA_SYNC_ONDEMAND
	pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE);
	free_page((unsigned long)buf);
#else
	pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle);
#endif
}
538#else
/*
 * Allocate and DMA-map one receive skb of RX_BUF_SIZE bytes.
 * The buffer is mapped BEFORE skb_reserve(skb, 2): the device DMAs to
 * the original skb->data, while skb->data is shifted by 2 so the IP
 * header lands 4-byte aligned — presumably relying on the chip's
 * DMA_RxAlign 2-byte offset (see HAVE_DMA_RXALIGN); confirm against
 * the rx path before changing.  Returns NULL on alloc/map failure.
 */
static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
				       struct pci_dev *hwdev,
				       dma_addr_t *dma_handle)
{
	struct sk_buff *skb;
	skb = dev_alloc_skb(RX_BUF_SIZE);
	if (!skb)
		return NULL;
	*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
				     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(*dma_handle)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	skb_reserve(skb, 2);	/* make IP header 4-byte aligned */
	return skb;
}
556
/* Release an rx skb from alloc_rxbuf_skb(): unmap first, then free (any context). */
static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
{
	pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(skb);
}
563#endif
564
565
566
567static int tc35815_open(struct net_device *dev);
568static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
569static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
570#ifdef TC35815_NAPI
571static int tc35815_rx(struct net_device *dev, int limit);
572static int tc35815_poll(struct napi_struct *napi, int budget);
573#else
574static void tc35815_rx(struct net_device *dev);
575#endif
576static void tc35815_txdone(struct net_device *dev);
577static int tc35815_close(struct net_device *dev);
578static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
579static void tc35815_set_multicast_list(struct net_device *dev);
580static void tc35815_tx_timeout(struct net_device *dev);
581static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
582#ifdef CONFIG_NET_POLL_CONTROLLER
583static void tc35815_poll_controller(struct net_device *dev);
584#endif
585static const struct ethtool_ops tc35815_ethtool_ops;
586
587
588static void tc35815_chip_reset(struct net_device *dev);
589static void tc35815_chip_init(struct net_device *dev);
590static void tc35815_find_phy(struct net_device *dev);
591static void tc35815_phy_chip_init(struct net_device *dev);
592
593#ifdef DEBUG
594static void panic_queues(struct net_device *dev);
595#endif
596
597static void tc35815_timer(unsigned long data);
598static void tc35815_start_auto_negotiation(struct net_device *dev,
599 struct ethtool_cmd *ep);
600static int tc_mdio_read(struct net_device *dev, int phy_id, int location);
601static void tc_mdio_write(struct net_device *dev, int phy_id, int location,
602 int val);
603
604#ifdef CONFIG_CPU_TX49XX
605
606
607
608
609
610static int __devinit tc35815_mac_match(struct device *dev, void *data)
611{
612 struct platform_device *plat_dev = to_platform_device(dev);
613 struct pci_dev *pci_dev = data;
614 unsigned int id = pci_dev->irq;
615 return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
616}
617
/*
 * Fetch the MAC address from the matching "tc35815-mac" platform
 * device's platform_data (TX49XX boards keep it there instead of in a
 * PROM).  Returns 0 if a valid ethernet address was copied into
 * dev->dev_addr, -ENODEV otherwise.
 */
static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	struct device *pd = bus_find_device(&platform_bus_type, NULL,
					    lp->pci_dev, tc35815_mac_match);
	if (pd) {
		if (pd->platform_data)
			memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
		put_device(pd);	/* drop the ref taken by bus_find_device */
		return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
	}
	return -ENODEV;
}
631#else
/* Non-TX49XX build: no platform-data MAC source available. */
static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
{
	return -ENODEV;
}
636#endif
637
/*
 * Read the station address from the on-board serial PROM into
 * dev->dev_addr.  Each PROM word (offsets 2..4) yields two MAC bytes,
 * low byte first.  Falls back to the platform-device address if the
 * PROM does not contain a valid ethernet address.
 * NOTE(review): the PROM_Busy polls have no timeout — a wedged PROM
 * would hang the probe; confirm this is acceptable for this hardware.
 */
static int __devinit tc35815_init_dev_addr (struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int i;

	while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
		;
	for (i = 0; i < 6; i += 2) {
		unsigned short data;
		/* kick off a read of PROM word (i/2 + 2), wait for completion */
		tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
		while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
			;
		data = tc_readl(&tr->PROM_Data);
		dev->dev_addr[i] = data & 0xff;
		dev->dev_addr[i+1] = data >> 8;
	}
	if (!is_valid_ether_addr(dev->dev_addr))
		return tc35815_read_plat_dev_addr(dev);
	return 0;
}
659
/*
 * PCI probe.  Allocates the net_device, maps BAR 1 (MMIO register
 * block), installs the net_device entry points, resets the chip, reads
 * the station address (PROM, then platform data, then random fallback),
 * registers the interface and primes the MII/PHY state.
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths.
 */
static int __devinit tc35815_init_one (struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	void __iomem *ioaddr = NULL;
	struct net_device *dev;
	struct tc35815_local *lp;
	int rc;
	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;

	static int printed_version;
	if (!printed_version++) {
		/* one-time banner; version contains no format specifiers */
		printk(version);
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "speed:%d duplex:%d doforce:%d\n",
			   options.speed, options.duplex, options.doforce);
	}

	if (!pdev->irq) {
		dev_warn(&pdev->dev, "no IRQ assigned.\n");
		return -ENODEV;
	}

	/* dev and priv zeroed in alloc_etherdev */
	dev = alloc_etherdev (sizeof (*lp));
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc new ethernet\n");
		return -ENOMEM;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = dev->priv;
	lp->dev = dev;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device (pdev);
	if (rc)
		goto err_out;

	mmio_start = pci_resource_start (pdev, 1);
	mmio_end = pci_resource_end (pdev, 1);
	mmio_flags = pci_resource_flags (pdev, 1);
	mmio_len = pci_resource_len (pdev, 1);

	/* make sure PCI base addr 1 is MMIO */
	if (!(mmio_flags & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
		rc = -ENODEV;
		goto err_out;
	}

	/* check for weird/broken PCI region reporting */
	if ((mmio_len < sizeof(struct tc35815_regs))) {
		dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out;
	}

	rc = pci_request_regions (pdev, MODNAME);
	if (rc)
		goto err_out;

	pci_set_master (pdev);

	/* ioremap MMIO region */
	ioaddr = ioremap (mmio_start, mmio_len);
	if (ioaddr == NULL) {
		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res;
	}

	/* Initialize the device structure. */
	dev->open = tc35815_open;
	dev->hard_start_xmit = tc35815_send_packet;
	dev->stop = tc35815_close;
	dev->get_stats = tc35815_get_stats;
	dev->set_multicast_list = tc35815_set_multicast_list;
	dev->do_ioctl = tc35815_ioctl;
	dev->ethtool_ops = &tc35815_ethtool_ops;
	dev->tx_timeout = tc35815_tx_timeout;
	dev->watchdog_timeo = TC35815_TX_TIMEOUT;
#ifdef TC35815_NAPI
	netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tc35815_poll_controller;
#endif

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) ioaddr;

	spin_lock_init(&lp->lock);
	lp->pci_dev = pdev;
	lp->boardtype = ent->driver_data;

	lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
	pci_set_drvdata(pdev, dev);

	/* Soft reset the chip. */
	tc35815_chip_reset(dev);

	/* Retrieve the ethernet address. */
	if (tc35815_init_dev_addr(dev)) {
		dev_warn(&pdev->dev, "not valid ether addr\n");
		random_ether_addr(dev->dev_addr);
	}

	rc = register_netdev (dev);
	if (rc)
		goto err_out_unmap;

	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	printk(KERN_INFO "%s: %s at 0x%lx, "
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
		"IRQ %d\n",
		dev->name,
		board_info[ent->driver_data].name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	/* Link-state timer and MII bookkeeping. */
	setup_timer(&lp->timer, tc35815_timer, (unsigned long) dev);
	lp->mii.dev = dev;
	lp->mii.mdio_read = tc_mdio_read;
	lp->mii.mdio_write = tc_mdio_write;
	lp->mii.phy_id_mask = 0x1f;
	lp->mii.reg_num_mask = 0x1f;
	tc35815_find_phy(dev);
	lp->mii.phy_id = lp->phy_addr;
	lp->mii.full_duplex = 0;
	lp->mii.force_media = 0;

	return 0;

err_out_unmap:
	iounmap(ioaddr);
err_out_free_res:
	pci_release_regions (pdev);
err_out:
	free_netdev (dev);
	return rc;
}
806
807
808static void __devexit tc35815_remove_one (struct pci_dev *pdev)
809{
810 struct net_device *dev = pci_get_drvdata (pdev);
811 unsigned long mmio_addr;
812
813 mmio_addr = dev->base_addr;
814
815 unregister_netdev (dev);
816
817 if (mmio_addr) {
818 iounmap ((void __iomem *)mmio_addr);
819 pci_release_regions (pdev);
820 }
821
822 free_netdev (dev);
823
824 pci_set_drvdata (pdev, NULL);
825}
826
827static int
828tc35815_init_queues(struct net_device *dev)
829{
830 struct tc35815_local *lp = dev->priv;
831 int i;
832 unsigned long fd_addr;
833
834 if (!lp->fd_buf) {
835 BUG_ON(sizeof(struct FDesc) +
836 sizeof(struct BDesc) * RX_BUF_NUM +
837 sizeof(struct FDesc) * RX_FD_NUM +
838 sizeof(struct TxFD) * TX_FD_NUM >
839 PAGE_SIZE * FD_PAGE_NUM);
840
841 if ((lp->fd_buf = pci_alloc_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, &lp->fd_buf_dma)) == 0)
842 return -ENOMEM;
843 for (i = 0; i < RX_BUF_NUM; i++) {
844#ifdef TC35815_USE_PACKEDBUFFER
845 if ((lp->data_buf[i] = alloc_rxbuf_page(lp->pci_dev, &lp->data_buf_dma[i])) == NULL) {
846 while (--i >= 0) {
847 free_rxbuf_page(lp->pci_dev,
848 lp->data_buf[i],
849 lp->data_buf_dma[i]);
850 lp->data_buf[i] = NULL;
851 }
852 pci_free_consistent(lp->pci_dev,
853 PAGE_SIZE * FD_PAGE_NUM,
854 lp->fd_buf,
855 lp->fd_buf_dma);
856 lp->fd_buf = NULL;
857 return -ENOMEM;
858 }
859#else
860 lp->rx_skbs[i].skb =
861 alloc_rxbuf_skb(dev, lp->pci_dev,
862 &lp->rx_skbs[i].skb_dma);
863 if (!lp->rx_skbs[i].skb) {
864 while (--i >= 0) {
865 free_rxbuf_skb(lp->pci_dev,
866 lp->rx_skbs[i].skb,
867 lp->rx_skbs[i].skb_dma);
868 lp->rx_skbs[i].skb = NULL;
869 }
870 pci_free_consistent(lp->pci_dev,
871 PAGE_SIZE * FD_PAGE_NUM,
872 lp->fd_buf,
873 lp->fd_buf_dma);
874 lp->fd_buf = NULL;
875 return -ENOMEM;
876 }
877#endif
878 }
879 printk(KERN_DEBUG "%s: FD buf %p DataBuf",
880 dev->name, lp->fd_buf);
881#ifdef TC35815_USE_PACKEDBUFFER
882 printk(" DataBuf");
883 for (i = 0; i < RX_BUF_NUM; i++)
884 printk(" %p", lp->data_buf[i]);
885#endif
886 printk("\n");
887 } else {
888 for (i = 0; i < FD_PAGE_NUM; i++) {
889 clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE));
890 }
891 }
892 fd_addr = (unsigned long)lp->fd_buf;
893
894
895 lp->rfd_base = (struct RxFD *)fd_addr;
896 fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
897 for (i = 0; i < RX_FD_NUM; i++) {
898 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
899 }
900 lp->rfd_cur = lp->rfd_base;
901 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
902
903
904 lp->tfd_base = (struct TxFD *)fd_addr;
905 fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
906 for (i = 0; i < TX_FD_NUM; i++) {
907 lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
908 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
909 lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
910 }
911 lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
912 lp->tfd_start = 0;
913 lp->tfd_end = 0;
914
915
916 lp->fbl_ptr = (struct FrFD *)fd_addr;
917 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
918 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
919#ifndef TC35815_USE_PACKEDBUFFER
920
921
922
923
924
925 lp->fbl_count = 0;
926 for (i = 0; i < RX_BUF_NUM; i++) {
927 if (lp->rx_skbs[i].skb) {
928 if (i != lp->fbl_count) {
929 lp->rx_skbs[lp->fbl_count].skb =
930 lp->rx_skbs[i].skb;
931 lp->rx_skbs[lp->fbl_count].skb_dma =
932 lp->rx_skbs[i].skb_dma;
933 }
934 lp->fbl_count++;
935 }
936 }
937#endif
938 for (i = 0; i < RX_BUF_NUM; i++) {
939#ifdef TC35815_USE_PACKEDBUFFER
940 lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
941#else
942 if (i >= lp->fbl_count) {
943 lp->fbl_ptr->bd[i].BuffData = 0;
944 lp->fbl_ptr->bd[i].BDCtl = 0;
945 continue;
946 }
947 lp->fbl_ptr->bd[i].BuffData =
948 cpu_to_le32(lp->rx_skbs[i].skb_dma);
949#endif
950
951 lp->fbl_ptr->bd[i].BDCtl =
952 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
953 RX_BUF_SIZE);
954 }
955#ifdef TC35815_USE_PACKEDBUFFER
956 lp->fbl_curid = 0;
957#endif
958
959 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
960 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
961 return 0;
962}
963
/*
 * Drop every in-flight Tx skb (unmap before freeing) and rebuild the
 * descriptor queues via tc35815_init_queues().  FDSystem stores the
 * tx_skbs[] index of the skb attached to each TxFD, or 0xffffffff when
 * the descriptor is idle.
 */
static void
tc35815_clear_queues(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	int i;

	for (i = 0; i < TX_FD_NUM; i++) {
		u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
		struct sk_buff *skb =
			fdsystem != 0xffffffff ?
			lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
		if (lp->tx_skbs[i].skb != skb) {
			printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
			panic_queues(dev);
		}
#else
		BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
		if (skb) {
			/* unmap uses skb->len, so it must precede the free */
			pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
			lp->tx_skbs[i].skb = NULL;
			lp->tx_skbs[i].skb_dma = 0;
			dev_kfree_skb_any(skb);
		}
		lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
	}

	tc35815_init_queues(dev);
}
994
995static void
996tc35815_free_queues(struct net_device *dev)
997{
998 struct tc35815_local *lp = dev->priv;
999 int i;
1000
1001 if (lp->tfd_base) {
1002 for (i = 0; i < TX_FD_NUM; i++) {
1003 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
1004 struct sk_buff *skb =
1005 fdsystem != 0xffffffff ?
1006 lp->tx_skbs[fdsystem].skb : NULL;
1007#ifdef DEBUG
1008 if (lp->tx_skbs[i].skb != skb) {
1009 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
1010 panic_queues(dev);
1011 }
1012#else
1013 BUG_ON(lp->tx_skbs[i].skb != skb);
1014#endif
1015 if (skb) {
1016 dev_kfree_skb(skb);
1017 pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
1018 lp->tx_skbs[i].skb = NULL;
1019 lp->tx_skbs[i].skb_dma = 0;
1020 }
1021 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
1022 }
1023 }
1024
1025 lp->rfd_base = NULL;
1026 lp->rfd_limit = NULL;
1027 lp->rfd_cur = NULL;
1028 lp->fbl_ptr = NULL;
1029
1030 for (i = 0; i < RX_BUF_NUM; i++) {
1031#ifdef TC35815_USE_PACKEDBUFFER
1032 if (lp->data_buf[i]) {
1033 free_rxbuf_page(lp->pci_dev,
1034 lp->data_buf[i], lp->data_buf_dma[i]);
1035 lp->data_buf[i] = NULL;
1036 }
1037#else
1038 if (lp->rx_skbs[i].skb) {
1039 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
1040 lp->rx_skbs[i].skb_dma);
1041 lp->rx_skbs[i].skb = NULL;
1042 }
1043#endif
1044 }
1045 if (lp->fd_buf) {
1046 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
1047 lp->fd_buf, lp->fd_buf_dma);
1048 lp->fd_buf = NULL;
1049 }
1050}
1051
1052static void
1053dump_txfd(struct TxFD *fd)
1054{
1055 printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
1056 le32_to_cpu(fd->fd.FDNext),
1057 le32_to_cpu(fd->fd.FDSystem),
1058 le32_to_cpu(fd->fd.FDStat),
1059 le32_to_cpu(fd->fd.FDCtl));
1060 printk("BD: ");
1061 printk(" %08x %08x",
1062 le32_to_cpu(fd->bd.BuffData),
1063 le32_to_cpu(fd->bd.BDCtl));
1064 printk("\n");
1065}
1066
1067static int
1068dump_rxfd(struct RxFD *fd)
1069{
1070 int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
1071 if (bd_count > 8)
1072 bd_count = 8;
1073 printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
1074 le32_to_cpu(fd->fd.FDNext),
1075 le32_to_cpu(fd->fd.FDSystem),
1076 le32_to_cpu(fd->fd.FDStat),
1077 le32_to_cpu(fd->fd.FDCtl));
1078 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
1079 return 0;
1080 printk("BD: ");
1081 for (i = 0; i < bd_count; i++)
1082 printk(" %08x %08x",
1083 le32_to_cpu(fd->bd[i].BuffData),
1084 le32_to_cpu(fd->bd[i].BDCtl));
1085 printk("\n");
1086 return bd_count;
1087}
1088
1089#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER)
1090static void
1091dump_frfd(struct FrFD *fd)
1092{
1093 int i;
1094 printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
1095 le32_to_cpu(fd->fd.FDNext),
1096 le32_to_cpu(fd->fd.FDSystem),
1097 le32_to_cpu(fd->fd.FDStat),
1098 le32_to_cpu(fd->fd.FDCtl));
1099 printk("BD: ");
1100 for (i = 0; i < RX_BUF_NUM; i++)
1101 printk(" %08x %08x",
1102 le32_to_cpu(fd->bd[i].BuffData),
1103 le32_to_cpu(fd->bd[i].BDCtl));
1104 printk("\n");
1105}
1106#endif
1107
1108#ifdef DEBUG
/*
 * Debug-only last resort: dump every descriptor queue and panic.
 * Called when the tx_skbs bookkeeping is found inconsistent.
 */
static void
panic_queues(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	int i;

	printk("TxFD base %p, start %u, end %u\n",
	       lp->tfd_base, lp->tfd_start, lp->tfd_end);
	printk("RxFD base %p limit %p cur %p\n",
	       lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
	printk("FrFD %p\n", lp->fbl_ptr);
	for (i = 0; i < TX_FD_NUM; i++)
		dump_txfd(&lp->tfd_base[i]);
	for (i = 0; i < RX_FD_NUM; i++) {
		/* RxFDs are variable-sized; skip over the BDs just dumped */
		int bd_count = dump_rxfd(&lp->rfd_base[i]);
		i += (bd_count + 1) / 2;
	}
	dump_frfd(lp->fbl_ptr);
	panic("%s: Illegal queue state.", dev->name);
}
1129#endif
1130
/*
 * Debug helper: print an ethernet header as "src => dst : ethertype".
 * `add` points at the start of the frame (dst[6] src[6] type[2]).
 */
static void print_eth(char *add)
{
	const unsigned char *hdr = (const unsigned char *)add;
	int n;

	printk("print_eth(%p)\n", add);
	for (n = 6; n < 12; n++)
		printk(" %2.2X", hdr[n]);
	printk(" =>");
	for (n = 0; n < 6; n++)
		printk(" %2.2X", hdr[n]);
	printk(" : %2.2X%2.2X\n", hdr[12], hdr[13]);
}
1143
1144static int tc35815_tx_full(struct net_device *dev)
1145{
1146 struct tc35815_local *lp = dev->priv;
1147 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end);
1148}
1149
/*
 * Hard restart after an error or Tx timeout: optionally soft-reset the
 * PHY, then reset the chip, clear/rebuild the descriptor queues,
 * re-initialize the chip and reload the multicast filter.
 * NOTE(review): the visible caller (tc35815_tx_timeout) holds lp->lock;
 * confirm the same for any new call site.
 */
static void tc35815_restart(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	int pid = lp->phy_addr;
	int do_phy_reset = 1;
	del_timer(&lp->timer);		/* kill link timer if running */

	if (lp->mii_id[0] == 0x0016 && (lp->mii_id[1] & 0xfc00) == 0xf800) {
		/* This PHY (id 0x0016 / 0xf8xx) is deliberately not
		 * reset — presumably a built-in PHY that must not be
		 * soft-reset here; confirm against its datasheet. */
		do_phy_reset = 0;
	}
	if (do_phy_reset) {
		int timeout;
		tc_mdio_write(dev, pid, MII_BMCR, BMCR_RESET);
		timeout = 100;	/* bounded poll: 100 iterations, 1us apart */
		while (--timeout) {
			if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_RESET))
				break;
			udelay(1);
		}
		if (!timeout)
			printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
	}

	tc35815_chip_reset(dev);
	tc35815_clear_queues(dev);
	tc35815_chip_init(dev);
	/* chip_init reprograms the CAM, so reload the multicast filter */
	tc35815_set_multicast_list(dev);
}
1180
/*
 * netdev watchdog hook: transmission stalled for longer than
 * watchdog_timeo.  Log Tx_Stat, restart the whole chip under lp->lock,
 * count a tx error and re-wake the queue if ring space is available.
 */
static void tc35815_tx_timeout(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;

	printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
	       dev->name, tc_readl(&tr->Tx_Stat));

	/* Try to restart the adaptor. */
	spin_lock_irq(&lp->lock);
	tc35815_restart(dev);
	spin_unlock_irq(&lp->lock);

	lp->stats.tx_errors++;

	/* If we have space available to accept new transmits, do so. */
	if (!tc35815_tx_full(dev))
		netif_wake_queue(dev);
}
1210
1211
1212
1213
1214
1215
1216
1217
1218
/*
 * Open/initialize the controller (ifconfig up): grab the shared IRQ,
 * reset the chip, (re)build the descriptor queues, enable NAPI, program
 * the chip under lp->lock and start the Tx queue.
 * Returns 0 on success, -EAGAIN if the IRQ or queue memory could not
 * be obtained.
 */
static int
tc35815_open(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;

	/* IRQ is shared between TC35815 instances (IRQF_SHARED). */
	if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, dev->name, dev)) {
		return -EAGAIN;
	}

	del_timer(&lp->timer);		/* kill link timer if running */
	tc35815_chip_reset(dev);

	if (tc35815_init_queues(dev) != 0) {
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

#ifdef TC35815_NAPI
	napi_enable(&lp->napi);
#endif

	/* Program the chip (station address, rx/tx enable, ...). */
	spin_lock_irq(&lp->lock);
	tc35815_chip_init(dev);
	spin_unlock_irq(&lp->lock);

	/* Ready to accept transmit requests from the queueing layer. */
	netif_start_queue(dev);

	return 0;
}
1256
1257
1258
1259
1260
1261
/*
 * hard_start_xmit: queue one skb on the Tx descriptor ring.
 *
 * Under lp->lock (irqs off, the interrupt handler takes the same lock):
 * reclaim completed descriptors when more than half the ring is
 * outstanding, DMA-map the skb, fill the next TxFD with a single BD,
 * then either kick the chip (ring was idle: tfd_start == tfd_end) by
 * writing TxFrmPtr, or just link the descriptor into the running chain.
 * Stops the netif queue when the ring becomes full; it is re-woken
 * later (e.g. by tc35815_tx_timeout, and presumably by the Tx-done
 * path — not visible in this chunk).
 * Always returns 0: the skb is consumed.
 */
static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	struct TxFD *txfd;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	/* failsafe: reclaim finished TxFDs if over half the ring is in use */
	if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
	    TX_FD_NUM / 2)
		tc35815_txdone(dev);

	if (netif_msg_pktdata(lp))
		print_eth(skb->data);
#ifdef DEBUG
	if (lp->tx_skbs[lp->tfd_start].skb) {
		printk("%s: tx_skbs conflict.\n", dev->name);
		panic_queues(dev);
	}
#else
	BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
#endif
	lp->tx_skbs[lp->tfd_start].skb = skb;
	lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);

	/* Fill in the Tx descriptor.  FDSystem records the tx_skbs[]
	 * index so completion/teardown can find the skb again. */
	txfd = &lp->tfd_base[lp->tfd_start];
	txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
	txfd->bd.BDCtl = cpu_to_le32(skb->len);
	txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
	txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));

	if (lp->tfd_start == lp->tfd_end) {
		struct tc35815_regs __iomem *tr =
			(struct tc35815_regs __iomem *)dev->base_addr;
		/* ring was idle: terminate the chain and start the DMA */
		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
#ifdef GATHER_TXINT
		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
#endif
		if (netif_msg_tx_queued(lp)) {
			printk("%s: starting TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
		tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
	} else {
		/* DMA already running: just splice into the chain */
		txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
		if (netif_msg_tx_queued(lp)) {
			printk("%s: queueing TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
	}
	lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;

	dev->trans_start = jiffies;

	/* If we just used up the very last entry in the
	 * TX ring on this device, tell the queueing
	 * layer to send no more.
	 */
	if (tc35815_tx_full(dev)) {
		if (netif_msg_tx_queued(lp))
			printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
1350
1351#define FATAL_ERROR_INT \
1352 (Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
1353static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1354{
1355 static int count;
1356 printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):",
1357 dev->name, status);
1358 if (status & Int_IntPCI)
1359 printk(" IntPCI");
1360 if (status & Int_DmParErr)
1361 printk(" DmParErr");
1362 if (status & Int_IntNRAbt)
1363 printk(" IntNRAbt");
1364 printk("\n");
1365 if (count++ > 100)
1366 panic("%s: Too many fatal errors.", dev->name);
1367 printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
1368
1369 tc35815_restart(dev);
1370}
1371
1372#ifdef TC35815_NAPI
1373static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1374#else
1375static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1376#endif
1377{
1378 struct tc35815_local *lp = dev->priv;
1379 struct tc35815_regs __iomem *tr =
1380 (struct tc35815_regs __iomem *)dev->base_addr;
1381 int ret = -1;
1382
1383
1384 if (status & FATAL_ERROR_INT) {
1385 tc35815_fatal_error_interrupt(dev, status);
1386 return 0;
1387 }
1388
1389 if (status & Int_IntFDAEx) {
1390
1391 tc_writel(tc_readl(&tr->Int_En) & ~Int_FDAExEn, &tr->Int_En);
1392 printk(KERN_WARNING
1393 "%s: Free Descriptor Area Exhausted (%#x).\n",
1394 dev->name, status);
1395 lp->stats.rx_dropped++;
1396 ret = 0;
1397 }
1398 if (status & Int_IntBLEx) {
1399
1400 tc_writel(tc_readl(&tr->Int_En) & ~Int_BLExEn, &tr->Int_En);
1401 printk(KERN_WARNING
1402 "%s: Buffer List Exhausted (%#x).\n",
1403 dev->name, status);
1404 lp->stats.rx_dropped++;
1405 ret = 0;
1406 }
1407 if (status & Int_IntExBD) {
1408 printk(KERN_WARNING
1409 "%s: Excessive Buffer Descriptiors (%#x).\n",
1410 dev->name, status);
1411 lp->stats.rx_length_errors++;
1412 ret = 0;
1413 }
1414
1415
1416 if (status & Int_IntMacRx) {
1417
1418#ifdef TC35815_NAPI
1419 ret = tc35815_rx(dev, limit);
1420#else
1421 tc35815_rx(dev);
1422 ret = 0;
1423#endif
1424 lp->lstats.rx_ints++;
1425 }
1426 if (status & Int_IntMacTx) {
1427
1428 lp->lstats.tx_ints++;
1429 tc35815_txdone(dev);
1430 netif_wake_queue(dev);
1431 ret = 0;
1432 }
1433 return ret;
1434}
1435
1436
1437
1438
1439
/*
 * Interrupt handler.
 *
 * NAPI build: mask further interrupts via DMA_Ctl and schedule the
 * poll routine; the trailing Int_Src read flushes the posted write.
 * If DMA_IntMask was already set we are being called while polling —
 * report IRQ_NONE (and BUG() on a failed schedule_prep, which should
 * be impossible in that state).
 *
 * Non-NAPI build: read-and-clear Int_Src and dispatch inline under
 * lp->lock.
 */
static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
#ifdef TC35815_NAPI
	u32 dmactl = tc_readl(&tr->DMA_Ctl);

	if (!(dmactl & DMA_IntMask)) {
		/* disable interrupts */
		tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
		if (netif_rx_schedule_prep(dev, &lp->napi))
			__netif_rx_schedule(dev, &lp->napi);
		else {
			printk(KERN_ERR "%s: interrupt taken in poll\n",
			       dev->name);
			BUG();
		}
		(void)tc_readl(&tr->Int_Src);	/* flush posted write */
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
#else
	int handled;
	u32 status;

	spin_lock(&lp->lock);
	status = tc_readl(&tr->Int_Src);
	tc_writel(status, &tr->Int_Src);	/* write to clear */
	handled = tc35815_do_interrupt(dev, status);
	(void)tc_readl(&tr->Int_Src);	/* flush posted write */
	spin_unlock(&lp->lock);
	return IRQ_RETVAL(handled >= 0);
#endif
}
1476
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by netpoll clients (e.g. netconsole) to
 * drive the device with interrupts disabled: run the normal handler
 * with the device IRQ masked.
 */
static void tc35815_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	tc35815_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1485
1486
/*
 * Receive path: walk the Rx FD ring while the chip has handed
 * descriptors back to the CPU (FD_CownsFD clear), deliver good frames
 * to the stack, account errors, recycle free buffers and Rx FDs to
 * the controller, and finally re-enable the FDAEx/BLEx interrupt
 * sources if resources were freed.  In the NAPI build at most @limit
 * packets are consumed and the number received is returned.
 */
#ifdef TC35815_NAPI
static int
tc35815_rx(struct net_device *dev, int limit)
#else
static void
tc35815_rx(struct net_device *dev)
#endif
{
	struct tc35815_local *lp = dev->priv;
	unsigned int fdctl;
	int i;
	int buf_free_count = 0;	/* buffer descriptors returned to chip */
	int fd_free_count = 0;	/* frame descriptors returned to chip */
#ifdef TC35815_NAPI
	int received = 0;
#endif

	while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
		int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
		int pkt_len = fdctl & FD_FDLength_MASK;
		int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
#ifdef DEBUG
		struct RxFD *next_rfd;
#endif
#if (RX_CTL_CMD & Rx_StripCRC) == 0
		pkt_len -= 4;	/* chip did not strip the trailing CRC */
#endif

		if (netif_msg_rx_status(lp))
			dump_rxfd(lp->rfd_cur);
		if (status & Rx_Good) {
			struct sk_buff *skb;
			unsigned char *data;
			int cur_bd;
#ifdef TC35815_USE_PACKEDBUFFER
			int offset;
#endif

#ifdef TC35815_NAPI
			if (--limit < 0)
				break;
#endif
#ifdef TC35815_USE_PACKEDBUFFER
			/* Packed-buffer mode: copy the frame out of the
			 * shared receive buffers into a fresh skb. */
			BUG_ON(bd_count > 2);
			skb = dev_alloc_skb(pkt_len + 2); /* +2: for alignment reserve */
			if (skb == NULL) {
				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			skb_reserve(skb, 2);	/* align IP header on 16-bit boundary */

			data = skb_put(skb, pkt_len);

			/* copy from receive buffer(s) */
			cur_bd = 0;
			offset = 0;
			while (offset < pkt_len && cur_bd < bd_count) {
				int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
					BD_BuffLength_MASK;
				dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
				void *rxbuf = rxbuf_bus_to_virt(lp, dma);
				if (offset + len > pkt_len)
					len = pkt_len - offset;
#ifdef TC35815_DMA_SYNC_ONDEMAND
				pci_dma_sync_single_for_cpu(lp->pci_dev,
							    dma, len,
							    PCI_DMA_FROMDEVICE);
#endif
				memcpy(data + offset, rxbuf, len);
#ifdef TC35815_DMA_SYNC_ONDEMAND
				pci_dma_sync_single_for_device(lp->pci_dev,
							       dma, len,
							       PCI_DMA_FROMDEVICE);
#endif
				offset += len;
				cur_bd++;
			}
#else /* !TC35815_USE_PACKEDBUFFER */
			/* Zero-copy mode: hand the pre-mapped skb itself
			 * to the stack; a replacement is allocated below
			 * when the free-buffer list is refilled. */
			BUG_ON(bd_count > 1);
			cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
				  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
#ifdef DEBUG
			if (cur_bd >= RX_BUF_NUM) {
				printk("%s: invalid BDID.\n", dev->name);
				panic_queues(dev);
			}
			BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
			       (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
			if (!lp->rx_skbs[cur_bd].skb) {
				printk("%s: NULL skb.\n", dev->name);
				panic_queues(dev);
			}
#else
			BUG_ON(cur_bd >= RX_BUF_NUM);
#endif
			skb = lp->rx_skbs[cur_bd].skb;
			prefetch(skb->data);
			lp->rx_skbs[cur_bd].skb = NULL;
			lp->fbl_count--;
			pci_unmap_single(lp->pci_dev,
					 lp->rx_skbs[cur_bd].skb_dma,
					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			if (!HAVE_DMA_RXALIGN(lp))
				memmove(skb->data, skb->data - 2, pkt_len);
			data = skb_put(skb, pkt_len);
#endif /* TC35815_USE_PACKEDBUFFER */
			if (netif_msg_pktdata(lp))
				print_eth(data);
			skb->protocol = eth_type_trans(skb, dev);
#ifdef TC35815_NAPI
			netif_receive_skb(skb);
			received++;
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;
		} else {
			lp->stats.rx_errors++;
			printk(KERN_DEBUG "%s: Rx error (status %x)\n",
			       dev->name, status & Rx_Stat_Mask);
			/* LongErr together with CRCErr is treated as an
			 * overflow instead. */
			if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
				status &= ~(Rx_LongErr|Rx_CRCErr);
				status |= Rx_Over;
			}
			if (status & Rx_LongErr) lp->stats.rx_length_errors++;
			if (status & Rx_Over) lp->stats.rx_fifo_errors++;
			if (status & Rx_CRCErr) lp->stats.rx_crc_errors++;
			if (status & Rx_Align) lp->stats.rx_frame_errors++;
		}

		if (bd_count > 0) {
			/* Return free buffers to the controller. */
			int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
			unsigned char id =
				(bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
#ifdef DEBUG
			if (id >= RX_BUF_NUM) {
				printk("%s: invalid BDID.\n", dev->name);
				panic_queues(dev);
			}
#else
			BUG_ON(id >= RX_BUF_NUM);
#endif
			/* free old buffers */
#ifdef TC35815_USE_PACKEDBUFFER
			while (lp->fbl_curid != id)
#else
			while (lp->fbl_count < RX_BUF_NUM)
#endif
			{
#ifdef TC35815_USE_PACKEDBUFFER
				unsigned char curid = lp->fbl_curid;
#else
				unsigned char curid =
					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
#endif
				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
#ifdef DEBUG
				bdctl = le32_to_cpu(bd->BDCtl);
				if (bdctl & BD_CownsBD) {
					printk("%s: Freeing invalid BD.\n",
					       dev->name);
					panic_queues(dev);
				}
#endif
				/* pass BD to controller */
#ifndef TC35815_USE_PACKEDBUFFER
				if (!lp->rx_skbs[curid].skb) {
					/* Replace the consumed skb with a
					 * fresh DMA-mapped one; stop the
					 * refill on allocation failure and
					 * retry on the next reception. */
					lp->rx_skbs[curid].skb =
						alloc_rxbuf_skb(dev,
								lp->pci_dev,
								&lp->rx_skbs[curid].skb_dma);
					if (!lp->rx_skbs[curid].skb)
						break;
					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
				}
#endif /* !TC35815_USE_PACKEDBUFFER */
				/* BDLength was modified by the chip; rewrite
				 * it along with the ownership bit. */
				bd->BDCtl = cpu_to_le32(BD_CownsBD |
							(curid << BD_RxBDID_SHIFT) |
							RX_BUF_SIZE);
#ifdef TC35815_USE_PACKEDBUFFER
				lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
				if (netif_msg_rx_status(lp)) {
					printk("%s: Entering new FBD %d\n",
					       dev->name, lp->fbl_curid);
					dump_frfd(lp->fbl_ptr);
				}
#else
				lp->fbl_count++;
#endif
				buf_free_count++;
			}
		}

		/* Return the RxFD(s) to the controller. */
#ifdef DEBUG
		next_rfd = fd_bus_to_virt(lp,
					  le32_to_cpu(lp->rfd_cur->fd.FDNext));
		if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
			printk("%s: RxFD FDNext invalid.\n", dev->name);
			panic_queues(dev);
		}
#endif
		for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
			/* pass FD to controller */
#ifdef DEBUG
			lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);	/* poison value */
#else
			lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
#endif
			lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
			lp->rfd_cur++;
			fd_free_count++;
		}
		if (lp->rfd_cur > lp->rfd_limit)
			lp->rfd_cur = lp->rfd_base;
#ifdef DEBUG
		if (lp->rfd_cur != next_rfd)
			printk("rfd_cur = %p, next_rfd %p\n",
			       lp->rfd_cur, next_rfd);
#endif
	}

	/* Re-enable BLEx/FDAEx interrupts now that resources exist. */
	if (fd_free_count) {
		struct tc35815_regs __iomem *tr =
			(struct tc35815_regs __iomem *)dev->base_addr;
		u32 en, en_old = tc_readl(&tr->Int_En);
		en = en_old | Int_FDAExEn;
		if (buf_free_count)
			en |= Int_BLExEn;
		if (en != en_old)
			tc_writel(en, &tr->Int_En);
	}
#ifdef TC35815_NAPI
	return received;
#endif
}
1731
#ifdef TC35815_NAPI
/*
 * NAPI poll callback: drain pending interrupt events and receive up
 * to @budget packets.  Interrupts were masked (DMA_IntMask) by
 * tc35815_interrupt; they are re-enabled once we stop short of the
 * budget and leave polling mode.
 *
 * Fix: the Rx limit passed to tc35815_do_interrupt referenced an
 * undeclared identifier 'limit' (compile error); the remaining budget
 * (budget - received) is the intended value.
 *
 * Returns the number of packets received.
 */
static int tc35815_poll(struct napi_struct *napi, int budget)
{
	struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
	struct net_device *dev = lp->dev;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int received = 0, handled;
	u32 status;

	spin_lock(&lp->lock);
	status = tc_readl(&tr->Int_Src);
	do {
		tc_writel(status, &tr->Int_Src);	/* write to clear */

		handled = tc35815_do_interrupt(dev, status, budget - received);
		if (handled >= 0) {
			received += handled;
			if (received >= budget)
				break;
		}
		status = tc_readl(&tr->Int_Src);
	} while (status);
	spin_unlock(&lp->lock);

	if (received < budget) {
		netif_rx_complete(dev, napi);
		/* enable interrupts */
		tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
	}
	return received;
}
#endif
1765
#ifdef NO_CHECK_CARRIER
#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
#else
#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
#endif

/*
 * Update transmit statistics from one TxFD status word and log the
 * last error condition found (gated by the tx_err msg_enable bit).
 * Raises the Tx FIFO threshold to its maximum after repeated
 * underruns.
 */
static void
tc35815_check_tx_stat(struct net_device *dev, int status)
{
	struct tc35815_local *lp = dev->priv;
	const char *msg = NULL;

	/* count collisions */
	if (status & Tx_ExColl)
		lp->stats.collisions += 16;
	if (status & Tx_TxColl_MASK)
		lp->stats.collisions += status & Tx_TxColl_MASK;

#ifndef NO_CHECK_CARRIER
	/* Ignore carrier loss on TX4939 boards. */
	if (lp->boardtype == TC35815_TX4939)
		status &= ~Tx_NCarr;
#ifdef WORKAROUND_LOSTCAR
	/* WORKAROUND: ignore lost carrier in full duplex, or whenever
	 * the link state machine is not settled (asleep/lcheck). */
	if ((lp->timer_state != asleep && lp->timer_state != lcheck)
	    || lp->fullduplex)
		status &= ~Tx_NCarr;
#endif
#endif

	if (!(status & TX_STA_ERR)) {
		/* no error */
		lp->stats.tx_packets++;
		return;
	}

	lp->stats.tx_errors++;
	if (status & Tx_ExColl) {
		lp->stats.tx_aborted_errors++;
		msg = "Excessive Collision.";
	}
	if (status & Tx_Under) {
		lp->stats.tx_fifo_errors++;
		msg = "Tx FIFO Underrun.";
		if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
			lp->lstats.tx_underrun++;
			if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
				/* Too many underruns: bump the Tx FIFO
				 * threshold to the maximum for good. */
				struct tc35815_regs __iomem *tr =
					(struct tc35815_regs __iomem *)dev->base_addr;
				tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh);
				msg = "Tx FIFO Underrun.Change Tx threshold to max.";
			}
		}
	}
	if (status & Tx_Defer) {
		lp->stats.tx_fifo_errors++;
		msg = "Excessive Deferral.";
	}
#ifndef NO_CHECK_CARRIER
	if (status & Tx_NCarr) {
		lp->stats.tx_carrier_errors++;
		msg = "Lost Carrier Sense.";
	}
#endif
	if (status & Tx_LateColl) {
		lp->stats.tx_aborted_errors++;
		msg = "Late Collision.";
	}
	if (status & Tx_TxPar) {
		lp->stats.tx_fifo_errors++;
		msg = "Transmit Parity Error.";
	}
	if (status & Tx_SQErr) {
		lp->stats.tx_heartbeat_errors++;
		msg = "Signal Quality Error.";
	}
	if (msg && netif_msg_tx_err(lp))
		printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
}
1845
1846
1847
1848
/*
 * Reclaim completed Tx descriptors: for each TxFD handed back by the
 * chip, account its status, unmap and free the attached skb, and
 * advance tfd_end.  When the completed chain ended with EOL but more
 * frames are queued, re-arm the DMA transmitter at the next pending
 * FD.  Finally wake the transmit queue if ring space became
 * available.  Caller must hold lp->lock.
 */
static void
tc35815_txdone(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	struct TxFD *txfd;
	unsigned int fdctl;

	txfd = &lp->tfd_base[lp->tfd_end];
	while (lp->tfd_start != lp->tfd_end &&
	       !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
		int status = le32_to_cpu(txfd->fd.FDStat);
		struct sk_buff *skb;
		unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
		u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem);

		if (netif_msg_tx_done(lp)) {
			printk("%s: complete TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
		tc35815_check_tx_stat(dev, status);

		/* FDSystem holds the ring index stored at submit time,
		 * or 0xffffffff for a descriptor without an skb. */
		skb = fdsystem != 0xffffffff ?
			lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
		if (lp->tx_skbs[lp->tfd_end].skb != skb) {
			printk("%s: tx_skbs mismatch.\n", dev->name);
			panic_queues(dev);
		}
#else
		BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
#endif
		if (skb) {
			lp->stats.tx_bytes += skb->len;
			pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
			lp->tx_skbs[lp->tfd_end].skb = NULL;
			lp->tx_skbs[lp->tfd_end].skb_dma = 0;
#ifdef TC35815_NAPI
			dev_kfree_skb_any(skb);
#else
			dev_kfree_skb_irq(skb);
#endif
		}
		txfd->fd.FDSystem = cpu_to_le32(0xffffffff);	/* mark "no skb" */

		lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
		txfd = &lp->tfd_base[lp->tfd_end];
#ifdef DEBUG
		if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
			printk("%s: TxFD FDNext invalid.\n", dev->name);
			panic_queues(dev);
		}
#endif
		if (fdnext & FD_Next_EOL) {
			/* The DMA transmitter stopped at this EOL; restart
			 * it if frames were queued behind it meanwhile. */
			if (lp->tfd_end != lp->tfd_start) {
				struct tc35815_regs __iomem *tr =
					(struct tc35815_regs __iomem *)dev->base_addr;
				int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
				struct TxFD* txhead = &lp->tfd_base[head];
				int qlen = (lp->tfd_start + TX_FD_NUM
					    - lp->tfd_end) % TX_FD_NUM;

#ifdef DEBUG
				if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
					printk("%s: TxFD FDCtl invalid.\n", dev->name);
					panic_queues(dev);
				}
#endif
				/* log max queue length */
				if (lp->lstats.max_tx_qlen < qlen)
					lp->lstats.max_tx_qlen = qlen;

				/* Mark the current head as end-of-list and
				 * restart the DMA transmitter at txfd. */
				txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
#ifdef GATHER_TXINT
				txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
#endif
				if (netif_msg_tx_queued(lp)) {
					printk("%s: start TxFD on queue.\n",
					       dev->name);
					dump_txfd(txfd);
				}
				tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
			}
			break;
		}
	}

	/* If the queue was stopped due to a "tx full" condition and
	 * space has now been made available, wake it up. */
	if (netif_queue_stopped(dev) && ! tc35815_tx_full(dev))
		netif_wake_queue(dev);
}
1945
1946
/*
 * The inverse routine to tc35815_open(): stop the queue and NAPI,
 * kill the link timer, quiesce the chip, release the IRQ and free the
 * descriptor queues.  Always returns 0.
 */
static int
tc35815_close(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;

	netif_stop_queue(dev);
#ifdef TC35815_NAPI
	napi_disable(&lp->napi);
#endif

	del_timer(&lp->timer);		/* Kill the link timer if running */
	tc35815_chip_reset(dev);
	free_irq(dev->irq, dev);

	tc35815_free_queues(dev);

	return 0;

}
1968
1969
1970
1971
1972
1973static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
1974{
1975 struct tc35815_local *lp = dev->priv;
1976 struct tc35815_regs __iomem *tr =
1977 (struct tc35815_regs __iomem *)dev->base_addr;
1978 if (netif_running(dev)) {
1979
1980 lp->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt);
1981 }
1982
1983 return &lp->stats;
1984}
1985
/*
 * Load one 6-byte MAC address into CAM entry @index.  CAM words are
 * 32 bits wide while entries are 6 bytes long, so adjacent entries
 * share words: odd entries read-modify-write the word shared with the
 * previous entry, even entries the word shared with the next.  The
 * CAM address register is saved and restored around the update.
 */
static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
{
	struct tc35815_local *lp = dev->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int cam_index = index * 6;	/* byte offset of the entry */
	u32 cam_data;
	u32 saved_addr;
	saved_addr = tc_readl(&tr->CAM_Adr);

	if (netif_msg_hw(lp)) {
		int i;
		printk(KERN_DEBUG "%s: CAM %d:", dev->name, index);
		for (i = 0; i < 6; i++)
			printk(" %02x", addr[i]);
		printk("\n");
	}
	if (index & 1) {
		/* odd entry: low half of the shared word ... */
		tc_writel(cam_index - 2, &tr->CAM_Adr);
		cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
		cam_data |= addr[0] << 8 | addr[1];
		tc_writel(cam_data, &tr->CAM_Data);
		/* ... then a full word for the remaining 4 bytes */
		tc_writel(cam_index + 2, &tr->CAM_Adr);
		cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
		tc_writel(cam_data, &tr->CAM_Data);
	} else {
		/* even entry: a full word for the first 4 bytes ... */
		tc_writel(cam_index, &tr->CAM_Adr);
		cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
		tc_writel(cam_data, &tr->CAM_Data);
		/* ... then the high half of the shared word */
		tc_writel(cam_index + 4, &tr->CAM_Adr);
		cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
		cam_data |= addr[4] << 24 | (addr[5] << 16);
		tc_writel(cam_data, &tr->CAM_Data);
	}

	tc_writel(saved_addr, &tr->CAM_Adr);
}
2027
2028
2029
2030
2031
2032
2033
2034
2035
/*
 * Set the receive filter: promiscuous, all-multicast (also used when
 * the list exceeds the CAM capacity), an explicit CAM-programmed
 * multicast list, or plain unicast+broadcast.
 */
static void
tc35815_set_multicast_list(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;

	if (dev->flags&IFF_PROMISC)
	{
#ifdef WORKAROUND_100HALF_PROMISC
		/* WORKAROUND: do not enable promiscuous mode while the
		 * link is down (presumably avoids a controller hang on
		 * some 100M-half hubs — confirm against hardware
		 * errata).  Retried from the link timer once up. */
		struct tc35815_local *lp = dev->priv;
		int pid = lp->phy_addr;
		if (!(tc_mdio_read(dev, pid, MII_BMSR) & BMSR_LSTATUS))
			return;
#endif
		/* Enable promiscuous mode */
		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
	}
	else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > CAM_ENTRY_MAX - 3)
	{
		/* Too many addresses for the CAM (3 entries appear
		 * reserved, hence the -3): accept all multicast. */
		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
	}
	else if(dev->mc_count)
	{
		struct dev_mc_list* cur_addr = dev->mc_list;
		int i;
		int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);

		tc_writel(0, &tr->CAM_Ctl);
		/* Walk the address list and load the CAM filter;
		 * multicast entries start at index 2. */
		for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
			if (!cur_addr)
				break;
			tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
			ena_bits |= CAM_Ena_Bit(i + 2);
		}
		tc_writel(ena_bits, &tr->CAM_Ena);
		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
	}
	else {
		/* No multicast: station address + broadcast only. */
		tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
	}
}
2084
2085static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2086{
2087 struct tc35815_local *lp = dev->priv;
2088 strcpy(info->driver, MODNAME);
2089 strcpy(info->version, DRV_VERSION);
2090 strcpy(info->bus_info, pci_name(lp->pci_dev));
2091}
2092
/*
 * ethtool: fetch the current link settings from the generic MII state
 * (under lp->lock since the MII registers are shared with the timer
 * and interrupt paths).  Always returns 0.
 */
static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tc35815_local *lp = dev->priv;
	spin_lock_irq(&lp->lock);
	mii_ethtool_gset(&lp->mii, cmd);
	spin_unlock_irq(&lp->lock);
	return 0;
}
2101
/*
 * ethtool: change the link settings.  Validates the requested
 * autoneg/speed/duplex combination, then restarts the driver's own
 * negotiation state machine (the mii_ethtool_sset alternative is kept
 * disabled under #else).
 *
 * Returns 0 on success or -EINVAL for unsupported combinations.
 */
static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tc35815_local *lp = dev->priv;
	int rc;
#if 1	/* use the driver's own negotiation method */
	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((cmd->speed != SPEED_100 &&
	      cmd->speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply: stop the link timer and restart negotiation. */
	spin_lock_irq(&lp->lock);
	del_timer(&lp->timer);
	tc35815_start_auto_negotiation(dev, cmd);
	spin_unlock_irq(&lp->lock);
	rc = 0;
#else
	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_sset(&lp->mii, cmd);
	spin_unlock_irq(&lp->lock);
#endif
	return rc;
}
2131
/* ethtool: restart autonegotiation via the generic MII helper. */
static int tc35815_nway_reset(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	int rc;
	spin_lock_irq(&lp->lock);
	rc = mii_nway_restart(&lp->mii);
	spin_unlock_irq(&lp->lock);
	return rc;
}
2141
/* ethtool: report link state (nonzero = up) from the MII status. */
static u32 tc35815_get_link(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	int rc;
	spin_lock_irq(&lp->lock);
	rc = mii_link_ok(&lp->mii);
	spin_unlock_irq(&lp->lock);
	return rc;
}
2151
2152static u32 tc35815_get_msglevel(struct net_device *dev)
2153{
2154 struct tc35815_local *lp = dev->priv;
2155 return lp->msg_enable;
2156}
2157
2158static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
2159{
2160 struct tc35815_local *lp = dev->priv;
2161 lp->msg_enable = datum;
2162}
2163
2164static int tc35815_get_sset_count(struct net_device *dev, int sset)
2165{
2166 struct tc35815_local *lp = dev->priv;
2167
2168 switch (sset) {
2169 case ETH_SS_STATS:
2170 return sizeof(lp->lstats) / sizeof(int);
2171 default:
2172 return -EOPNOTSUPP;
2173 }
2174}
2175
2176static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
2177{
2178 struct tc35815_local *lp = dev->priv;
2179 data[0] = lp->lstats.max_tx_qlen;
2180 data[1] = lp->lstats.tx_ints;
2181 data[2] = lp->lstats.rx_ints;
2182 data[3] = lp->lstats.tx_underrun;
2183}
2184
/* Names of the driver-private counters reported via
 * get_ethtool_stats; order must match tc35815_get_ethtool_stats. */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "max_tx_qlen" },
	{ "tx_ints" },
	{ "rx_ints" },
	{ "tx_underrun" },
};
2193
/* ethtool: return the statistics name strings.
 * NOTE(review): @stringset is not checked; only ETH_SS_STATS exists
 * here, and the core gates calls via get_sset_count. */
static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
}
2198
/* ethtool operations supported by this driver. */
static const struct ethtool_ops tc35815_ethtool_ops = {
	.get_drvinfo = tc35815_get_drvinfo,
	.get_settings = tc35815_get_settings,
	.set_settings = tc35815_set_settings,
	.nway_reset = tc35815_nway_reset,
	.get_link = tc35815_get_link,
	.get_msglevel = tc35815_get_msglevel,
	.set_msglevel = tc35815_set_msglevel,
	.get_strings = tc35815_get_strings,
	.get_sset_count = tc35815_get_sset_count,
	.get_ethtool_stats = tc35815_get_ethtool_stats,
};
2211
/*
 * Generic MII ioctl support (SIOCGMIIPHY and friends), delegated to
 * generic_mii_ioctl under lp->lock.  Only valid while the interface
 * is running; returns -EINVAL otherwise.
 */
static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tc35815_local *lp = dev->priv;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&lp->lock);
	rc = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&lp->lock);

	return rc;
}
2226
/*
 * Read one 16-bit PHY register via the chip's MII management
 * interface.  NOTE(review): busy-waits on MD_CA_Busy with no timeout;
 * a hung PHY or management bus would spin here forever.
 */
static int tc_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	u32 data;
	tc_writel(MD_CA_Busy | (phy_id << 5) | location, &tr->MD_CA);
	while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
		;
	data = tc_readl(&tr->MD_Data);
	return data & 0xffff;
}
2238
/*
 * Write one 16-bit PHY register via the chip's MII management
 * interface.  NOTE(review): same unbounded busy-wait as tc_mdio_read.
 */
static void tc_mdio_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	tc_writel(val, &tr->MD_Data);
	tc_writel(MD_CA_Busy | MD_CA_Wr | (phy_id << 5) | location, &tr->MD_CA);
	while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
		;
}
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284static int tc35815_try_next_permutation(struct net_device *dev)
2285{
2286 struct tc35815_local *lp = dev->priv;
2287 int pid = lp->phy_addr;
2288 unsigned short bmcr;
2289
2290 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2291
2292
2293 if (bmcr & BMCR_FULLDPLX) {
2294 bmcr &= ~BMCR_FULLDPLX;
2295 printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr);
2296 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2297 return 0;
2298 }
2299
2300
2301 if (bmcr & BMCR_SPEED100) {
2302 bmcr &= ~BMCR_SPEED100;
2303 printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr);
2304 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2305 return 0;
2306 }
2307
2308
2309 return -1;
2310}
2311
/*
 * Log the negotiated link speed and duplex.  When the user forced
 * speed or duplex via the module options, the BMCR bits are
 * authoritative; otherwise the link-partner ability (LPA) bits are
 * used.
 */
static void
tc35815_display_link_mode(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	int pid = lp->phy_addr;
	unsigned short lpa, bmcr;
	char *speed = "", *duplex = "";

	lpa = tc_mdio_read(dev, pid, MII_LPA);
	bmcr = tc_mdio_read(dev, pid, MII_BMCR);
	if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL)))
		speed = "100Mb/s";
	else
		speed = "10Mb/s";
	if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL)))
		duplex = "Full Duplex";
	else
		duplex = "Half Duplex";

	if (netif_msg_link(lp))
		printk(KERN_INFO "%s: Link is up at %s, %s.\n",
		       dev->name, speed, duplex);
	printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
	       dev->name,
	       bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa);
}
2338
2339static void tc35815_display_forced_link_mode(struct net_device *dev)
2340{
2341 struct tc35815_local *lp = dev->priv;
2342 int pid = lp->phy_addr;
2343 unsigned short bmcr;
2344 char *speed = "", *duplex = "";
2345
2346 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2347 if (bmcr & BMCR_SPEED100)
2348 speed = "100Mb/s";
2349 else
2350 speed = "10Mb/s";
2351 if (bmcr & BMCR_FULLDPLX)
2352 duplex = "Full Duplex.\n";
2353 else
2354 duplex = "Half Duplex.\n";
2355
2356 if (netif_msg_link(lp))
2357 printk(KERN_INFO "%s: Link has been forced up at %s, %s",
2358 dev->name, speed, duplex);
2359}
2360
/*
 * Program the MAC duplex setting to match the PHY result.  In the
 * arbwait timer state the mode comes from autonegotiation (BMCR/LPA,
 * overridable by the speed/duplex module options); in other states
 * the forced BMCR bits are used.  The MAC is halted around the duplex
 * change.
 */
static void tc35815_set_link_modes(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int pid = lp->phy_addr;
	unsigned short bmcr, lpa;
	int speed;	/* NOTE(review): computed but not used in this function */

	if (lp->timer_state == arbwait) {
		lpa = tc_mdio_read(dev, pid, MII_LPA);
		bmcr = tc_mdio_read(dev, pid, MII_BMCR);
		printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
		       dev->name,
		       bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa);
		if (!(lpa & (LPA_10HALF | LPA_10FULL |
			     LPA_100HALF | LPA_100FULL))) {
			/* Partner advertised nothing usable: fall back
			 * to 10M half duplex. */
			printk(KERN_INFO "%s: bad ability %04x - falling back to 10HD.\n",
			       dev->name, lpa);
			lpa = LPA_10HALF;
		}
		if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL)))
			lp->fullduplex = 1;
		else
			lp->fullduplex = 0;
		if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL)))
			speed = 100;
		else
			speed = 10;
	} else {
		/* Forced mode: read the mode straight from BMCR. */
		bmcr = tc_mdio_read(dev, pid, MII_BMCR);
		if (bmcr & BMCR_FULLDPLX)
			lp->fullduplex = 1;
		else
			lp->fullduplex = 0;
		if (bmcr & BMCR_SPEED100)
			speed = 100;
		else
			speed = 10;
	}

	/* Halt the MAC while changing the duplex setting. */
	tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_HaltReq, &tr->MAC_Ctl);
	if (lp->fullduplex) {
		tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_FullDup, &tr->MAC_Ctl);
	} else {
		tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_FullDup, &tr->MAC_Ctl);
	}
	tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_HaltReq, &tr->MAC_Ctl);

#ifndef NO_CHECK_CARRIER
	/* Carrier-sense handling is skipped on TX4939 boards. */
	if (lp->boardtype != TC35815_TX4939) {
#ifdef WORKAROUND_LOSTCAR
		/* WORKAROUND: enable lost-carrier detection only in half
		 * duplex (the inner boardtype test is redundant with the
		 * enclosing one). */
		if (!lp->fullduplex && lp->boardtype != TC35815_TX4939)
			tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, &tr->Tx_Ctl);
#endif
	}
#endif
	lp->mii.full_duplex = lp->fullduplex;
}
2426
2427static void tc35815_timer(unsigned long data)
2428{
2429 struct net_device *dev = (struct net_device *)data;
2430 struct tc35815_local *lp = dev->priv;
2431 int pid = lp->phy_addr;
2432 unsigned short bmsr, bmcr, lpa;
2433 int restart_timer = 0;
2434
2435 spin_lock_irq(&lp->lock);
2436
2437 lp->timer_ticks++;
2438 switch (lp->timer_state) {
2439 case arbwait:
2440
2441
2442
2443
2444
2445 if (lp->timer_ticks >= 10) {
2446
2447 if (!options.doforce) {
2448 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
2449 " cable probblem?\n", dev->name);
2450
2451 tc35815_restart(dev);
2452 goto out;
2453 }
2454 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
2455 " trying force link mode\n", dev->name);
2456 printk(KERN_DEBUG "%s: BMCR %x BMSR %x\n", dev->name,
2457 tc_mdio_read(dev, pid, MII_BMCR),
2458 tc_mdio_read(dev, pid, MII_BMSR));
2459 bmcr = BMCR_SPEED100;
2460 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2461
2462
2463
2464
2465
2466
2467
2468 lp->timer_state = ltrywait;
2469 lp->timer_ticks = 0;
2470 restart_timer = 1;
2471 } else {
2472
2473 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2474 if (bmsr & BMSR_ANEGCOMPLETE) {
2475
2476 tc35815_set_link_modes(dev);
2477
2478
2479
2480
2481
2482 lp->timer_state = lupwait;
2483 restart_timer = 1;
2484 } else {
2485 restart_timer = 1;
2486 }
2487 }
2488 break;
2489
2490 case lupwait:
2491
2492
2493
2494
2495
2496
2497 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2498 if (bmsr & BMSR_LSTATUS) {
2499
2500
2501
2502
2503 tc35815_display_link_mode(dev);
2504 netif_carrier_on(dev);
2505#ifdef WORKAROUND_100HALF_PROMISC
2506
2507 if (dev->flags & IFF_PROMISC)
2508 tc35815_set_multicast_list(dev);
2509#endif
2510#if 1
2511 lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA);
2512 lp->timer_state = lcheck;
2513 restart_timer = 1;
2514#else
2515 lp->timer_state = asleep;
2516 restart_timer = 0;
2517#endif
2518 } else {
2519 if (lp->timer_ticks >= 10) {
2520 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
2521 "not completely up.\n", dev->name);
2522 lp->timer_ticks = 0;
2523 restart_timer = 1;
2524 } else {
2525 restart_timer = 1;
2526 }
2527 }
2528 break;
2529
2530 case ltrywait:
2531
2532
2533
2534
2535
2536
2537 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2538 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2539 if (lp->timer_ticks == 1) {
2540
2541
2542
2543
2544
2545 restart_timer = 1;
2546 break;
2547 }
2548 if (lp->timer_ticks == 2) {
2549 restart_timer = 1;
2550 break;
2551 }
2552 if (bmsr & BMSR_LSTATUS) {
2553
2554 tc35815_display_forced_link_mode(dev);
2555 netif_carrier_on(dev);
2556 tc35815_set_link_modes(dev);
2557#ifdef WORKAROUND_100HALF_PROMISC
2558
2559 if (dev->flags & IFF_PROMISC)
2560 tc35815_set_multicast_list(dev);
2561#endif
2562#if 1
2563 lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA);
2564 lp->timer_state = lcheck;
2565 restart_timer = 1;
2566#else
2567 lp->timer_state = asleep;
2568 restart_timer = 0;
2569#endif
2570 } else {
2571 if (lp->timer_ticks >= 4) {
2572 int ret;
2573
2574 ret = tc35815_try_next_permutation(dev);
2575 if (ret == -1) {
2576
2577
2578
2579
2580 printk(KERN_NOTICE "%s: Link down, "
2581 "cable problem?\n",
2582 dev->name);
2583
2584
2585 tc35815_restart(dev);
2586 goto out;
2587 }
2588 lp->timer_ticks = 0;
2589 restart_timer = 1;
2590 } else {
2591 restart_timer = 1;
2592 }
2593 }
2594 break;
2595
2596 case lcheck:
2597 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2598 lpa = tc_mdio_read(dev, pid, MII_LPA);
2599 if (bmcr & (BMCR_PDOWN | BMCR_ISOLATE | BMCR_RESET)) {
2600 printk(KERN_ERR "%s: PHY down? (BMCR %x)\n", dev->name,
2601 bmcr);
2602 } else if ((lp->saved_lpa ^ lpa) &
2603 (LPA_100FULL|LPA_100HALF|LPA_10FULL|LPA_10HALF)) {
2604 printk(KERN_NOTICE "%s: link status changed"
2605 " (BMCR %x LPA %x->%x)\n", dev->name,
2606 bmcr, lp->saved_lpa, lpa);
2607 } else {
2608
2609 restart_timer = 1;
2610 break;
2611 }
2612
2613 tc35815_restart(dev);
2614 goto out;
2615
2616 case asleep:
2617 default:
2618
2619 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got "
2620 "one anyways!\n", dev->name);
2621 restart_timer = 0;
2622 lp->timer_ticks = 0;
2623 lp->timer_state = asleep;
2624 break;
2625 }
2626
2627 if (restart_timer) {
2628 lp->timer.expires = jiffies + msecs_to_jiffies(1200);
2629 add_timer(&lp->timer);
2630 }
2631out:
2632 spin_unlock_irq(&lp->lock);
2633}
2634
/*
 * Begin link bring-up on the PHY.
 *
 * @dev: network device whose PHY to program
 * @ep:  ethtool settings; NULL or autoneg==AUTONEG_ENABLE selects
 *       auto-negotiation (optionally narrowed by the module options),
 *       anything else forces the requested speed/duplex.
 *
 * Sets lp->timer_state (arbwait for autoneg, ltrywait for a forced
 * mode) and (re)arms the link timer serviced by tc35815_timer().
 */
static void tc35815_start_auto_negotiation(struct net_device *dev,
					   struct ethtool_cmd *ep)
{
	struct tc35815_local *lp = dev->priv;
	int pid = lp->phy_addr;
	unsigned short bmsr, bmcr, advertize;
	int timeout;

	netif_carrier_off(dev);
	bmsr = tc_mdio_read(dev, pid, MII_BMSR);
	bmcr = tc_mdio_read(dev, pid, MII_BMCR);
	advertize = tc_mdio_read(dev, pid, MII_ADVERTISE);

	if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
		if (options.speed || options.duplex) {
			/* Advertise only the abilities permitted by the
			 * speed/duplex module options. */
			advertize &= ~(ADVERTISE_10HALF |
				       ADVERTISE_10FULL |
				       ADVERTISE_100HALF |
				       ADVERTISE_100FULL);
			if (options.speed != 10) {
				if (options.duplex != 1)
					advertize |= ADVERTISE_100FULL;
				if (options.duplex != 2)
					advertize |= ADVERTISE_100HALF;
			}
			if (options.speed != 100) {
				if (options.duplex != 1)
					advertize |= ADVERTISE_10FULL;
				if (options.duplex != 2)
					advertize |= ADVERTISE_10HALF;
			}
			if (options.speed == 100)
				bmcr |= BMCR_SPEED100;
			else if (options.speed == 10)
				bmcr &= ~BMCR_SPEED100;
			if (options.duplex == 2)
				bmcr |= BMCR_FULLDPLX;
			else if (options.duplex == 1)
				bmcr &= ~BMCR_FULLDPLX;
		} else {
			/* No options given: advertise everything the PHY
			 * reports itself capable of (BMSR ability bits). */
			if (bmsr & BMSR_10HALF)
				advertize |= ADVERTISE_10HALF;
			else
				advertize &= ~ADVERTISE_10HALF;
			if (bmsr & BMSR_10FULL)
				advertize |= ADVERTISE_10FULL;
			else
				advertize &= ~ADVERTISE_10FULL;
			if (bmsr & BMSR_100HALF)
				advertize |= ADVERTISE_100HALF;
			else
				advertize &= ~ADVERTISE_100HALF;
			if (bmsr & BMSR_100FULL)
				advertize |= ADVERTISE_100FULL;
			else
				advertize &= ~ADVERTISE_100FULL;
		}

		tc_mdio_write(dev, pid, MII_ADVERTISE, advertize);

		/* Enable auto-negotiation... */
		bmcr |= BMCR_ANENABLE;
		tc_mdio_write(dev, pid, MII_BMCR, bmcr);

		/* ...and restart it so the new advertisement takes effect. */
		bmcr |= BMCR_ANRESTART;
		tc_mdio_write(dev, pid, MII_BMCR, bmcr);
		printk(KERN_DEBUG "%s: ADVERTISE %x BMCR %x\n", dev->name, advertize, bmcr);

		/* BMCR_ANRESTART self-clears once negotiation begins;
		 * poll briefly to verify the PHY accepted the request. */
		timeout = 64;
		while (--timeout) {
			bmcr = tc_mdio_read(dev, pid, MII_BMCR);
			if (!(bmcr & BMCR_ANRESTART))
				break;
			udelay(10);
		}
		if (!timeout) {
			printk(KERN_ERR "%s: TC35815 would not start auto "
			       "negotiation BMCR=0x%04x\n",
			       dev->name, bmcr);
			printk(KERN_NOTICE "%s: Performing force link "
			       "detection.\n", dev->name);
			goto force_link;
		} else {
			printk(KERN_DEBUG "%s: auto negotiation started.\n", dev->name);
			lp->timer_state = arbwait;
		}
	} else {
force_link:
		/* Force a fixed link mode.  Reached either with an explicit
		 * ethtool request (autoneg disabled) or via the goto above
		 * when auto-negotiation would not start; in the goto case
		 * ep may still be NULL/autoneg, so default to 100Mbit
		 * half duplex. */
		if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
			bmcr = BMCR_SPEED100;
		} else {
			if (ep->speed == SPEED_100)
				bmcr = BMCR_SPEED100;
			else
				bmcr = 0;
			if (ep->duplex == DUPLEX_FULL)
				bmcr |= BMCR_FULLDPLX;
		}
		tc_mdio_write(dev, pid, MII_BMCR, bmcr);

		/* ltrywait lets tc35815_timer() verify the forced mode. */
		lp->timer_state = ltrywait;
	}

	/* (Re)arm the link supervision timer. */
	del_timer(&lp->timer);
	lp->timer_ticks = 0;
	lp->timer.expires = jiffies + msecs_to_jiffies(1200);
	add_timer(&lp->timer);
}
2759
2760static void tc35815_find_phy(struct net_device *dev)
2761{
2762 struct tc35815_local *lp = dev->priv;
2763 int pid = lp->phy_addr;
2764 unsigned short id0;
2765
2766
2767 for (pid = 31; pid >= 0; pid--) {
2768 id0 = tc_mdio_read(dev, pid, MII_BMSR);
2769 if (id0 != 0xffff && id0 != 0x0000 &&
2770 (id0 & BMSR_RESV) != (0xffff & BMSR_RESV)
2771 ) {
2772 lp->phy_addr = pid;
2773 break;
2774 }
2775 }
2776 if (pid < 0) {
2777 printk(KERN_ERR "%s: No MII Phy found.\n",
2778 dev->name);
2779 lp->phy_addr = pid = 0;
2780 }
2781
2782 lp->mii_id[0] = tc_mdio_read(dev, pid, MII_PHYSID1);
2783 lp->mii_id[1] = tc_mdio_read(dev, pid, MII_PHYSID2);
2784 if (netif_msg_hw(lp))
2785 printk(KERN_INFO "%s: PHY(%02x) ID %04x %04x\n", dev->name,
2786 pid, lp->mii_id[0], lp->mii_id[1]);
2787}
2788
2789static void tc35815_phy_chip_init(struct net_device *dev)
2790{
2791 struct tc35815_local *lp = dev->priv;
2792 int pid = lp->phy_addr;
2793 unsigned short bmcr;
2794 struct ethtool_cmd ecmd, *ep;
2795
2796
2797 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2798 if (bmcr & BMCR_ISOLATE) {
2799 int count = 32;
2800 printk(KERN_DEBUG "%s: unisolating...", dev->name);
2801 tc_mdio_write(dev, pid, MII_BMCR, bmcr & ~BMCR_ISOLATE);
2802 while (--count) {
2803 if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_ISOLATE))
2804 break;
2805 udelay(20);
2806 }
2807 printk(" %s.\n", count ? "done" : "failed");
2808 }
2809
2810 if (options.speed && options.duplex) {
2811 ecmd.autoneg = AUTONEG_DISABLE;
2812 ecmd.speed = options.speed == 10 ? SPEED_10 : SPEED_100;
2813 ecmd.duplex = options.duplex == 1 ? DUPLEX_HALF : DUPLEX_FULL;
2814 ep = &ecmd;
2815 } else {
2816 ep = NULL;
2817 }
2818 tc35815_start_auto_negotiation(dev, ep);
2819}
2820
/*
 * Hard-reset the controller: reset the MAC (waiting up to ~100ms for
 * the self-clearing reset bit), zero all control registers, clear
 * pending interrupt sources, and wipe the CAM contents.  The register
 * write order follows the chip bring-up sequence; do not reorder.
 */
static void tc35815_chip_reset(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int i;

	/* Trigger MAC reset and poll for the bit to self-clear. */
	tc_writel(MAC_Reset, &tr->MAC_Ctl);
	udelay(4);	/* brief settle before the first poll */
	i = 0;
	while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) {
		if (i++ > 100) {
			printk(KERN_ERR "%s: MAC reset failed.\n", dev->name);
			break;
		}
		mdelay(1);
	}
	tc_writel(0, &tr->MAC_Ctl);

	/* Return all control registers to a quiescent default state. */
	tc_writel(0, &tr->DMA_Ctl);
	tc_writel(0, &tr->TxThrsh);
	tc_writel(0, &tr->TxPollCtr);
	tc_writel(0, &tr->RxFragSize);
	tc_writel(0, &tr->Int_En);
	tc_writel(0, &tr->FDA_Bas);
	tc_writel(0, &tr->FDA_Lim);
	tc_writel(0xffffffff, &tr->Int_Src);	/* presumably write-1-to-clear — confirm w/ datasheet */
	tc_writel(0, &tr->CAM_Ctl);
	tc_writel(0, &tr->Tx_Ctl);
	tc_writel(0, &tr->Rx_Ctl);
	tc_writel(0, &tr->CAM_Ena);
	(void)tc_readl(&tr->Miss_Cnt);	/* read discarded; presumably clear-on-read */

	/* Zero the CAM memory through DMA test mode. */
	tc_writel(DMA_TestMode, &tr->DMA_Ctl);
	for (i = 0; i < 0x1000; i += 4) {
		tc_writel(i, &tr->CAM_Adr);
		tc_writel(0, &tr->CAM_Data);
	}
	tc_writel(0, &tr->DMA_Ctl);
}
2862
/*
 * Bring the controller up after a reset: start PHY negotiation,
 * program the station address into the CAM, configure DMA/RX/TX
 * control registers and the descriptor ring pointers, then enable
 * receive and transmit.  Write order is hardware-sensitive.
 */
static void tc35815_chip_init(struct net_device *dev)
{
	struct tc35815_local *lp = dev->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long txctl = TX_CTL_CMD;

	tc35815_phy_chip_init(dev);

	/* Load the station (source) MAC address into the CAM. */
	tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);

	/* Enable CAM matching plus broadcast acceptance. */
	tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
	tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);

	/* DMA burst size; RxAlign_2 when the board supports RX alignment
	 * (presumably to align the IP header — TODO confirm). */
	if (HAVE_DMA_RXALIGN(lp))
		tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
	else
		tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
#ifdef TC35815_USE_PACKEDBUFFER
	tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize);	/* packed RX buffers */
#else
	tc_writel(ETH_ZLEN, &tr->RxFragSize);
#endif
	tc_writel(0, &tr->TxPollCtr);
	tc_writel(TX_THRESHOLD, &tr->TxThrsh);
	tc_writel(INT_EN_CMD, &tr->Int_En);

	/* RX frame descriptor area: base address and limit (byte span). */
	tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
	tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
		  &tr->FDA_Lim);

	/* Free-buffer list pointer, then enable the receiver. */
	tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr);
	tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);

	/* Assemble the TX control word from the default command. */
#ifndef NO_CHECK_CARRIER
	/* TX4939 variant: lost-carrier detection not usable. */
	if (lp->boardtype == TC35815_TX4939)
		txctl &= ~Tx_EnLCarr;
#ifdef WORKAROUND_LOSTCAR
	/* Workaround: disable lost-carrier detection unless the link
	 * watchdog is in a settled state and we are half duplex. */
	if ((lp->timer_state != asleep && lp->timer_state != lcheck) ||
	    lp->fullduplex)
		txctl &= ~Tx_EnLCarr;
#endif
#endif
#ifdef GATHER_TXINT
	txctl &= ~Tx_EnComp;	/* suppress per-frame TX-complete interrupts */
#endif
	tc_writel(txctl, &tr->Tx_Ctl);
}
2922
2923#ifdef CONFIG_PM
/*
 * PCI suspend hook: save PCI config space; if the interface is up,
 * detach it, stop the link timer, reset the chip under the device
 * lock, and enter D3hot.  Always returns 0.
 */
static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = dev->priv;
	unsigned long flags;

	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;
	netif_device_detach(dev);
	spin_lock_irqsave(&lp->lock, flags);
	del_timer(&lp->timer);		/* stop the link watchdog */
	tc35815_chip_reset(dev);
	spin_unlock_irqrestore(&lp->lock, flags);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
2941
/*
 * PCI resume hook: restore PCI config space; if the interface was up,
 * return to D0, restart the chip under the device lock, and reattach
 * the interface.  Always returns 0.
 */
static int tc35815_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = dev->priv;
	unsigned long flags;

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;
	pci_set_power_state(pdev, PCI_D0);
	spin_lock_irqsave(&lp->lock, flags);
	tc35815_restart(dev);
	spin_unlock_irqrestore(&lp->lock, flags);
	netif_device_attach(dev);
	return 0;
}
2958#endif
2959
/* PCI driver glue; suspend/resume hooks only with power management. */
static struct pci_driver tc35815_pci_driver = {
	.name		= MODNAME,
	.id_table	= tc35815_pci_tbl,
	.probe		= tc35815_init_one,
	.remove		= __devexit_p(tc35815_remove_one),
#ifdef CONFIG_PM
	.suspend	= tc35815_suspend,
	.resume		= tc35815_resume,
#endif
};
2970
/* Module parameters, backed by the file-scope options structure. */
module_param_named(speed, options.speed, int, 0);
MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
module_param_named(duplex, options.duplex, int, 0);
MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
module_param_named(doforce, options.doforce, int, 0);
MODULE_PARM_DESC(doforce, "try force link mode if auto-negotiation failed");
2977
/* Module entry point: register the PCI driver. */
static int __init tc35815_init_module(void)
{
	return pci_register_driver(&tc35815_pci_driver);
}
2982
/* Module exit point: unregister the PCI driver. */
static void __exit tc35815_cleanup_module(void)
{
	pci_unregister_driver(&tc35815_pci_driver);
}
2987
module_init(tc35815_init_module);
module_exit(tc35815_cleanup_module);

MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
MODULE_LICENSE("GPL");
2993