1
2
3
4
5
6
7
8
9
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/interrupt.h>
19#include <linux/pci.h>
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/skbuff.h>
23#include <linux/delay.h>
24#include <linux/mii.h>
25#include <linux/ethtool.h>
26#include <linux/crc32.h>
27#include <linux/spinlock.h>
28#include <linux/bitops.h>
29#include <linux/io.h>
30#include <linux/irq.h>
31#include <linux/uaccess.h>
32#include <linux/phy.h>
33
34#include <asm/processor.h>
35
/* Driver identification */
#define DRV_NAME "r6040"
#define DRV_VERSION "0.29"
#define DRV_RELDATE "04Jul2016"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (6000 * HZ / 1000)

/* Size (bytes) of the register window in BAR 0 */
#define R6040_IO_SIZE 256

/* Number of MACs carried by this device family */
#define MAX_MAC 2

/* MAC register offsets and bit definitions */
#define MCR0 0x00 /* Control register 0 */
#define MCR0_RCVEN 0x0002 /* Receive enable */
#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
#define MCR0_XMTEN 0x1000 /* Transmission enable */
#define MCR0_FD 0x8000 /* Full/half duplex */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
#define MT_ICR 0x0C /* TX interrupt control */
#define MR_ICR 0x10 /* RX interrupt control */
#define MTPR 0x14 /* TX poll command register */
#define TM2TX 0x0001 /* Trigger MAC to transmit */
#define MR_BSR 0x18 /* RX buffer size */
#define MR_DCR 0x1A /* RX descriptor control */
#define MLSR 0x1C /* Last status */
#define TX_FIFO_UNDR 0x0200 /* TX FIFO underrun */
#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */
#define TX_LATEC 0x4000 /* Transmit late collision */
#define MMDIO 0x20 /* MDIO control register */
#define MDIO_WRITE 0x4000 /* MDIO write command / busy bit */
#define MDIO_READ 0x2000 /* MDIO read command / busy bit */
#define MMRD 0x24 /* MDIO read data register */
#define MMWD 0x28 /* MDIO write data register */
#define MTD_SA0 0x2C /* TX descriptor start address, low 16 bits */
#define MTD_SA1 0x30 /* TX descriptor start address, high 16 bits */
#define MRD_SA0 0x34 /* RX descriptor start address, low 16 bits */
#define MRD_SA1 0x38 /* RX descriptor start address, high 16 bits */
#define MISR 0x3C /* Interrupt status register */
#define MIER 0x40 /* Interrupt enable register */
#define MSK_INT 0x0000 /* Mask off all interrupts */
#define RX_FINISH 0x0001 /* RX finished */
#define RX_NO_DESC 0x0002 /* No RX descriptor available */
#define RX_FIFO_FULL 0x0004 /* RX FIFO full */
#define RX_EARLY 0x0008 /* RX early */
#define TX_FINISH 0x0010 /* TX finished */
#define TX_EARLY 0x0080 /* TX early */
#define EVENT_OVRFL 0x0100 /* Event counter overflow */
#define LINK_CHANGED 0x0200 /* PHY link changed */
#define ME_CISR 0x44 /* Event counter interrupt status */
#define ME_CIER 0x48 /* Event counter interrupt enable */
#define MR_CNT 0x50 /* Successfully received packet counter */
#define ME_CNT0 0x52 /* Event counter 0 */
#define ME_CNT1 0x54 /* Event counter 1 */
#define ME_CNT2 0x56 /* Event counter 2 */
#define ME_CNT3 0x58 /* Event counter 3 */
#define MT_CNT 0x5A /* Successfully transmitted packet counter */
#define ME_CNT4 0x5C /* Event counter 4 */
#define MP_CNT 0x5E /* Pause frame counter */
#define MAR0 0x60 /* Multicast hash table word 0 */
#define MAR1 0x62 /* Multicast hash table word 1 */
#define MAR2 0x64 /* Multicast hash table word 2 */
#define MAR3 0x66 /* Multicast hash table word 3 */
#define MID_0L 0x68 /* Our MAC address, low word */
#define MID_0M 0x6A /* Our MAC address, middle word */
#define MID_0H 0x6C /* Our MAC address, high word */
#define MID_1L 0x70 /* Exact-match multicast address 1, low word */
#define MID_1M 0x72 /* Exact-match multicast address 1, middle word */
#define MID_1H 0x74 /* Exact-match multicast address 1, high word */
#define MID_2L 0x78 /* Exact-match multicast address 2, low word */
#define MID_2M 0x7A /* Exact-match multicast address 2, middle word */
#define MID_2H 0x7C /* Exact-match multicast address 2, high word */
#define MID_3L 0x80 /* Exact-match multicast address 3, low word */
#define MID_3M 0x82 /* Exact-match multicast address 3, middle word */
#define MID_3H 0x84 /* Exact-match multicast address 3, high word */
#define PHY_CC 0x88 /* PHY status-change configuration register */
#define SCEN 0x8000 /* PHY status-change enable */
#define PHYAD_SHIFT 8 /* PHY address field shift */
#define TMRDIV_SHIFT 0 /* Timer divider field shift */
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC state machine */
#define MAC_SM_RST 0x0002 /* MAC state machine reset */
#define MAC_ID 0xBE /* Identifier register */

/* Ring and buffer sizing */
#define TX_DCNT 0x80 /* TX descriptor count */
#define RX_DCNT 0x80 /* RX descriptor count */
#define MAX_BUF_SIZE 0x600 /* Maximum receive buffer size (1536 bytes) */
#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT 0x012A /* MAC bus control register default */
#define MCAST_MAX 3 /* Number of exact-match multicast address slots */

/* Iterations to poll when waiting on MAC/MDIO operations */
#define MAC_DEF_TIMEOUT 2048

/* Descriptor status bits */
#define DSC_OWNER_MAC 0x8000 /* MAC owns the descriptor */
#define DSC_RX_OK 0x4000 /* RX was successful */
#define DSC_RX_ERR 0x0800 /* RX PHY error */
#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 bytes */
#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hash table hit (no error) */
#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3 /* Mask for the index of the matched MIDx */
/* Module metadata */
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
	"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
	"Florian Fainelli <f.fainelli@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
155
156
/* Interrupt sources serviced by the NAPI poll loop */
#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
#define TX_INTS (TX_FINISH)
#define INT_MASK (RX_INTS | TX_INTS)
160
/* Hardware DMA descriptor. The first four fields are read/written by
 * the MAC; the remaining fields are driver-side bookkeeping. The DMA
 * engine requires 32-byte alignment, hence __aligned(32).
 */
struct r6040_descriptor {
	u16 status, len;	/* ownership/status bits and frame length */
	__le32 buf;		/* DMA address of the data buffer */
	__le32 ndesc;		/* DMA address of the next descriptor */
	u32 rev1;		/* reserved */
	char *vbufp;		/* virtual address of the data buffer */
	struct r6040_descriptor *vndescp; /* virtual address of next descriptor */
	struct sk_buff *skb_ptr;	/* skb attached to this descriptor */
	u32 rev2;		/* reserved */
} __aligned(32);
171
/* Per-device driver state, stored in netdev_priv(). */
struct r6040_private {
	spinlock_t lock;	/* serializes register access and ring state */
	struct pci_dev *pdev;
	struct r6040_descriptor *rx_insert_ptr;	/* RX ring walk pointer for (re)fill/free */
	struct r6040_descriptor *rx_remove_ptr;	/* next RX descriptor to reap */
	struct r6040_descriptor *tx_insert_ptr;	/* next free TX descriptor */
	struct r6040_descriptor *tx_remove_ptr;	/* oldest in-flight TX descriptor */
	struct r6040_descriptor *rx_ring;	/* coherent RX ring (RX_DCNT entries) */
	struct r6040_descriptor *tx_ring;	/* coherent TX ring (TX_DCNT entries) */
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	u16 tx_free_desc;	/* count of free TX descriptors */
	u16 mcr0;		/* cached copy of the MCR0 control register */
	struct net_device *dev;
	struct mii_bus *mii_bus;
	struct napi_struct napi;
	void __iomem *base;	/* mapped register window */
	int old_link;		/* last link state seen by r6040_adjust_link */
	int old_duplex;		/* last duplex mode seen by r6040_adjust_link */
};
192
193static char version[] = DRV_NAME
194 ": RDC R6040 NAPI net driver,"
195 "version "DRV_VERSION " (" DRV_RELDATE ")";
196
197
/* Read a PHY register through the MAC's embedded MDIO interface.
 * Returns the register value, or -ETIMEDOUT if the MDIO_READ busy bit
 * never clears within MAC_DEF_TIMEOUT polls.
 */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd;

	/* Issue the read: PHY address goes in bits 8+, register in low bits */
	iowrite16(MDIO_READ | reg | (phy_addr << 8), ioaddr + MMDIO);

	/* Wait for the command to complete; MDIO_READ self-clears when done */
	while (limit--) {
		cmd = ioread16(ioaddr + MMDIO);
		if (!(cmd & MDIO_READ))
			break;
		udelay(1);
	}

	/* limit underflows to -1 only when the loop ran out without a break */
	if (limit < 0)
		return -ETIMEDOUT;

	return ioread16(ioaddr + MMRD);
}
217
218
/* Write a PHY register through the MAC's embedded MDIO interface.
 * The data word must be staged in MMWD before the command is issued.
 * Returns 0 on success or -ETIMEDOUT if the MDIO_WRITE busy bit never
 * clears within MAC_DEF_TIMEOUT polls.
 */
static int r6040_phy_write(void __iomem *ioaddr,
		int phy_addr, int reg, u16 val)
{
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd;

	/* Stage the data word first, then trigger the write command */
	iowrite16(val, ioaddr + MMWD);

	iowrite16(MDIO_WRITE | reg | (phy_addr << 8), ioaddr + MMDIO);

	/* Wait for completion; MDIO_WRITE self-clears when done */
	while (limit--) {
		cmd = ioread16(ioaddr + MMDIO);
		if (!(cmd & MDIO_WRITE))
			break;
		udelay(1);
	}

	return (limit < 0) ? -ETIMEDOUT : 0;
}
238
239static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
240{
241 struct net_device *dev = bus->priv;
242 struct r6040_private *lp = netdev_priv(dev);
243 void __iomem *ioaddr = lp->base;
244
245 return r6040_phy_read(ioaddr, phy_addr, reg);
246}
247
248static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
249 int reg, u16 value)
250{
251 struct net_device *dev = bus->priv;
252 struct r6040_private *lp = netdev_priv(dev);
253 void __iomem *ioaddr = lp->base;
254
255 return r6040_phy_write(ioaddr, phy_addr, reg, value);
256}
257
/* Unmap and free every sk_buff still attached to the TX ring.
 * Walks all TX_DCNT descriptors starting at tx_insert_ptr; since the
 * ring is circular the pointer ends up back where it started.
 */
static void r6040_free_txbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_DCNT; i++) {
		if (lp->tx_insert_ptr->skb_ptr) {
			dma_unmap_single(&lp->pdev->dev,
				le32_to_cpu(lp->tx_insert_ptr->buf),
				MAX_BUF_SIZE, DMA_TO_DEVICE);
			dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
			lp->tx_insert_ptr->skb_ptr = NULL;
		}
		lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
	}
}
274
/* Unmap and free every sk_buff still attached to the RX ring.
 * Mirror of r6040_free_txbufs() for the receive side.
 */
static void r6040_free_rxbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < RX_DCNT; i++) {
		if (lp->rx_insert_ptr->skb_ptr) {
			dma_unmap_single(&lp->pdev->dev,
				le32_to_cpu(lp->rx_insert_ptr->buf),
				MAX_BUF_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
			lp->rx_insert_ptr->skb_ptr = NULL;
		}
		lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
	}
}
291
292static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
293 dma_addr_t desc_dma, int size)
294{
295 struct r6040_descriptor *desc = desc_ring;
296 dma_addr_t mapping = desc_dma;
297
298 while (size-- > 0) {
299 mapping += sizeof(*desc);
300 desc->ndesc = cpu_to_le32(mapping);
301 desc->vndescp = desc + 1;
302 desc++;
303 }
304 desc--;
305 desc->ndesc = cpu_to_le32(desc_dma);
306 desc->vndescp = desc_ring;
307}
308
309static void r6040_init_txbufs(struct net_device *dev)
310{
311 struct r6040_private *lp = netdev_priv(dev);
312
313 lp->tx_free_desc = TX_DCNT;
314
315 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
316 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
317}
318
/* Initialize the RX ring and attach a freshly allocated, DMA-mapped
 * sk_buff to every descriptor, handing ownership to the MAC.
 * Returns 0 on success or -ENOMEM; on failure all buffers allocated so
 * far are released again via r6040_free_rxbufs().
 */
static int r6040_alloc_rxbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *desc;
	struct sk_buff *skb;
	int rc;

	lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
	r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);

	/* Allocate and map an skb for every descriptor in the ring */
	desc = lp->rx_ring;
	do {
		skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!skb) {
			rc = -ENOMEM;
			goto err_exit;
		}
		desc->skb_ptr = skb;
		desc->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev,
						       desc->skb_ptr->data,
						       MAX_BUF_SIZE,
						       DMA_FROM_DEVICE));
		/* Hand the descriptor to the MAC */
		desc->status = DSC_OWNER_MAC;
		desc = desc->vndescp;
	} while (desc != lp->rx_ring);

	return 0;

err_exit:
	/* Undo any partial allocation */
	r6040_free_rxbufs(dev);
	return rc;
}
353
/* Reset the MAC core and its internal state machine.
 * NOTE(review): the poll breaks when MAC_RST reads back as SET rather
 * than cleared, which looks inverted for a self-clearing reset bit -
 * confirm against the RDC datasheet before changing; this matches the
 * code as written.
 */
static void r6040_reset_mac(struct r6040_private *lp)
{
	void __iomem *ioaddr = lp->base;
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd;

	iowrite16(MAC_RST, ioaddr + MCR1);
	while (limit--) {
		cmd = ioread16(ioaddr + MCR1);
		if (cmd & MAC_RST)
			break;
	}

	/* Reset the internal state machine, then release it and let the
	 * hardware settle */
	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
	iowrite16(0, ioaddr + MAC_SM);
	mdelay(5);
}
372
/* Program the MAC registers from scratch: reset the core, set bus and
 * buffer parameters, point the hardware at the DMA rings, enable
 * interrupts and start the receiver. The register write order below is
 * deliberate - do not reorder.
 */
static void r6040_init_mac_regs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	/* Mask off all interrupts while reconfiguring */
	iowrite16(MSK_INT, ioaddr + MIER);

	/* Reset the MAC core and state machine */
	r6040_reset_mac(lp);

	/* Bus control register */
	iowrite16(MBCR_DEFAULT, ioaddr + MBCR);

	/* Maximum receive buffer size */
	iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);

	/* TX descriptor ring start address (32-bit DMA address split into
	 * two 16-bit registers) */
	iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
	iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);

	/* RX descriptor ring start address */
	iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
	iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);

	/* Disable TX/RX interrupt mitigation */
	iowrite16(0, ioaddr + MT_ICR);
	iowrite16(0, ioaddr + MR_ICR);

	/* Enable the interrupts we service */
	iowrite16(INT_MASK, ioaddr + MIER);

	/* Enable TX/RX per the cached control value, always with receive on */
	iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);

	/* Kick the transmitter in case packets were queued while the MAC
	 * was being reprogrammed (e.g. after a TX timeout) */
	iowrite16(TM2TX, ioaddr + MTPR);
}
413
/* ndo_tx_timeout handler: log the interrupt state, count the error and
 * reprogram the MAC from scratch to recover.
 */
static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct r6040_private *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	netdev_warn(dev, "transmit timed out, int enable %4.4x "
		"status %4.4x\n",
		ioread16(ioaddr + MIER),
		ioread16(ioaddr + MISR));

	dev->stats.tx_errors++;

	/* Reset the MAC and restore full register state */
	r6040_init_mac_regs(dev);
}
429
430static struct net_device_stats *r6040_get_stats(struct net_device *dev)
431{
432 struct r6040_private *priv = netdev_priv(dev);
433 void __iomem *ioaddr = priv->base;
434 unsigned long flags;
435
436 spin_lock_irqsave(&priv->lock, flags);
437 dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
438 dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
439 spin_unlock_irqrestore(&priv->lock, flags);
440
441 return &dev->stats;
442}
443
444
/* Stop the RDC MAC: mask interrupts, reset the core, then rewrite the
 * station address registers, which the reset clears.
 */
static void r6040_down(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 *adrp;

	/* Stop all interrupt sources */
	iowrite16(MSK_INT, ioaddr + MIER);

	/* Reset the MAC core */
	r6040_reset_mac(lp);

	/* Restore the MAC address (reset wipes the MID registers) */
	adrp = (u16 *) dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);
}
463
/* ndo_stop handler: quiesce PHY/NAPI/queue, stop the MAC, release all
 * ring buffers, the IRQ and the coherent descriptor rings. Mirrors the
 * setup performed by r6040_open()/r6040_up().
 */
static int r6040_close(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct pci_dev *pdev = lp->pdev;

	phy_stop(dev->phydev);
	napi_disable(&lp->napi);
	netif_stop_queue(dev);

	spin_lock_irq(&lp->lock);
	r6040_down(dev);

	/* Free RX buffers */
	r6040_free_rxbufs(dev);

	/* Free TX buffers */
	r6040_free_txbufs(dev);

	spin_unlock_irq(&lp->lock);

	free_irq(dev->irq, dev);

	/* Free the coherent descriptor rings allocated in r6040_open() */
	if (lp->rx_ring) {
		dma_free_coherent(&pdev->dev, RX_DESC_SIZE, lp->rx_ring,
				  lp->rx_ring_dma);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring) {
		dma_free_coherent(&pdev->dev, TX_DESC_SIZE, lp->tx_ring,
				  lp->tx_ring_dma);
		lp->tx_ring = NULL;
	}

	return 0;
}
501
/* NAPI receive processing: consume up to @limit descriptors the MAC has
 * handed back, pass good frames up the stack, and recycle every
 * descriptor with a fresh buffer before returning it to the MAC.
 * Returns the number of descriptors processed.
 */
static int r6040_rx(struct net_device *dev, int limit)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
	struct sk_buff *skb_ptr, *new_skb;
	int count = 0;
	u16 err;

	/* Stop at the budget or at the first descriptor the MAC still owns */
	while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
		/* Check for errors reported in the status word */
		err = descptr->status;

		if (err & DSC_RX_ERR) {
			/* Dribble bit set: frame misaligned */
			if (err & DSC_RX_ERR_DRI)
				dev->stats.rx_frame_errors++;
			/* Buffer length exceeded */
			if (err & DSC_RX_ERR_BUF)
				dev->stats.rx_length_errors++;
			/* Packet too long */
			if (err & DSC_RX_ERR_LONG)
				dev->stats.rx_length_errors++;
			/* Packet < 64 bytes */
			if (err & DSC_RX_ERR_RUNT)
				dev->stats.rx_length_errors++;
			/* CRC error: lock because r6040_get_stats() also
			 * updates rx_crc_errors */
			if (err & DSC_RX_ERR_CRC) {
				spin_lock(&priv->lock);
				dev->stats.rx_crc_errors++;
				spin_unlock(&priv->lock);
			}
			goto next_descr;
		}

		/* Allocate the replacement buffer first; if that fails, drop
		 * the received frame and recycle the old buffer */
		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto next_descr;
		}
		skb_ptr = descptr->skb_ptr;
		skb_ptr->dev = priv->dev;

		/* Hardware length includes the trailing FCS; strip it */
		skb_put(skb_ptr, descptr->len - ETH_FCS_LEN);
		dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
				 MAX_BUF_SIZE, DMA_FROM_DEVICE);
		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);

		/* Send to upper layer */
		netif_receive_skb(skb_ptr);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += descptr->len - ETH_FCS_LEN;

		/* Attach and map the replacement buffer */
		descptr->skb_ptr = new_skb;
		descptr->buf = cpu_to_le32(dma_map_single(&priv->pdev->dev,
						descptr->skb_ptr->data,
						MAX_BUF_SIZE,
						DMA_FROM_DEVICE));

next_descr:
		/* Return the descriptor to the MAC and advance */
		descptr->status = DSC_OWNER_MAC;
		descptr = descptr->vndescp;
		count++;
	}
	priv->rx_remove_ptr = descptr;

	return count;
}
574
/* Reclaim completed TX descriptors: account errors from the last-status
 * register, free transmitted skbs, and wake the queue when descriptors
 * are available again. Called from NAPI poll context.
 */
static void r6040_tx(struct net_device *dev)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = priv->base;
	struct sk_buff *skb_ptr;
	u16 err;

	spin_lock(&priv->lock);
	descptr = priv->tx_remove_ptr;
	while (priv->tx_free_desc < TX_DCNT) {
		/* Check the transmit error summary in MLSR */
		err = ioread16(ioaddr + MLSR);

		if (err & TX_FIFO_UNDR)
			dev->stats.tx_fifo_errors++;
		if (err & (TX_EXCEEDC | TX_LATEC))
			dev->stats.tx_carrier_errors++;

		/* Stop at the first descriptor the MAC still owns */
		if (descptr->status & DSC_OWNER_MAC)
			break;
		skb_ptr = descptr->skb_ptr;

		/* Statistics counting */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_ptr->len;

		dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
				 skb_ptr->len, DMA_TO_DEVICE);

		/* Free the transmitted buffer */
		dev_kfree_skb(skb_ptr);
		descptr->skb_ptr = NULL;
		/* Advance to the next completed descriptor */
		descptr = descptr->vndescp;
		priv->tx_free_desc++;
	}
	priv->tx_remove_ptr = descptr;

	/* Restart the queue if descriptors were freed */
	if (priv->tx_free_desc)
		netif_wake_queue(dev);
	spin_unlock(&priv->lock);
}
617
/* NAPI poll callback: reclaim TX completions, receive up to @budget
 * frames, and re-enable RX/TX interrupts once work drops below budget.
 * Returns the number of RX descriptors processed.
 */
static int r6040_poll(struct napi_struct *napi, int budget)
{
	struct r6040_private *priv =
		container_of(napi, struct r6040_private, napi);
	struct net_device *dev = priv->dev;
	void __iomem *ioaddr = priv->base;
	int work_done;

	r6040_tx(dev);

	work_done = r6040_rx(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* Re-enable the RX/TX interrupt sources masked off in
		 * r6040_interrupt() */
		iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
			  ioaddr + MIER);
	}
	return work_done;
}
638
639
/* Interrupt handler (shared line): read and acknowledge the status,
 * account FIFO/descriptor exhaustion, and hand RX/TX work to NAPI with
 * those sources masked until the poll completes.
 */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 misr, status;

	/* Save the current interrupt enable mask */
	misr = ioread16(ioaddr + MIER);
	/* Mask off all interrupts while handling this one */
	iowrite16(MSK_INT, ioaddr + MIER);
	/* Read the interrupt status (presumably read-to-clear - the status
	 * is never written back) */
	status = ioread16(ioaddr + MISR);

	if (status == 0x0000 || status == 0xffff) {
		/* Not ours (shared IRQ) or device gone: restore the mask */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX or TX work pending: defer to NAPI */
	if (status & (RX_INTS | TX_INTS)) {
		if (status & RX_NO_DESC) {
			/* No RX descriptor available: frame was dropped */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&lp->napi))) {
			/* Keep RX/TX sources masked until r6040_poll()
			 * re-enables them */
			misr &= ~(RX_INTS | TX_INTS);
			__napi_schedule_irqoff(&lp->napi);
		}
	}

	/* Restore the (possibly reduced) interrupt enable mask */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}
682
683#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the IRQ line disabled so
 * netconsole and friends can drain the device without interrupts.
 */
static void r6040_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	r6040_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
690#endif
691
692
/* Bring the device up: initialize both rings, apply PHY fixups, program
 * the MAC registers and start the PHY state machine.
 * Returns 0 or a negative errno from RX buffer allocation.
 */
static int r6040_up(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int ret;

	/* Initialize the TX ring and populate the RX ring with buffers */
	r6040_init_txbufs(dev);
	ret = r6040_alloc_rxbufs(dev);
	if (ret)
		return ret;

	/* PHY register fixups on vendor-specific registers 17/19/30 -
	 * presumably errata workarounds; semantics undocumented here
	 * (TODO: confirm against RDC PHY documentation) */
	r6040_phy_write(ioaddr, 30, 17,
			(r6040_phy_read(ioaddr, 30, 17) | 0x4000));
	r6040_phy_write(ioaddr, 30, 17,
			~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
	r6040_phy_write(ioaddr, 0, 19, 0x0000);
	r6040_phy_write(ioaddr, 0, 30, 0x01F0);

	/* Program the full MAC register set */
	r6040_init_mac_regs(dev);

	phy_start(dev->phydev);

	return 0;
}
720
721
722
/* Reset the MAC and program the station address into the MID_0
 * registers as three 16-bit words.
 */
static void r6040_mac_address(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 *adrp;

	/* Reset the MAC first (reset clears the MID registers) */
	r6040_reset_mac(lp);

	/* Restore the MAC address */
	adrp = (u16 *) dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);
}
738
/* ndo_open handler: request the (shared) IRQ, program the MAC address,
 * allocate both coherent descriptor rings, bring the hardware up, then
 * enable NAPI and the TX queue. Unwinds everything on failure.
 */
static int r6040_open(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	int ret;

	/* Request the IRQ first so nothing is half-configured if it fails */
	ret = request_irq(dev->irq, r6040_interrupt,
		IRQF_SHARED, dev->name, dev);
	if (ret)
		goto out;

	/* Set the MAC address */
	r6040_mac_address(dev);

	/* Allocate the coherent RX descriptor ring */
	lp->rx_ring =
		dma_alloc_coherent(&lp->pdev->dev, RX_DESC_SIZE,
				   &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	/* Allocate the coherent TX descriptor ring */
	lp->tx_ring =
		dma_alloc_coherent(&lp->pdev->dev, TX_DESC_SIZE,
				   &lp->tx_ring_dma, GFP_KERNEL);
	if (!lp->tx_ring) {
		ret = -ENOMEM;
		goto err_free_rx_ring;
	}

	ret = r6040_up(dev);
	if (ret)
		goto err_free_tx_ring;

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;

err_free_tx_ring:
	dma_free_coherent(&lp->pdev->dev, TX_DESC_SIZE, lp->tx_ring,
			  lp->tx_ring_dma);
err_free_rx_ring:
	dma_free_coherent(&lp->pdev->dev, RX_DESC_SIZE, lp->rx_ring,
			  lp->rx_ring_dma);
err_free_irq:
	free_irq(dev->irq, dev);
out:
	return ret;
}
790
/* ndo_start_xmit handler: pad the frame to the Ethernet minimum, claim a
 * TX descriptor under the lock, map the buffer, hand the descriptor to
 * the MAC and trigger transmission.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 */
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = lp->base;
	unsigned long flags;

	/* Pad runt frames; skb_put_padto() frees the skb on failure */
	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	/* Critical section: ring state is shared with r6040_tx() */
	spin_lock_irqsave(&lp->lock, flags);

	/* TX ring full - the queue should already be stopped, but guard
	 * against a race with the completion path */
	if (!lp->tx_free_desc) {
		spin_unlock_irqrestore(&lp->lock, flags);
		netif_stop_queue(dev);
		netdev_err(dev, ": no tx descriptor\n");
		return NETDEV_TX_BUSY;
	}

	/* Claim a descriptor and fill it in; setting DSC_OWNER_MAC last
	 * hands it to the hardware */
	lp->tx_free_desc--;
	descptr = lp->tx_insert_ptr;
	descptr->len = skb->len;
	descptr->skb_ptr = skb;
	descptr->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE));
	descptr->status = DSC_OWNER_MAC;

	skb_tx_timestamp(skb);

	/* Trigger the MAC to check the TX descriptor, unless more frames
	 * are coming and the queue is still running */
	if (!netdev_xmit_more() || netif_queue_stopped(dev))
		iowrite16(TM2TX, ioaddr + MTPR);
	lp->tx_insert_ptr = descptr->vndescp;

	/* If no descriptors remain, stop the queue until r6040_tx() frees
	 * some */
	if (!lp->tx_free_desc)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
837
/* ndo_set_rx_mode handler: program promiscuous mode, the multicast hash
 * table and/or the MCAST_MAX exact-match address slots according to the
 * device flags and multicast list.
 */
static void r6040_multicast_list(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned long flags;
	struct netdev_hw_addr *ha;
	int i;
	u16 *adrp;
	u16 hash_table[4] = { 0 };

	spin_lock_irqsave(&lp->lock, flags);

	/* Keep our MAC address programmed (three 16-bit words) */
	adrp = (u16 *)dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);

	/* Start from the current MCR0 with filtering modes cleared */
	lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);

	/* Promiscuous mode: accept everything */
	if (dev->flags & IFF_PROMISC)
		lp->mcr0 |= MCR0_PROMISC;

	/* All-multicast: enable the hash function with an all-ones table
	 * so every multicast frame hits, and clear the exact-match slots */
	else if (dev->flags & IFF_ALLMULTI) {
		lp->mcr0 |= MCR0_HASH_EN;

		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		for (i = 0; i < 4; i++)
			hash_table[i] = 0xffff;
	}

	/* Few enough addresses: use the exact-match slots only, zeroing
	 * the unused ones */
	else if (netdev_mc_count(dev) <= MCAST_MAX) {
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			u16 *adrp = (u16 *) ha->addr;
			iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
			iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
			iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
			i++;
		}
		while (i < MCAST_MAX) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
			i++;
		}
	}
	/* Otherwise build a 64-bit CRC hash table from the list */
	else {
		u32 crc;

		lp->mcr0 |= MCR0_HASH_EN;

		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		/* Top 6 bits of the Ethernet CRC index the 64-bit table */
		netdev_for_each_mc_addr(ha, dev) {
			u8 *addrs = ha->addr;

			crc = ether_crc(ETH_ALEN, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
	}

	iowrite16(lp->mcr0, ioaddr + MCR0);

	/* Program the hash table only when the hash function is in use */
	if (lp->mcr0 & MCR0_HASH_EN) {
		iowrite16(hash_table[0], ioaddr + MAR0);
		iowrite16(hash_table[1], ioaddr + MAR1);
		iowrite16(hash_table[2], ioaddr + MAR2);
		iowrite16(hash_table[3], ioaddr + MAR3);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
929
930static void netdev_get_drvinfo(struct net_device *dev,
931 struct ethtool_drvinfo *info)
932{
933 struct r6040_private *rp = netdev_priv(dev);
934
935 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
936 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
937 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
938}
939
/* ethtool operations: link settings are delegated to phylib */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.nway_reset = phy_ethtool_nway_reset,
};
948
/* Network device operations */
static const struct net_device_ops r6040_netdev_ops = {
	.ndo_open = r6040_open,
	.ndo_stop = r6040_close,
	.ndo_start_xmit = r6040_start_xmit,
	.ndo_get_stats = r6040_get_stats,
	.ndo_set_rx_mode = r6040_multicast_list,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_do_ioctl = phy_do_ioctl,
	.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = r6040_poll_controller,
#endif
};
963
964static void r6040_adjust_link(struct net_device *dev)
965{
966 struct r6040_private *lp = netdev_priv(dev);
967 struct phy_device *phydev = dev->phydev;
968 int status_changed = 0;
969 void __iomem *ioaddr = lp->base;
970
971 BUG_ON(!phydev);
972
973 if (lp->old_link != phydev->link) {
974 status_changed = 1;
975 lp->old_link = phydev->link;
976 }
977
978
979 if (phydev->link && (lp->old_duplex != phydev->duplex)) {
980 lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
981 iowrite16(lp->mcr0, ioaddr);
982
983 status_changed = 1;
984 lp->old_duplex = phydev->duplex;
985 }
986
987 if (status_changed)
988 phy_print_status(phydev);
989}
990
/* Find the first PHY on the MDIO bus and connect it to the netdev with
 * r6040_adjust_link() as the state callback, capped at 100 Mbit/s.
 * Returns 0 or a negative errno.
 */
static int r6040_mii_probe(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(lp->mii_bus);
	if (!phydev) {
		dev_err(&lp->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link,
			     PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&lp->pdev->dev, "could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* The MAC is Fast Ethernet only */
	phy_set_max_speed(phydev, SPEED_100);

	/* Force r6040_adjust_link() to report the first real state */
	lp->old_link = 0;
	lp->old_duplex = -1;

	phy_attached_info(phydev);

	return 0;
}
1019
/* PCI probe: enable the device, validate DMA and BAR 0, map the register
 * window, read (or randomize) the MAC address, register the MDIO bus,
 * attach the PHY and finally register the net device. Every failure
 * path unwinds in reverse order.
 */
static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct r6040_private *lp;
	void __iomem *ioaddr;
	int err, io_size = R6040_IO_SIZE;
	static int card_idx = -1;	/* counts probed devices for bus id */
	int bar = 0;
	u16 *adrp;

	pr_info("%s\n", version);

	err = pci_enable_device(pdev);
	if (err)
		goto err_out;

	/* The hardware only does 32-bit DMA */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}
	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}

	/* BAR 0 must cover the whole register window */
	if (pci_resource_len(pdev, bar) < io_size) {
		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
		err = -EIO;
		goto err_out_disable_dev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct r6040_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_disable_dev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);

	if (err) {
		dev_err(&pdev->dev, "Failed to request PCI regions\n");
		goto err_out_free_dev;
	}

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed for device\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* If the PHY status-change configuration register is unprogrammed,
	 * set the status-change enable, PHY address and timer divider so
	 * MDIO polling works */
	if (ioread16(ioaddr + PHY_CC) == 0)
		iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
				7 << TMRDIV_SHIFT, ioaddr + PHY_CC);

	/* Init system & device */
	lp->base = ioaddr;
	dev->irq = pdev->irq;

	spin_lock_init(&lp->lock);
	pci_set_drvdata(pdev, dev);

	/* Set MAC address */
	card_idx++;

	adrp = (u16 *)dev->dev_addr;
	adrp[0] = ioread16(ioaddr + MID_0L);
	adrp[1] = ioread16(ioaddr + MID_0M);
	adrp[2] = ioread16(ioaddr + MID_0H);

	/* Some bootloaders/BIOSes leave the MID registers all-zero; fall
	 * back to a random address so the interface is still usable */
	if (!(adrp[0] || adrp[1] || adrp[2])) {
		netdev_warn(dev, "MAC address not initialized, "
					"generating random\n");
		eth_hw_addr_random(dev);
	}

	/* Link new device into r6040_root_dev */
	lp->pdev = pdev;
	lp->dev = dev;

	/* Init RDC private data */
	lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;

	/* The RDC-specific entries in the device structure. */
	dev->netdev_ops = &r6040_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &lp->napi, r6040_poll, 64);

	lp->mii_bus = mdiobus_alloc();
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out_unmap;
	}

	lp->mii_bus->priv = dev;
	lp->mii_bus->read = r6040_mdiobus_read;
	lp->mii_bus->write = r6040_mdiobus_write;
	lp->mii_bus->name = "r6040_eth_mii";
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		dev_name(&pdev->dev), card_idx);

	err = mdiobus_register(lp->mii_bus);
	if (err) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_out_mdio;
	}

	err = r6040_mii_probe(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_out_mdio_unregister;
	}

	/* Register net device. After this dev->name assign */
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register net device\n");
		goto err_out_mdio_unregister;
	}
	return 0;

err_out_mdio_unregister:
	mdiobus_unregister(lp->mii_bus);
err_out_mdio:
	mdiobus_free(lp->mii_bus);
err_out_unmap:
	netif_napi_del(&lp->napi);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_dev:
	free_netdev(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out:
	return err;
}
1175
/* PCI remove: tear everything down in the reverse order of probe. */
static void r6040_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct r6040_private *lp = netdev_priv(dev);

	unregister_netdev(dev);
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
	netif_napi_del(&lp->napi);
	pci_iounmap(pdev, lp->base);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
1190
1191
/* PCI device IDs this driver binds to: RDC R6040 */
static const struct pci_device_id r6040_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);
1197
/* PCI driver registration; module_pci_driver() generates init/exit */
static struct pci_driver r6040_driver = {
	.name		= DRV_NAME,
	.id_table	= r6040_pci_tbl,
	.probe		= r6040_init_one,
	.remove		= r6040_remove_one,
};

module_pci_driver(r6040_driver);
1206