/*
 * Dave DNET Ethernet Controller driver
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG

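/*
 * The internal MAC block is accessed indirectly: the register offset is
 * written to MACREG_ADDR (with DNET_INTERNAL_WRITE set for writes) and the
 * 16-bit payload is transferred through MACREG_DATA.  A short ndelay()
 * separates the address and data phases of each access.
 */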
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* the MAC register access is slow; wait before the data is valid */
	ndelay(500);

	/* fetch the data returned for the requested register */
	data_read = dnet_readl(bp, MACREG_DATA);

	return data_read;
}

static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load the data to be written */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue the write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* the MAC register access is slow; wait before issuing new accesses */
	ndelay(500);
}

static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	/* the MAC address is stored big-endian in three 16-bit registers */
	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}

static void dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * Read back the address programmed into the MAC: each 16-bit
	 * register holds two octets in network (big-endian) order.
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}

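/*
 * MDIO bus accessors: the GMII management interface is driven through the
 * internal GMII_MNG_CTL/GMII_MNG_DAT registers.  A transfer is started by
 * writing the control word and is finished once the CMD_FIN bit is set.
 */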
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits are allowed for the PHY address and register offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare the control word for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write the control word to start the transfer */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for the end of the transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}

static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation */
	tmp = (1 << 13);

	/* only 5 bits are allowed for the PHY address and register offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits of data */
	value &= 0xffff;

	/* prepare the control word */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write the data, then the control word to start the transfer */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	/* wait for the end of the transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}

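/*
 * phylib adjust_link callback: mirror the PHY's duplex, speed and link
 * state into the MAC MODE and RXTX_CONTROL registers and log changes.
 */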
static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
				       dev->name, phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}

static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	/* find the first PHY on the bus */
	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* attach the MAC to the PHY */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with the features supported by the MAC */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int dnet_mii_init(struct dnet *bp)
{
	int err;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;

	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);

	bp->mii_bus->priv = bp;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}

/* PHY fixup registered for Marvell PHYs from dnet_probe() */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4148);
}

static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	/* accumulate the contiguous block of RX counters into hw_stats */
	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	/* accumulate the contiguous block of TX counters into hw_stats */
	for (; p < end; p++, reg++)
		*p += readl(reg);
}

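/*
 * NAPI poll: received frames are pulled out of the RX data FIFO with
 * programmed I/O, one 32-bit word at a time, until either the RX command
 * FIFO is empty or the budget is exhausted.
 */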
static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of the loop if there are no more
		 * packets waiting
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
			break;

		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

		/* +5 bytes: 2 for the alignment reserve, up to 3 for the
		 * trailing partial word of the FIFO copy below */
		skb = netdev_alloc_skb(dev, pkt_len + 5);
		if (skb != NULL) {
			/* align IP on 16 byte boundaries */
			skb_reserve(skb, 2);
			/* copy the frame out of the RX data FIFO */
			data_ptr = skb_put(skb, pkt_len);
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of size %u.\n",
			       dev->name, pkt_len);
	}

	if (npackets < budget) {
		/*
		 * We processed all packets available.  Tell NAPI it can stop
		 * polling and re-enable the RX interrupt.
		 */
		napi_complete_done(napi, npackets);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
	}

	return npackets;
}

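/*
 * Interrupt handler: the pending sources are masked against INTR_ENB and
 * dispatched; a TX "FIFO almost empty" event restarts a stopped queue,
 * FIFO overflow errors trigger a FIFO flush, and an RX "command FIFO
 * almost full" event schedules NAPI with the RX interrupt disabled until
 * the poll completes.
 */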
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read the interrupt sources and mask with what is enabled */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX FIFO almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error handling: report and flush the RX FIFO */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error handling: report and flush the TX FIFO */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * Disable the RX interrupt; it is re-enabled by
			 * dnet_poll() once the pending packets have been
			 * processed.
			 */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}

#ifdef DEBUG
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb) do {} while (0)
#endif

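/*
 * Transmit path: the frame is copied into the TX data FIFO as 32-bit words
 * (starting from the word containing skb->data, so the byte offset within
 * that first word is encoded in the command word together with the frame
 * length), then the command word is pushed into the TX length FIFO to hand
 * the frame to the MAC.
 */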
static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room in the TX data FIFO */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform the MAC that a packet has been written and is
		 * ready to be shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	/* stop the queue if the TX FIFO is getting full */
	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

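/*
 * Hardware reset/init: dnet_reset_hw() disables RX/TX, programs the FIFO
 * thresholds and flushes both FIFOs; dnet_init_hw() then restores the
 * station address, the RX/TX control flags and the interrupt enable mask.
 */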
static void dnet_reset_hw(struct dnet *bp)
{
	/* disable RX/TX, i.e. put the MAC in an idle state */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/* RX command FIFO almost-full threshold */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);

	/* TX data FIFO almost-empty threshold */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush both FIFOs */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
		    SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}

static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* receive all frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
		  DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
		  DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
		  DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* read INTR_SRC to clear any pending interrupt sources */
	config = dnet_readl(bp, INTR_SRC);

	/* enable the RX/TX and receive-packet-ready interrupts */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
		    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
		    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
		    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
		    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}

static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the PHY is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(dev->phydev);
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}

static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}

static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read the hardware counters and fold them into the netdev stats */
	dnet_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}

static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void dnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}

static const struct ethtool_ops dnet_ethtool_ops = {
	.get_drvinfo = dnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open = dnet_open,
	.ndo_stop = dnet_close,
	.ndo_get_stats = dnet_get_stats,
	.ndo_start_xmit = dnet_start_xmit,
	.ndo_do_ioctl = dnet_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

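/*
 * Platform probe: map the controller registers, request the interrupt,
 * read the capability word and the MAC address from the hardware (falling
 * back to a random address), then register the netdev and the MDIO bus.
 */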
static int dnet_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *dev;
	struct dnet *bp;
	struct phy_device *phydev;
	int err;
	unsigned int irq;

	irq = platform_get_irq(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	/* no optional netdev features are advertised */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->dev = dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&bp->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bp->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bp->regs)) {
		err = PTR_ERR(bp->regs);
		goto err_out_free_dev;
	}

	dev->irq = irq;
	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			irq, err);
		goto err_out_free_dev;
	}

	dev->netdev_ops = &dnet_netdev_ops;
	netif_napi_add(dev, &bp->napi, dnet_poll, 64);
	dev->ethtool_ops = &dnet_ethtool_ops;

	dev->base_addr = (unsigned long)bp->regs;

	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;

	dnet_get_hwaddr(bp);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* choose a random ethernet address */
		eth_hw_addr_random(dev);
		__dnet_set_hwaddr(bp);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* register the PHY board fixup (for Marvell PHYs) */
	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
					 dnet_phy_marvell_fixup);
	if (err)
		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");

	err = dnet_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
		 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
		 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
	phydev = dev->phydev;
	phy_attached_info(phydev);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free_dev:
	free_netdev(dev);
	return err;
}

static int dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver dnet_driver = {
	.probe = dnet_probe,
	.remove = dnet_remove,
	.driver = {
		.name = "dnet",
	},
};

module_platform_driver(dnet_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");