/*
 * Driver for the Freescale MPC52xx Fast Ethernet Controller (FEC)
 *
 * Author: Dale Farnsworth
 *
 * Licensed under the terms of the GNU General Public License (GPL).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>

#include <asm/io.h>
#include <asm/delay.h>
#include <asm/mpc52xx.h>

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/fec.h>

#include "fec_mpc52xx.h"

#define DRIVER_NAME "mpc52xx-fec"

/* Private driver data structure */
struct mpc52xx_fec_priv {
	struct net_device *ndev;
	int duplex;
	int speed;
	int r_irq;
	int t_irq;
	struct mpc52xx_fec __iomem *fec;
	struct bcom_task *rx_dmatsk;
	struct bcom_task *tx_dmatsk;
	spinlock_t lock;
	int msg_enable;

	/* MDIO link details */
	unsigned int mdio_speed;
	struct device_node *phy_node;
	enum phy_state link;
	int seven_wire_mode;
};

static irqreturn_t mpc52xx_fec_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);

#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
		NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debugging messages level");

static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	unsigned long flags;

	dev_warn(&dev->dev, "transmit timed out\n");

	spin_lock_irqsave(&priv->lock, flags);
	mpc52xx_fec_reset(dev);
	dev->stats.tx_errors++;
	spin_unlock_irqrestore(&priv->lock, flags);

	netif_wake_queue(dev);
}

static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;

	out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
	out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
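/*
 * Layout note (descriptive of the writes above): PADDR1 holds the first four
 * bytes of the station address, while the upper halfword of PADDR2 holds the
 * last two bytes, with FEC_PADDR2_TYPE in the lower halfword.
 */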

static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sock = addr;

	memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);

	mpc52xx_fec_set_paddr(dev, sock->sa_data);
	return 0;
}

static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
{
	while (!bcom_queue_empty(s)) {
		struct bcom_fec_bd *bd;
		struct sk_buff *skb;

		skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
		dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
				 DMA_FROM_DEVICE);
		kfree_skb(skb);
	}
}

static void
mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct bcom_fec_bd *bd;

	bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
	bd->status = FEC_RX_BUFFER_SIZE;
	bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
				    FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
	bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
}

static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
{
	struct sk_buff *skb;

	while (!bcom_queue_full(rxtsk)) {
		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
		if (!skb)
			return -EAGAIN;

		/* zero out the initial receive buffers to aid debugging */
		memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
		mpc52xx_fec_rx_submit(dev, skb);
	}
	return 0;
}

/* PHY link-change callback: mirror the negotiated duplex into the FEC
 * R_CNTRL/X_CNTRL registers and log link state changes. */
static void mpc52xx_fec_adjust_link(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != priv->duplex) {
			struct mpc52xx_fec __iomem *fec = priv->fec;
			u32 rcntrl;
			u32 tcntrl;

			new_state = 1;
			priv->duplex = phydev->duplex;

			rcntrl = in_be32(&fec->r_cntrl);
			tcntrl = in_be32(&fec->x_cntrl);

			rcntrl &= ~FEC_RCNTRL_DRT;
			tcntrl &= ~FEC_TCNTRL_FDEN;
			if (phydev->duplex == DUPLEX_FULL)
				tcntrl |= FEC_TCNTRL_FDEN;	/* FD enable */
			else
				rcntrl |= FEC_RCNTRL_DRT;	/* disable Rx on Tx (HD) */

			out_be32(&fec->r_cntrl, rcntrl);
			out_be32(&fec->x_cntrl, tcntrl);
		}

		if (phydev->speed != priv->speed) {
			new_state = 1;
			priv->speed = phydev->speed;
		}

		if (priv->link == PHY_DOWN) {
			new_state = 1;
			priv->link = phydev->link;
		}

	} else if (priv->link) {
		new_state = 1;
		priv->link = PHY_DOWN;
		priv->speed = 0;
		priv->duplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

static int mpc52xx_fec_open(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int err = -EBUSY;

	if (priv->phy_node) {
		phydev = of_phy_connect(priv->ndev, priv->phy_node,
					mpc52xx_fec_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(&dev->dev, "of_phy_connect failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	}

	if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
			DRIVER_NAME "_ctrl", dev)) {
		dev_err(&dev->dev, "ctrl interrupt request failed\n");
		goto free_phy;
	}
	if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
			DRIVER_NAME "_rx", dev)) {
		dev_err(&dev->dev, "rx interrupt request failed\n");
		goto free_ctrl_irq;
	}
	if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
			DRIVER_NAME "_tx", dev)) {
		dev_err(&dev->dev, "tx interrupt request failed\n");
		goto free_2irqs;
	}

	bcom_fec_rx_reset(priv->rx_dmatsk);
	bcom_fec_tx_reset(priv->tx_dmatsk);

	err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
	if (err) {
		dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
		goto free_irqs;
	}

	bcom_enable(priv->rx_dmatsk);
	bcom_enable(priv->tx_dmatsk);

	mpc52xx_fec_start(dev);

	netif_start_queue(dev);

	return 0;

 free_irqs:
	free_irq(priv->t_irq, dev);
 free_2irqs:
	free_irq(priv->r_irq, dev);
 free_ctrl_irq:
	free_irq(dev->irq, dev);
 free_phy:
	if (phydev) {
		phy_stop(phydev);
		phy_disconnect(phydev);
	}

	return err;
}

static int mpc52xx_fec_close(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	netif_stop_queue(dev);

	mpc52xx_fec_stop(dev, true);

	mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);

	free_irq(dev->irq, dev);
	free_irq(priv->r_irq, dev);
	free_irq(priv->t_irq, dev);

	if (phydev) {
		phy_stop(phydev);
		phy_disconnect(phydev);
	}

	return 0;
}

/*
 * This will only be invoked if your driver is _not_ in XOFF state.
 * What this means is that you need not check it, and that this
 * invariant will hold if you make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static netdev_tx_t
mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct bcom_fec_bd *bd;
	unsigned long flags;

	if (bcom_queue_full(priv->tx_dmatsk)) {
		if (net_ratelimit())
			dev_err(&dev->dev, "transmit queue overrun\n");
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&priv->lock, flags);

	bd = (struct bcom_fec_bd *)
		bcom_prepare_next_buffer(priv->tx_dmatsk);

	bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
	bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
				    DMA_TO_DEVICE);

	skb_tx_timestamp(skb);
	bcom_submit_next_buffer(priv->tx_dmatsk, skb);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* If the next descriptor slot is already busy, stop the queue */
	if (bcom_queue_full(priv->tx_dmatsk))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}
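/*
 * Note: each skb maps to exactly one BestComm TX descriptor, so the queue is
 * stopped as soon as the task ring fills and is woken again from the TX
 * completion interrupt below.
 */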

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mpc52xx_fec_poll_controller(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);

	disable_irq(priv->t_irq);
	mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
	enable_irq(priv->t_irq);
	disable_irq(priv->r_irq);
	mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
	enable_irq(priv->r_irq);
}
#endif

/* This handles BestComm transmit task interrupts */
static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	while (bcom_buffer_done(priv->tx_dmatsk)) {
		struct sk_buff *skb;
		struct bcom_fec_bd *bd;

		skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
					   (struct bcom_bd **)&bd);
		dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
				 DMA_TO_DEVICE);

		dev_consume_skb_irq(skb);
	}
	spin_unlock(&priv->lock);

	netif_wake_queue(dev);

	return IRQ_HANDLED;
}

static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct sk_buff *rskb;
	struct sk_buff *skb;
	struct bcom_fec_bd *bd;
	u32 status, physaddr;
	int length;

	spin_lock(&priv->lock);

	while (bcom_buffer_done(priv->rx_dmatsk)) {
		rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
					    (struct bcom_bd **)&bd);
		physaddr = bd->skb_pa;

		/* Test for errors in received frame */
		if (status & BCOM_FEC_RX_BD_ERRORS) {
			/* Drop packet and reuse the buffer */
			mpc52xx_fec_rx_submit(dev, rskb);
			dev->stats.rx_dropped++;
			continue;
		}

		/* skbs are allocated on open, so now we allocate a new one,
		 * and remove the old (with the packet) */
		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
		if (!skb) {
			/* Can't get a new one : reuse the same & drop pkt */
			dev_notice(&dev->dev, "Low memory - dropped packet.\n");
			mpc52xx_fec_rx_submit(dev, rskb);
			dev->stats.rx_dropped++;
			continue;
		}

		/* Enqueue the new sk_buff back on the hardware */
		mpc52xx_fec_rx_submit(dev, skb);

		/* Process the received skb - drop the spin lock while
		 * calling into the network stack */
		spin_unlock(&priv->lock);

		dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
				 DMA_FROM_DEVICE);
		length = status & BCOM_FEC_RX_BD_LEN_MASK;
		skb_put(rskb, length - 4);	/* length without CRC32 */
		rskb->protocol = eth_type_trans(rskb, dev);
		if (!skb_defer_rx_timestamp(rskb))
			netif_rx(rskb);

		spin_lock(&priv->lock);
	}

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	u32 ievent;

	ievent = in_be32(&fec->ievent);

	ievent &= ~FEC_IEVENT_MII;	/* MII events are handled by the MDIO driver */
	if (!ievent)
		return IRQ_NONE;

	out_be32(&fec->ievent, ievent);		/* clear pending events */

	/* on fifo error, soft-reset fec */
	if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {

		if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
			dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
		if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
			dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");

		spin_lock(&priv->lock);
		mpc52xx_fec_reset(dev);
		spin_unlock(&priv->lock);

		return IRQ_HANDLED;
	}

	if (ievent & ~FEC_IEVENT_TFINT)
		dev_dbg(&dev->dev, "ievent: %08x\n", ievent);

	return IRQ_HANDLED;
}

/*
 * Get the current statistics.
 * This may be called with the card open or closed.
 */
static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct mpc52xx_fec __iomem *fec = priv->fec;

	stats->rx_bytes = in_be32(&fec->rmon_r_octets);
	stats->rx_packets = in_be32(&fec->rmon_r_packets);
	stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
		in_be32(&fec->rmon_r_undersize) +
		in_be32(&fec->rmon_r_oversize) +
		in_be32(&fec->rmon_r_frag) +
		in_be32(&fec->rmon_r_jab);

	stats->tx_bytes = in_be32(&fec->rmon_t_octets);
	stats->tx_packets = in_be32(&fec->rmon_t_packets);
	stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
		in_be32(&fec->rmon_t_undersize) +
		in_be32(&fec->rmon_t_oversize) +
		in_be32(&fec->rmon_t_frag) +
		in_be32(&fec->rmon_t_jab);

	stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
	stats->collisions = in_be32(&fec->rmon_t_col);

	/* detailed rx_errors: */
	stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
					+ in_be32(&fec->rmon_r_oversize)
					+ in_be32(&fec->rmon_r_frag)
					+ in_be32(&fec->rmon_r_jab);
	stats->rx_over_errors = in_be32(&fec->r_macerr);
	stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
	stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
	stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
	stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);

	/* detailed tx_errors: */
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
	stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
	stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
	stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);

	return stats;
}

/*
 * Zero the hardware MIB counters (with MIB updates disabled while they are
 * cleared) and zero the in-memory statistics.
 */
static void mpc52xx_fec_reset_stats(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;

	out_be32(&fec->mib_control, FEC_MIB_DISABLE);
	memset_io(&fec->rmon_t_drop, 0,
		  offsetof(struct mpc52xx_fec, reserved10) -
		  offsetof(struct mpc52xx_fec, rmon_t_drop));
	out_be32(&fec->mib_control, 0);

	memset(&dev->stats, 0, sizeof(dev->stats));
}

/*
 * Set or clear the multicast filter for this adaptor.
 */
static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	u32 rx_control;

	rx_control = in_be32(&fec->r_cntrl);

	if (dev->flags & IFF_PROMISC) {
		rx_control |= FEC_RCNTRL_PROM;
		out_be32(&fec->r_cntrl, rx_control);
	} else {
		rx_control &= ~FEC_RCNTRL_PROM;
		out_be32(&fec->r_cntrl, rx_control);

		if (dev->flags & IFF_ALLMULTI) {
			out_be32(&fec->gaddr1, 0xffffffff);
			out_be32(&fec->gaddr2, 0xffffffff);
		} else {
			u32 crc;
			struct netdev_hw_addr *ha;
			u32 gaddr1 = 0x00000000;
			u32 gaddr2 = 0x00000000;

			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr) >> 26;
				if (crc >= 32)
					gaddr1 |= 1 << (crc - 32);
				else
					gaddr2 |= 1 << crc;
			}
			out_be32(&fec->gaddr1, gaddr1);
			out_be32(&fec->gaddr2, gaddr2);
		}
	}
}
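/*
 * Hash details (descriptive of the loop above): ether_crc_le(6, addr) >> 26
 * yields a 6-bit index; indexes 32..63 select a bit in GADDR1 and indexes
 * 0..31 select a bit in GADDR2, forming the 64-bit group address hash table.
 */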


/* Hardware initialization */

static void mpc52xx_fec_hw_init(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	int i;

	/* Reset the FEC and wait for the reset to complete */
	out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
	for (i = 0; i < FEC_RESET_DELAY; ++i) {
		if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
			break;
		udelay(1);
	}
	if (i == FEC_RESET_DELAY)
		dev_err(&dev->dev, "FEC Reset timeout!\n");

	/* set pause to 0x20 frames */
	out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);

	/* frame mode and transfer granularity for the RX and TX FIFOs */
	out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
	out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);

	/* FIFO alarm thresholds */
	out_be32(&fec->rfifo_alarm, 0x0000030c);
	out_be32(&fec->tfifo_alarm, 0x00000100);

	/* begin transmission when 256 bytes are in the FIFO */
	out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);

	/* enable CRC generation/appending on transmit */
	out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
	out_be32(&fec->iaddr1, 0x00000000);	/* no individual hash filter */
	out_be32(&fec->iaddr2, 0x00000000);

	/* set the MDC clock speed; this must be done here rather than in the
	 * phy driver so it is in place before the MDIO bus is used, even
	 * across a resume */
	out_be32(&fec->mii_speed, priv->mdio_speed);
}

/*
 * Start (or restart) the FEC receive/transmit machinery.  Called from
 * open() and from the soft-reset path.
 */
static void mpc52xx_fec_start(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	u32 rcntrl;
	u32 tcntrl;
	u32 tmp;

	/* clear sticky error bits */
	tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
	out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
	out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);

	/* FIFOs will reset on enable */
	out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);

	/* Set station address */
	mpc52xx_fec_set_paddr(dev, dev->dev_addr);

	mpc52xx_fec_set_multicast_list(dev);

	/* set max frame length, enable flow control, select MII mode */
	rcntrl = FEC_RX_BUFFER_SIZE << 16;	/* max frame length */
	rcntrl |= FEC_RCNTRL_FCE;

	if (!priv->seven_wire_mode)
		rcntrl |= FEC_RCNTRL_MII_MODE;

	if (priv->duplex == DUPLEX_FULL) {
		tcntrl = FEC_TCNTRL_FDEN;	/* FD enable */
	} else {
		rcntrl |= FEC_RCNTRL_DRT;	/* disable Rx on Tx (HD) */
		tcntrl = 0;
	}
	out_be32(&fec->r_cntrl, rcntrl);
	out_be32(&fec->x_cntrl, tcntrl);

	/* Clear any outstanding interrupt */
	out_be32(&fec->ievent, 0xffffffff);

	/* Enable the interrupts we wish to service */
	out_be32(&fec->imask, FEC_IMASK_ENABLE);

	/* And last, enable the transmit and receive processing */
	out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
	out_be32(&fec->r_des_active, 0x01000000);
}

/*
 * Stop the FEC: mask interrupts, halt the RX BestComm task, optionally
 * wait for the TX queue to drain, then disable the MAC.
 */
static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	unsigned long timeout;

	/* disable all interrupts */
	out_be32(&fec->imask, 0);

	/* disable the rx task */
	bcom_disable(priv->rx_dmatsk);

	/* wait for the tx queue to drain, but only if we may sleep */
	if (may_sleep) {
		timeout = jiffies + msecs_to_jiffies(2000);
		while (time_before(jiffies, timeout) &&
		       !bcom_queue_empty(priv->tx_dmatsk))
			msleep(100);

		if (time_after_eq(jiffies, timeout)) {
			dev_err(&dev->dev, "queues didn't drain\n");
			dev_err(&dev->dev, "  tx: index: %i, outdex: %i\n",
				priv->tx_dmatsk->index,
				priv->tx_dmatsk->outdex);
			dev_err(&dev->dev, "  rx: index: %i, outdex: %i\n",
				priv->rx_dmatsk->index,
				priv->rx_dmatsk->outdex);
		}
	}

	bcom_disable(priv->tx_dmatsk);

	/* Stop FEC */
	out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
}

/* reset fec and bestcomm tasks */
static void mpc52xx_fec_reset(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;

	mpc52xx_fec_stop(dev, false);

	out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
	out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);

	mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);

	mpc52xx_fec_hw_init(dev);

	bcom_fec_rx_reset(priv->rx_dmatsk);
	bcom_fec_tx_reset(priv->tx_dmatsk);

	mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);

	bcom_enable(priv->rx_dmatsk);
	bcom_enable(priv->tx_dmatsk);

	mpc52xx_fec_start(dev);

	netif_wake_queue(dev);
}


/* ethtool interface */

static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_msglevel = mpc52xx_fec_get_msglevel,
	.set_msglevel = mpc52xx_fec_set_msglevel,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};


static const struct net_device_ops mpc52xx_fec_netdev_ops = {
	.ndo_open = mpc52xx_fec_open,
	.ndo_stop = mpc52xx_fec_close,
	.ndo_start_xmit = mpc52xx_fec_start_xmit,
	.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
	.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = phy_do_ioctl,
	.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
	.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mpc52xx_fec_poll_controller,
#endif
};


/* ======================================================================== */
/* OF Driver                                                                */
/* ======================================================================== */

static int mpc52xx_fec_probe(struct platform_device *op)
{
	int rv;
	struct net_device *ndev;
	struct mpc52xx_fec_priv *priv = NULL;
	struct resource mem;
	const u32 *prop;
	int prop_size;
	struct device_node *np = op->dev.of_node;
	const char *mac_addr;

	phys_addr_t rx_fifo;
	phys_addr_t tx_fifo;

	/* Get the ether ndev & its private zone */
	ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	/* Reserve FEC control zone */
	rv = of_address_to_resource(np, 0, &mem);
	if (rv) {
		pr_err("Error while parsing device node resource\n");
		goto err_netdev;
	}
	if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
		pr_err("invalid resource size (%lx < %zx), check mpc52xx_devices.c\n",
		       (unsigned long)resource_size(&mem),
		       sizeof(struct mpc52xx_fec));
		rv = -EINVAL;
		goto err_netdev;
	}

	if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
				DRIVER_NAME)) {
		rv = -EBUSY;
		goto err_netdev;
	}

	/* Init ether ndev with what we have */
	ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
	ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
	ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
	ndev->base_addr = mem.start;
	SET_NETDEV_DEV(ndev, &op->dev);

	spin_lock_init(&priv->lock);

	/* ioremap the register zone */
	priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));
	if (!priv->fec) {
		rv = -ENOMEM;
		goto err_mem_region;
	}

	/* BestComm init */
	rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
	tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);

	priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
	priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);

	if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
		pr_err("Can not init SDMA tasks\n");
		rv = -ENOMEM;
		goto err_rx_tx_dmatsk;
	}

	/* Get the IRQs we need: the control IRQ from the device node, the
	 * RX and TX IRQs from the BestComm tasks */
	ndev->irq = irq_of_parse_and_map(np, 0);
	priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
	priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);

	/*
	 * MAC address init: first try the device tree, then fall back to
	 * whatever is already programmed into the controller (e.g. by the
	 * bootloader).
	 */
	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
	} else {
		struct mpc52xx_fec __iomem *fec = priv->fec;

		*(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
		*(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
	}

	/* If the MAC address is still not valid, use a random one */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(&ndev->dev, "using random MAC address %pM\n",
			 ndev->dev_addr);
	}

	priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);

	/*
	 * Link mode defaults: assume 100 Mbit half duplex until the PHY (or
	 * the "current-speed" property below) says otherwise.  The MDC clock
	 * divider is derived from the IPB bus frequency.
	 */
	priv->speed = 100;
	priv->duplex = DUPLEX_HALF;
	priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;

	/* "current-speed" overrides the defaults above */
	prop = of_get_property(np, "current-speed", &prop_size);
	if (prop && (prop_size >= sizeof(u32) * 2)) {
		priv->speed = prop[0];
		priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
	}

	/* If there is a phy handle, use it */
	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* the 7-wire property means don't use MII mode */
	if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
		priv->seven_wire_mode = 1;
		dev_info(&ndev->dev, "using 7-wire PHY mode\n");
	}

	/* Hardware init */
	mpc52xx_fec_hw_init(ndev);
	mpc52xx_fec_reset_stats(ndev);

	rv = register_netdev(ndev);
	if (rv < 0)
		goto err_node;

	/* We're done ! */
	platform_set_drvdata(op, ndev);
	netdev_info(ndev, "%pOF MAC %pM\n",
		    op->dev.of_node, ndev->dev_addr);

	return 0;

err_node:
	of_node_put(priv->phy_node);
	irq_dispose_mapping(ndev->irq);
err_rx_tx_dmatsk:
	if (priv->rx_dmatsk)
		bcom_fec_rx_release(priv->rx_dmatsk);
	if (priv->tx_dmatsk)
		bcom_fec_tx_release(priv->tx_dmatsk);
	iounmap(priv->fec);
err_mem_region:
	release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
err_netdev:
	free_netdev(ndev);

	return rv;
}

static int
mpc52xx_fec_remove(struct platform_device *op)
{
	struct net_device *ndev;
	struct mpc52xx_fec_priv *priv;

	ndev = platform_get_drvdata(op);
	priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	of_node_put(priv->phy_node);
	priv->phy_node = NULL;

	irq_dispose_mapping(ndev->irq);

	bcom_fec_rx_release(priv->rx_dmatsk);
	bcom_fec_tx_release(priv->tx_dmatsk);

	iounmap(priv->fec);

	release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(op);

	if (netif_running(dev))
		mpc52xx_fec_close(dev);

	return 0;
}

static int mpc52xx_fec_of_resume(struct platform_device *op)
{
	struct net_device *dev = platform_get_drvdata(op);

	mpc52xx_fec_hw_init(dev);
	mpc52xx_fec_reset_stats(dev);

	if (netif_running(dev))
		mpc52xx_fec_open(dev);

	return 0;
}
#endif

static const struct of_device_id mpc52xx_fec_match[] = {
	{ .compatible = "fsl,mpc5200b-fec", },
	{ .compatible = "fsl,mpc5200-fec", },
	{ .compatible = "mpc5200-fec", },
	{ }
};

MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
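/*
 * Illustrative example only (exact values are board specific and come from
 * the platform's device tree, not from this driver): a node matched by the
 * table above typically looks like
 *
 *	ethernet@3000 {
 *		compatible = "fsl,mpc5200b-fec";
 *		reg = <0x3000 0x400>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		phy-handle = <&phy0>;
 *	};
 */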

static struct platform_driver mpc52xx_fec_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mpc52xx_fec_match,
	},
	.probe = mpc52xx_fec_probe,
	.remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
	.suspend = mpc52xx_fec_of_suspend,
	.resume = mpc52xx_fec_of_resume,
#endif
};


/* ======================================================================== */
/* Module                                                                   */
/* ======================================================================== */

static struct platform_driver * const drivers[] = {
#ifdef CONFIG_FEC_MPC52xx_MDIO
	&mpc52xx_fec_mdio_driver,
#endif
	&mpc52xx_fec_driver,
};

static int __init
mpc52xx_fec_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit
mpc52xx_fec_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}


module_init(mpc52xx_fec_init);
module_exit(mpc52xx_fec_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");