/*
 * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/interrupt.h>

#define DRIVER_NAME "xilinx_emaclite"

/* Register offsets for the EmacLite device */
#define XEL_TXBUFF_OFFSET	0x0		/* Transmit Buffer */
#define XEL_MDIOADDR_OFFSET	0x07E4		/* MDIO Address Register */
#define XEL_MDIOWR_OFFSET	0x07E8		/* MDIO Write Data Register */
#define XEL_MDIORD_OFFSET	0x07EC		/* MDIO Read Data Register */
#define XEL_MDIOCTRL_OFFSET	0x07F0		/* MDIO Control Register */
#define XEL_GIER_OFFSET		0x07F8		/* GIE Register */
#define XEL_TSR_OFFSET		0x07FC		/* Tx status */
#define XEL_TPLR_OFFSET		0x07F4		/* Tx packet length */

#define XEL_RXBUFF_OFFSET	0x1000		/* Receive Buffer */
#define XEL_RPLR_OFFSET		0x100C		/* Rx packet length */
#define XEL_RSR_OFFSET		0x17FC		/* Rx status */

#define XEL_BUFFER_OFFSET	0x0800		/* Next Tx/Rx buffer's offset */

/* MDIO Address Register Bit Masks */
#define XEL_MDIOADDR_REGADR_MASK  0x0000001F	/* Register Address */
#define XEL_MDIOADDR_PHYADR_MASK  0x000003E0	/* PHY Address */
#define XEL_MDIOADDR_PHYADR_SHIFT 5
#define XEL_MDIOADDR_OP_MASK	  0x00000400	/* RD/WR Operation */

/* MDIO Write Data Register Bit Masks */
#define XEL_MDIOWR_WRDATA_MASK	  0x0000FFFF	/* Data to be written */

/* MDIO Read Data Register Bit Masks */
#define XEL_MDIORD_RDDATA_MASK	  0x0000FFFF	/* Data to be read */

/* MDIO Control Register Bit Masks */
#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001	/* MDIO Status Mask */
#define XEL_MDIOCTRL_MDIOEN_MASK  0x00000008	/* MDIO Enable */

/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK	0x80000000	/* Global Enable */

/* Transmit Status Register (TSR) Bit Masks */
#define XEL_TSR_XMIT_BUSY_MASK	 0x00000001	/* Tx complete */
#define XEL_TSR_PROGRAM_MASK	 0x00000002	/* Program the MAC address */
#define XEL_TSR_XMIT_IE_MASK	 0x00000008	/* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000	/* Buffer is active, SW bit
						 * only. This is not documented
						 * in the HW spec
						 */

/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR	(XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)

/* Receive Status Register (RSR) Bit Masks */
#define XEL_RSR_RECV_DONE_MASK	0x00000001	/* Rx complete */
#define XEL_RSR_RECV_IE_MASK	0x00000008	/* Rx interrupt enable bit */

/* Transmit Packet Length Register (TPLR) Bit Masks */
#define XEL_TPLR_LENGTH_MASK	0x0000FFFF	/* Tx packet length */

/* Receive Packet Length Register (RPLR) Bit Masks */
#define XEL_RPLR_LENGTH_MASK	0x0000FFFF	/* Rx packet length */

#define XEL_HEADER_OFFSET	12		/* Offset to length field */
#define XEL_HEADER_SHIFT	16		/* Shift value for length */

/* General Ethernet Definitions */
#define XEL_ARP_PACKET_SIZE		28	/* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET	16	/* IP Length Offset */

#define TX_TIMEOUT		(60*HZ)		/* Tx timeout is 60 seconds */
#define ALIGNMENT		4

/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
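
/**
 * struct net_local - Our private per device data
 * @ndev:		instance of the network device
 * @tx_ping_pong:	indicates whether Tx Pong buffer is configured in HW
 * @rx_ping_pong:	indicates whether Rx Pong buffer is configured in HW
 * @next_tx_buf_to_use:	next Tx buffer to write to
 * @next_rx_buf_to_use:	next Rx buffer to read from
 * @base_addr:		base address of the Emaclite device
 * @reset_lock:		lock used for synchronization
 * @deferred_skb:	holds an skb (for transmission at a later time) when the
 *			Tx buffer is not free
 * @phy_dev:		pointer to the PHY device
 * @phy_node:		pointer to the PHY device node
 * @mii_bus:		pointer to the MII bus
 * @mdio_irqs:		IRQs table for MDIO bus
 * @last_link:		last link status
 * @has_mdio:		indicates whether MDIO is included in the HW
 */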
struct net_local {

	struct net_device *ndev;

	bool tx_ping_pong;
	bool rx_ping_pong;
	u32 next_tx_buf_to_use;
	u32 next_rx_buf_to_use;
	void __iomem *base_addr;

	spinlock_t reset_lock;
	struct sk_buff *deferred_skb;

	struct phy_device *phy_dev;
	struct device_node *phy_node;

	struct mii_bus *mii_bus;
	int mdio_irqs[PHY_MAX_ADDR];

	int last_link;
	bool has_mdio;
};
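
/**
 * xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device
 * @drvdata:	Pointer to the Emaclite device private data
 *
 * This function enables the Tx and Rx interrupts for the Emaclite device along
 * with the Global Interrupt Enable.
 */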
static void xemaclite_enable_interrupts(struct net_local *drvdata)
{
	u32 reg_data;

	/* Enable the Tx interrupts for the first buffer */
	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
	__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
		     drvdata->base_addr + XEL_TSR_OFFSET);

	/* Enable the Rx interrupts for the first buffer */
	__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);

	/* Enable the Global Interrupt Enable */
	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
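
/**
 * xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device
 * @drvdata:	Pointer to the Emaclite device private data
 *
 * This function disables the Tx and Rx interrupts for the Emaclite device,
 * along with the Global Interrupt Enable.
 */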
static void xemaclite_disable_interrupts(struct net_local *drvdata)
{
	u32 reg_data;

	/* Write the Global Interrupt Enable register */
	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);

	/* Disable the Tx interrupts for the first buffer */
	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
	__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
		     drvdata->base_addr + XEL_TSR_OFFSET);

	/* Disable the Rx interrupts for the first buffer */
	reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
	__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
		     drvdata->base_addr + XEL_RSR_OFFSET);
}
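
/**
 * xemaclite_aligned_write - Write from 16-bit aligned to 32-bit aligned address
 * @src_ptr:	Void pointer to the 16-bit aligned source address
 * @dest_ptr:	Pointer to the 32-bit aligned destination address
 * @length:	Number of bytes to write
 *
 * This function writes data from a 16-bit aligned buffer to a 32-bit aligned
 * address in the EmacLite device.
 */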
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
				    unsigned length)
{
	u32 align_buffer;
	u32 *to_u32_ptr;
	u16 *from_u16_ptr, *to_u16_ptr;

	to_u32_ptr = dest_ptr;
	from_u16_ptr = src_ptr;
	align_buffer = 0;

	for (; length > 3; length -= 4) {
		to_u16_ptr = (u16 *)&align_buffer;
		*to_u16_ptr++ = *from_u16_ptr++;
		*to_u16_ptr++ = *from_u16_ptr++;

		/* This barrier resolves occasional issues seen around
		 * cases where the data is not properly flushed out
		 * from the processor store buffers to the destination
		 * memory locations.
		 */
		wmb();

		/* Output a word */
		*to_u32_ptr++ = align_buffer;
	}
	if (length) {
		u8 *from_u8_ptr, *to_u8_ptr;

		/* Set up to output the remaining data */
		align_buffer = 0;
		to_u8_ptr = (u8 *) &align_buffer;
		from_u8_ptr = (u8 *) from_u16_ptr;

		/* Output the remaining data */
		for (; length > 0; length--)
			*to_u8_ptr++ = *from_u8_ptr++;

		/* This barrier resolves occasional issues seen around
		 * cases where the data is not properly flushed out
		 * from the processor store buffers to the destination
		 * memory locations.
		 */
		wmb();
		*to_u32_ptr = align_buffer;
	}
}
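
/**
 * xemaclite_aligned_read - Read from 32-bit aligned to 16-bit aligned buffer
 * @src_ptr:	Pointer to the 32-bit aligned source address
 * @dest_ptr:	Pointer to the 16-bit aligned destination address
 * @length:	Number of bytes to read
 *
 * This function reads data from a 32-bit aligned address in the EmacLite
 * device to a 16-bit aligned buffer.
 */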
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
				   unsigned length)
{
	u16 *to_u16_ptr, *from_u16_ptr;
	u32 *from_u32_ptr;
	u32 align_buffer;

	from_u32_ptr = src_ptr;
	to_u16_ptr = (u16 *) dest_ptr;

	for (; length > 3; length -= 4) {
		/* Copy each word into the temporary buffer */
		align_buffer = *from_u32_ptr++;
		from_u16_ptr = (u16 *)&align_buffer;

		/* Read data from source */
		*to_u16_ptr++ = *from_u16_ptr++;
		*to_u16_ptr++ = *from_u16_ptr++;
	}

	if (length) {
		u8 *to_u8_ptr, *from_u8_ptr;

		/* Set up to read the remaining data */
		to_u8_ptr = (u8 *) to_u16_ptr;
		align_buffer = *from_u32_ptr++;
		from_u8_ptr = (u8 *) &align_buffer;

		/* Read the remaining data */
		for (; length > 0; length--)
			*to_u8_ptr++ = *from_u8_ptr++;
	}
}
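
/**
 * xemaclite_send_data - Send an Ethernet frame
 * @drvdata:	Pointer to the Emaclite device private data
 * @data:	Pointer to the data to be sent
 * @byte_count:	Total frame size, including header
 *
 * This function checks if the Tx buffer of the Emaclite device is free to send
 * data. If so, it fills the Tx buffer with data for transmission. Otherwise, it
 * returns an error.
 *
 * Return:	0 upon success or -1 if the buffer(s) are full.
 *
 * Note:	The maximum Tx packet size can not be more than Ethernet header
 *		(14 Bytes) + Maximum MTU (1500 bytes). This is excluding FCS.
 */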
static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
			       unsigned int byte_count)
{
	u32 reg_data;
	void __iomem *addr;

	/* Determine the expected Tx buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	/* If the length is too large, clamp it to the maximum frame size */
	if (byte_count > ETH_FRAME_LEN)
		byte_count = ETH_FRAME_LEN;

	/* Check if the expected buffer is available */
	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
	if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
	     XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {

		/* Switch to next buffer if configured */
		if (drvdata->tx_ping_pong != 0)
			drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
	} else if (drvdata->tx_ping_pong != 0) {
		/* If the expected buffer is full, try the other buffer,
		 * if it is configured in HW
		 */
		addr = (void __iomem __force *)((u32 __force)addr ^
						 XEL_BUFFER_OFFSET);
		reg_data = __raw_readl(addr + XEL_TSR_OFFSET);

		if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
		     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
			return -1;	/* Buffers were full, return failure */
	} else
		return -1;	/* Buffer was full, return failure */

	/* Write the frame to the buffer */
	xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);

	__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
		     addr + XEL_TPLR_OFFSET);

	/* Update the Tx Status Register to indicate that there is a
	 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
	 * is used by the interrupt handler to check whether a frame
	 * has been transmitted.
	 */
	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
	reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
	__raw_writel(reg_data, addr + XEL_TSR_OFFSET);

	return 0;
}
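
/**
 * xemaclite_recv_data - Receive a frame
 * @drvdata:	Pointer to the Emaclite device private data
 * @data:	Address where the data is to be received
 *
 * This function is intended to be called from the interrupt context or
 * with a wrapper which waits for the receive frame to be available.
 *
 * Return:	Total number of bytes received
 */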
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
{
	void __iomem *addr;
	u16 length, proto_type;
	u32 reg_data;

	/* Determine the expected buffer address */
	addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);

	/* Verify which buffer has valid data */
	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);

	if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
		if (drvdata->rx_ping_pong != 0)
			drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET;
	} else {
		/* The instance is out of sync, try other buffer if other
		 * buffer is configured, return 0 otherwise. If the instance is
		 * out of sync, do not update the 'next_rx_buf_to_use' since it
		 * will correct on subsequent calls.
		 */
		if (drvdata->rx_ping_pong != 0)
			addr = (void __iomem __force *)((u32 __force)addr ^
							 XEL_BUFFER_OFFSET);
		else
			return 0;	/* No data was available */

		/* Verify that buffer has valid data */
		reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
		if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
		     XEL_RSR_RECV_DONE_MASK)
			return 0;	/* No data was available */
	}

	/* Get the protocol type of the ethernet frame that arrived */
	proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
			XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
			XEL_RPLR_LENGTH_MASK);

	/* Check if received ethernet frame is a raw ethernet frame
	 * or an IP packet or an ARP packet
	 */
	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {

		if (proto_type == ETH_P_IP) {
			length = ((ntohl(__raw_readl(addr +
					XEL_HEADER_IP_LENGTH_OFFSET +
					XEL_RXBUFF_OFFSET)) >>
					XEL_HEADER_SHIFT) &
					XEL_RPLR_LENGTH_MASK);
			length += ETH_HLEN + ETH_FCS_LEN;

		} else if (proto_type == ETH_P_ARP)
			length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
		else
			/* Field contains type other than IP or ARP, use max
			 * frame size and let user parse it
			 */
			length = ETH_FRAME_LEN + ETH_FCS_LEN;
	} else
		/* Use the length in the frame, plus the header and trailer */
		length = proto_type + ETH_HLEN + ETH_FCS_LEN;

	/* Read from the EmacLite device */
	xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
				data, length);

	/* Acknowledge the frame */
	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
	reg_data &= ~XEL_RSR_RECV_DONE_MASK;
	__raw_writel(reg_data, addr + XEL_RSR_OFFSET);

	return length;
}
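
/**
 * xemaclite_update_address - Update the MAC address in the device
 * @drvdata:	Pointer to the Emaclite device private data
 * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
 *
 * Tx must be idle and Rx should be idle for deterministic results.
 * It is recommended that this function should be called after the
 * initialization and before transmission of any packets from the device.
 */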
static void xemaclite_update_address(struct net_local *drvdata,
				     u8 *address_ptr)
{
	void __iomem *addr;
	u32 reg_data;

	/* Determine the expected Tx buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);

	__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);

	/* Update the MAC address in the EmacLite */
	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
	__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);

	/* Wait for EmacLite to finish with the MAC address update */
	while ((__raw_readl(addr + XEL_TSR_OFFSET) &
		XEL_TSR_PROG_MAC_ADDR) != 0)
		;
}
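
/**
 * xemaclite_set_mac_address - Set the MAC address for this device
 * @dev:	Pointer to the network device instance
 * @address:	Void pointer to the sockaddr structure
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * Return:	Error if the net device is busy or 0 if the addr is set
 *		successfully
 */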
static int xemaclite_set_mac_address(struct net_device *dev, void *address)
{
	struct net_local *lp = netdev_priv(dev);
	struct sockaddr *addr = address;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	xemaclite_update_address(lp, dev->dev_addr);
	return 0;
}
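
/**
 * xemaclite_tx_timeout - Callback for Tx Timeout
 * @dev:	Pointer to the network device
 *
 * This function is called when Tx time out occurs for Emaclite device.
 */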
static void xemaclite_tx_timeout(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
		TX_TIMEOUT * 1000UL / HZ);

	dev->stats.tx_errors++;

	/* Reset the device */
	spin_lock_irqsave(&lp->reset_lock, flags);

	/* Shouldn't really be necessary, but shouldn't hurt */
	netif_stop_queue(dev);

	xemaclite_disable_interrupts(lp);
	xemaclite_enable_interrupts(lp);

	if (lp->deferred_skb) {
		dev_kfree_skb(lp->deferred_skb);
		lp->deferred_skb = NULL;
		dev->stats.tx_errors++;
	}

	/* To exclude tx timeout */
	dev->trans_start = jiffies;

	/* We're all ready to go. Start the queue */
	netif_wake_queue(dev);
	spin_unlock_irqrestore(&lp->reset_lock, flags);
}
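
/**
 * xemaclite_tx_handler - Interrupt handler for frames sent
 * @dev:	Pointer to the network device
 *
 * This function updates the number of packets transmitted and handles the
 * deferred skb, if there is one.
 */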
static void xemaclite_tx_handler(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	dev->stats.tx_packets++;
	if (lp->deferred_skb) {
		if (xemaclite_send_data(lp,
					(u8 *) lp->deferred_skb->data,
					lp->deferred_skb->len) != 0)
			return;
		else {
			dev->stats.tx_bytes += lp->deferred_skb->len;
			dev_kfree_skb_irq(lp->deferred_skb);
			lp->deferred_skb = NULL;
			dev->trans_start = jiffies;
			netif_wake_queue(dev);
		}
	}
}
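
/**
 * xemaclite_rx_handler - Interrupt handler for frames received
 * @dev:	Pointer to the network device
 *
 * This function allocates memory for a socket buffer, fills it with data
 * received and hands it over to the TCP/IP stack.
 */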
static void xemaclite_rx_handler(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int align;
	u32 len;

	len = ETH_FRAME_LEN + ETH_FCS_LEN;
	skb = netdev_alloc_skb(dev, len + ALIGNMENT);
	if (!skb) {
		/* Couldn't get memory. */
		dev->stats.rx_dropped++;
		dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n");
		return;
	}

	/* A new skb should have the data halfword aligned, but this code is
	 * here just in case that isn't true. Calculate how many
	 * bytes we should reserve to get the data to start on a word
	 * boundary.
	 */
	align = BUFFER_ALIGN(skb->data);
	if (align)
		skb_reserve(skb, align);

	skb_reserve(skb, 2);

	len = xemaclite_recv_data(lp, (u8 *) skb->data);

	if (!len) {
		dev->stats.rx_errors++;
		dev_kfree_skb_irq(skb);
		return;
	}

	skb_put(skb, len);	/* Tell the skb how much data we got */

	skb->protocol = eth_type_trans(skb, dev);
	skb_checksum_none_assert(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	if (!skb_defer_rx_timestamp(skb))
		netif_rx(skb);	/* deliver to upper network layers */
}
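
/**
 * xemaclite_interrupt - Interrupt handler for this driver
 * @irq:	Irq of the Emaclite device
 * @dev_id:	Void pointer to the network device instance used as callback
 *		reference
 *
 * This function handles the Tx and Rx interrupts of the EmacLite device.
 *
 * Return:	IRQ_HANDLED
 */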
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
	bool tx_complete = false;
	struct net_device *dev = dev_id;
	struct net_local *lp = netdev_priv(dev);
	void __iomem *base_addr = lp->base_addr;
	u32 tx_status;

	/* Check if there is Rx Data available */
	if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
			 XEL_RSR_RECV_DONE_MASK) ||
	    (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
			 & XEL_RSR_RECV_DONE_MASK))

		xemaclite_rx_handler(dev);

	/* Check if the Transmission for the first buffer is completed */
	tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
	     (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {

		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
		__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);

		tx_complete = true;
	}

	/* Check if the Transmission for the second buffer is completed */
	tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
	     (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {

		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
		__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
			     XEL_TSR_OFFSET);

		tx_complete = true;
	}

	/* If there was a Tx interrupt, call the Tx Handler */
	if (tx_complete != 0)
		xemaclite_tx_handler(dev);

	return IRQ_HANDLED;
}
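
/**
 * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
 * @lp:		Pointer to the Emaclite device private data
 *
 * This function waits till the device is ready to accept a new MDIO
 * request.
 *
 * Return:	0 for success or ETIMEDOUT for a timeout
 */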
static int xemaclite_mdio_wait(struct net_local *lp)
{
	unsigned long end = jiffies + 2;

	/* wait for the MDIO interface to not be busy or timeout
	 * after some time.
	 */
	while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
			XEL_MDIOCTRL_MDIOSTS_MASK) {
		if (time_before_eq(end, jiffies)) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}
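
/**
 * xemaclite_mdio_read - Read from a given MII management register
 * @bus:	the mii_bus struct
 * @phy_id:	the phy address
 * @reg:	register number to read from
 *
 * Return:	Value read from the MII management register
 */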
static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct net_local *lp = bus->priv;
	u32 ctrl_reg;
	u32 rc;

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	/* Write the PHY address, register number and set the OP bit in the
	 * MDIO Address register. Set the Status bit in the MDIO Control
	 * register to start a MDIO read transaction.
	 */
	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
	__raw_writel(XEL_MDIOADDR_OP_MASK |
		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
		     lp->base_addr + XEL_MDIOADDR_OFFSET);
	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
		     lp->base_addr + XEL_MDIOCTRL_OFFSET);

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);

	dev_dbg(&lp->ndev->dev,
		"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
		phy_id, reg, rc);

	return rc;
}
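
/**
 * xemaclite_mdio_write - Write to a given MII management register
 * @bus:	the mii_bus struct
 * @phy_id:	the phy address
 * @reg:	register number to write to
 * @val:	value to write to the register number specified by reg
 *
 * Return:	0 upon success or a negative error upon failure
 */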
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
				u16 val)
{
	struct net_local *lp = bus->priv;
	u32 ctrl_reg;

	dev_dbg(&lp->ndev->dev,
		"xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
		phy_id, reg, val);

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	/* Write the PHY address, register number and clear the OP bit in the
	 * MDIO Address register and then write the value into the MDIO Write
	 * Data register. Finally, set the Status bit in the MDIO Control
	 * register to start a MDIO write transaction.
	 */
	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
	__raw_writel(~XEL_MDIOADDR_OP_MASK &
		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
		     lp->base_addr + XEL_MDIOADDR_OFFSET);
	__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
		     lp->base_addr + XEL_MDIOCTRL_OFFSET);

	return 0;
}
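
/**
 * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
 * @lp:		Pointer to the Emaclite device private data
 * @dev:	Pointer to OF device structure
 *
 * This function enables MDIO bus in the Emaclite device and registers a
 * mii_bus.
 *
 * Return:	0 upon success or a negative error upon failure
 */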
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
	struct mii_bus *bus;
	int rc;
	struct resource res;
	struct device_node *np = of_get_parent(lp->phy_node);
	struct device_node *npp;

	/* Don't register the MDIO bus if the phy_node or its parent node
	 * can't be found.
	 */
	if (!np) {
		dev_err(dev, "Failed to register mdio bus.\n");
		return -ENODEV;
	}
	npp = of_get_parent(np);

	of_address_to_resource(npp, 0, &res);
	if (lp->ndev->mem_start != res.start) {
		struct phy_device *phydev;
		phydev = of_phy_find_device(lp->phy_node);
		if (!phydev)
			dev_info(dev,
				 "MDIO of the phy is not registered yet\n");
		return 0;
	}

	/* Enable the MDIO bus by asserting the enable bit in MDIO Control
	 * register.
	 */
	__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
		     lp->base_addr + XEL_MDIOCTRL_OFFSET);

	bus = mdiobus_alloc();
	if (!bus) {
		dev_err(dev, "Failed to allocate mdiobus\n");
		return -ENOMEM;
	}

	snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
		 (unsigned long long)res.start);
	bus->priv = lp;
	bus->name = "Xilinx Emaclite MDIO";
	bus->read = xemaclite_mdio_read;
	bus->write = xemaclite_mdio_write;
	bus->parent = dev;
	bus->irq = lp->mdio_irqs; /* preallocated IRQ table */

	lp->mii_bus = bus;

	rc = of_mdiobus_register(bus, np);
	if (rc) {
		dev_err(dev, "Failed to register mdio bus.\n");
		goto err_register;
	}

	return 0;

err_register:
	mdiobus_free(bus);
	return rc;
}
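
/**
 * xemaclite_adjust_link - Link state callback for the Emaclite device
 * @ndev:	pointer to net_device struct
 *
 * There's nothing in the Emaclite device to be configured when the link
 * state changes. We just print the status.
 */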
static void xemaclite_adjust_link(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
	int link_state;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		lp->last_link = link_state;
		phy_print_status(phy);
	}
}
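
/**
 * xemaclite_open - Open the network device
 * @dev:	Pointer to the network device
 *
 * This function sets the MAC address, requests an IRQ and enables interrupts
 * for the Emaclite device and starts the Tx queue.
 * It also connects to the phy device, if MDIO is included in the Emaclite
 * device.
 *
 * Return:	0 on success or a negative error on failure
 */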
static int xemaclite_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int retval;

	/* Just to be safe, start the device's interrupts disabled */
	xemaclite_disable_interrupts(lp);

	if (lp->phy_node) {
		u32 bmcr;

		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     xemaclite_adjust_link, 0,
					     PHY_INTERFACE_MODE_MII);
		if (!lp->phy_dev) {
			dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}

		/* EmacLite doesn't support giga-bit speeds */
		lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
		lp->phy_dev->advertising = lp->phy_dev->supported;

		/* Don't advertise 1000BASE-T Full/Half duplex speeds */
		phy_write(lp->phy_dev, MII_CTRL1000, 0);

		/* Advertise only 10 and 100mbps full/half duplex speeds */
		phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
			  ADVERTISE_CSMA);

		/* Restart auto negotiation */
		bmcr = phy_read(lp->phy_dev, MII_BMCR);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		phy_write(lp->phy_dev, MII_BMCR, bmcr);

		phy_start(lp->phy_dev);
	}

	/* Set the MAC address each time opened */
	xemaclite_update_address(lp, dev->dev_addr);

	/* Grab the IRQ */
	retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
	if (retval) {
		dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
			dev->irq);
		if (lp->phy_dev)
			phy_disconnect(lp->phy_dev);
		lp->phy_dev = NULL;

		return retval;
	}

	/* Enable Interrupts */
	xemaclite_enable_interrupts(lp);

	/* We're ready to go */
	netif_start_queue(dev);

	return 0;
}
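
/**
 * xemaclite_close - Close the network device
 * @dev:	Pointer to the network device
 *
 * This function stops the Tx queue, disables interrupts and frees the IRQ for
 * the Emaclite device.
 * It also disconnects the phy device associated with the Emaclite device.
 *
 * Return:	0, always
 */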
static int xemaclite_close(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	xemaclite_disable_interrupts(lp);
	free_irq(dev->irq, dev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	return 0;
}
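
/**
 * xemaclite_send - Transmit a frame
 * @orig_skb:	Pointer to the socket buffer to be transmitted
 * @dev:	Pointer to the network device
 *
 * This function checks if the Tx buffer of the Emaclite device is free to send
 * data. If so, it fills the Tx buffer with data from the socket buffer data,
 * updates the stats and frees the socket buffer. The Tx completion is signaled
 * by an interrupt. If the Tx buffer isn't free, then the socket buffer is
 * deferred and the Tx queue is stopped so that the deferred socket buffer can
 * be transmitted when the Emaclite device is free to transmit data.
 *
 * Return:	0, always.
 */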
static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *new_skb;
	unsigned int len;
	unsigned long flags;

	len = orig_skb->len;

	new_skb = orig_skb;

	spin_lock_irqsave(&lp->reset_lock, flags);
	if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
		/* If the Emaclite Tx buffer is busy, stop the Tx queue and
		 * defer the skb for transmission during the ISR, after the
		 * current transmission is complete.
		 */
		netif_stop_queue(dev);
		lp->deferred_skb = new_skb;
		/* Take the time stamp now, since we can't do this in an ISR. */
		skb_tx_timestamp(new_skb);
		spin_unlock_irqrestore(&lp->reset_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&lp->reset_lock, flags);

	skb_tx_timestamp(new_skb);

	dev->stats.tx_bytes += len;
	dev_consume_skb_any(new_skb);

	return 0;
}

/**
 * xemaclite_remove_ndev - Free the network device
 * @ndev:	Pointer to the network device to be freed
 */
static void xemaclite_remove_ndev(struct net_device *ndev)
{
	if (ndev)
		free_netdev(ndev);
}
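
/**
 * get_bool - Get a parameter from the OF device
 * @ofdev:	Pointer to OF device structure
 * @s:		Property to be retrieved
 *
 * This function looks for a property in the device node and returns the value
 * of the property if it is found, or 0 otherwise.
 *
 * Return:	Value of the parameter if the parameter is found, or 0 otherwise
 */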
static bool get_bool(struct platform_device *ofdev, const char *s)
{
	u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);

	if (p) {
		return (bool)*p;
	} else {
		dev_warn(&ofdev->dev,
			 "Parameter %s not found, defaulting to false\n", s);
		return 0;
	}
}

static struct net_device_ops xemaclite_netdev_ops;
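
/**
 * xemaclite_of_probe - Probe method for the Emaclite device.
 * @ofdev:	Pointer to OF device structure
 *
 * This function probes for the Emaclite device in the device tree. It
 * initializes the driver data structure and the hardware, sets the MAC
 * address and registers the network device.
 *
 * Return:	0, if the driver is bound to the Emaclite device, or
 *		a negative error if there is a failure.
 */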
static int xemaclite_of_probe(struct platform_device *ofdev)
{
	struct resource *res;
	struct net_device *ndev = NULL;
	struct net_local *lp = NULL;
	struct device *dev = &ofdev->dev;
	const void *mac_address;

	int rc = 0;

	dev_info(dev, "Device Tree Probing\n");

	/* Create an ethernet device instance */
	ndev = alloc_etherdev(sizeof(struct net_local));
	if (!ndev)
		return -ENOMEM;

	dev_set_drvdata(dev, ndev);
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Get IRQ for the device */
	res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "no IRQ found\n");
		rc = -ENXIO;
		goto error;
	}

	ndev->irq = res->start;

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(lp->base_addr)) {
		rc = PTR_ERR(lp->base_addr);
		goto error;
	}

	ndev->mem_start = res->start;
	ndev->mem_end = res->end;

	spin_lock_init(&lp->reset_lock);
	lp->next_tx_buf_to_use = 0x0;
	lp->next_rx_buf_to_use = 0x0;
	lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
	lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
	mac_address = of_get_mac_address(ofdev->dev.of_node);

	if (mac_address)
		/* Set the MAC address. */
		memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
	else
		dev_warn(dev, "No MAC address found\n");

	/* Clear the Tx CSR's in case this is a restart */
	__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
	__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);

	/* Set the MAC address in the EmacLite device */
	xemaclite_update_address(lp, ndev->dev_addr);

	lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	rc = xemaclite_mdio_setup(lp, &ofdev->dev);
	if (rc)
		dev_warn(&ofdev->dev, "error registering MDIO bus\n");

	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);

	ndev->netdev_ops = &xemaclite_netdev_ops;
	ndev->flags &= ~IFF_MULTICAST;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* Finally, register the device */
	rc = register_netdev(ndev);
	if (rc) {
		dev_err(dev,
			"Cannot register network device, aborting\n");
		goto error;
	}

	dev_info(dev,
		 "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
		 (unsigned int __force)ndev->mem_start,
		 (unsigned int __force)lp->base_addr, ndev->irq);
	return 0;

error:
	xemaclite_remove_ndev(ndev);
	return rc;
}
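
/**
 * xemaclite_of_remove - Unbind the driver from the Emaclite device.
 * @of_dev:	Pointer to OF device structure
 *
 * This function is called if a device is physically removed from the system or
 * if the driver module is being unloaded. It frees any resources allocated to
 * the device.
 *
 * Return:	0, always.
 */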
static int xemaclite_of_remove(struct platform_device *of_dev)
{
	struct net_device *ndev = platform_get_drvdata(of_dev);

	struct net_local *lp = netdev_priv(ndev);

	/* Un-register the mii_bus, if we have one */
	if (lp->has_mdio) {
		mdiobus_unregister(lp->mii_bus);
		kfree(lp->mii_bus->irq);
		mdiobus_free(lp->mii_bus);
		lp->mii_bus = NULL;
	}

	unregister_netdev(ndev);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	xemaclite_remove_ndev(ndev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
xemaclite_poll_controller(struct net_device *ndev)
{
	disable_irq(ndev->irq);
	xemaclite_interrupt(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif

static struct net_device_ops xemaclite_netdev_ops = {
	.ndo_open		= xemaclite_open,
	.ndo_stop		= xemaclite_close,
	.ndo_start_xmit		= xemaclite_send,
	.ndo_set_mac_address	= xemaclite_set_mac_address,
	.ndo_tx_timeout		= xemaclite_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xemaclite_poll_controller,
#endif
};

/* Match table for OF platform binding */
static struct of_device_id xemaclite_of_match[] = {
	{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
	{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
	{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
	{ .compatible = "xlnx,xps-ethernetlite-3.00.a", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);

static struct platform_driver xemaclite_of_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = xemaclite_of_match,
	},
	.probe		= xemaclite_of_probe,
	.remove		= xemaclite_of_remove,
};

module_platform_driver(xemaclite_of_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
MODULE_LICENSE("GPL");