1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/module.h>
16#include <linux/uaccess.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/skbuff.h>
20#include <linux/io.h>
21#include <linux/slab.h>
22#include <linux/of_address.h>
23#include <linux/of_device.h>
24#include <linux/of_platform.h>
25#include <linux/of_mdio.h>
26#include <linux/of_net.h>
27#include <linux/phy.h>
28#include <linux/interrupt.h>
29
#define DRIVER_NAME "xilinx_emaclite"

/* Register offsets for the EmacLite Core */
#define XEL_TXBUFF_OFFSET 0x0			/* Transmit Buffer */
#define XEL_MDIOADDR_OFFSET 0x07E4		/* MDIO Address Register */
#define XEL_MDIOWR_OFFSET 0x07E8		/* MDIO Write Data Register */
#define XEL_MDIORD_OFFSET 0x07EC		/* MDIO Read Data Register */
#define XEL_MDIOCTRL_OFFSET 0x07F0		/* MDIO Control Register */
#define XEL_GIER_OFFSET 0x07F8			/* GIE Register */
#define XEL_TSR_OFFSET 0x07FC			/* Tx status */
#define XEL_TPLR_OFFSET 0x07F4			/* Tx packet length */

#define XEL_RXBUFF_OFFSET 0x1000		/* Receive Buffer */
#define XEL_RPLR_OFFSET 0x100C			/* Rx packet length */
#define XEL_RSR_OFFSET 0x17FC			/* Rx status */

#define XEL_BUFFER_OFFSET 0x0800		/* Next Tx/Rx buffer's offset */

/* MDIO Address Register Bit Masks */
#define XEL_MDIOADDR_REGADR_MASK 0x0000001F	/* Register Address */
#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0	/* PHY Address */
#define XEL_MDIOADDR_PHYADR_SHIFT 5
#define XEL_MDIOADDR_OP_MASK 0x00000400		/* RD/WR Operation */

/* MDIO Write Data Register Bit Masks */
#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF	/* Data to be Written */

/* MDIO Read Data Register Bit Masks */
#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF	/* Data to be Read */

/* MDIO Control Register Bit Masks */
#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001	/* MDIO Status Mask */
#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008	/* MDIO Enable */

/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK 0x80000000		/* Global Enable */

/* Transmit Status Register (TSR) Bit Masks */
#define XEL_TSR_XMIT_BUSY_MASK 0x00000001	/* Tx complete */
#define XEL_TSR_PROGRAM_MASK 0x00000002		/* Program the MAC address */
#define XEL_TSR_XMIT_IE_MASK 0x00000008		/* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000	/* Buffer is active, SW bit
						 * only. Set by the driver in
						 * xemaclite_send_data(),
						 * cleared by the ISR when the
						 * BUSY bit drops. */

/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)

/* Receive Status Register (RSR) */
#define XEL_RSR_RECV_DONE_MASK 0x00000001	/* Rx complete */
#define XEL_RSR_RECV_IE_MASK 0x00000008		/* Rx interrupt enable bit */

/* Transmit Packet Length Register (TPLR) */
#define XEL_TPLR_LENGTH_MASK 0x0000FFFF		/* Tx packet length */

/* Receive Packet Length Register (RPLR) */
#define XEL_RPLR_LENGTH_MASK 0x0000FFFF		/* Rx packet length */

#define XEL_HEADER_OFFSET 12			/* Offset to length field */
#define XEL_HEADER_SHIFT 16			/* Shift value for length */

/* General Ethernet Definitions */
#define XEL_ARP_PACKET_SIZE 28			/* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16		/* IP Length Offset */

#define TX_TIMEOUT (60*HZ)			/* Tx timeout is 60 seconds */
#define ALIGNMENT 4

/* BUFFER_ALIGN(adr) calculates the number of bytes needed to bring
 * adr up to the next ALIGNMENT boundary. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
/**
 * struct net_local - Our private per-device data
 * @ndev:		instance of the network device
 * @tx_ping_pong:	indicates whether Tx Pong buffer is configured in HW
 * @rx_ping_pong:	indicates whether Rx Pong buffer is configured in HW
 * @next_tx_buf_to_use:	next Tx buffer to write to (offset from @base_addr)
 * @next_rx_buf_to_use:	next Rx buffer to read from (offset from @base_addr)
 * @base_addr:		base address of the Emaclite device registers
 * @reset_lock:		serializes xemaclite_send() against the timeout reset
 * @deferred_skb:	holds an skb (for transmission at a later time) when the
 *			Tx buffer is not free
 * @phy_dev:		pointer to the PHY device
 * @phy_node:		pointer to the PHY device node
 * @mii_bus:		pointer to the MII bus
 * @last_link:		last link status (speed/duplex/link folded together)
 * @has_mdio:		indicates whether an MDIO bus was registered and must
 *			be torn down on remove
 */
struct net_local {

	struct net_device *ndev;

	bool tx_ping_pong;
	bool rx_ping_pong;
	u32 next_tx_buf_to_use;
	u32 next_rx_buf_to_use;
	void __iomem *base_addr;

	spinlock_t reset_lock;
	struct sk_buff *deferred_skb;

	struct phy_device *phy_dev;
	struct device_node *phy_node;

	struct mii_bus *mii_bus;

	int last_link;
	bool has_mdio;
};
141
142
143
144
145
146
147
148
149
150
151
152
153
/**
 * xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device
 * @drvdata:	Pointer to the Emaclite device private data
 *
 * Enables the Tx-complete and Rx interrupts in the device registers, then
 * sets the Global Interrupt Enable last, so no interrupt can be raised
 * before both sources are configured.
 */
static void xemaclite_enable_interrupts(struct net_local *drvdata)
{
	u32 reg_data;

	/* Enable the Tx interrupts for the first buffer */
	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
	__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
		     drvdata->base_addr + XEL_TSR_OFFSET);

	/* Enable the Rx interrupts for the first buffer */
	__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);

	/* Enable the Global Interrupt Enable */
	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
169
170
171
172
173
174
175
176
177static void xemaclite_disable_interrupts(struct net_local *drvdata)
178{
179 u32 reg_data;
180
181
182 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
183
184
185 reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
186 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
187 drvdata->base_addr + XEL_TSR_OFFSET);
188
189
190 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
191 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
192 drvdata->base_addr + XEL_RSR_OFFSET);
193}
194
195
196
197
198
199
200
201
202
203
/**
 * xemaclite_aligned_write - Write from a 16-bit aligned source to a 32-bit
 *			     aligned destination
 * @src_ptr:	Void pointer to the 16-bit aligned source address
 * @dest_ptr:	Pointer to the 32-bit aligned destination address
 * @length:	Number of bytes to write
 *
 * The EmacLite packet buffers must be accessed with 32-bit stores, while skb
 * data is only guaranteed 16-bit alignment, so each word is staged in a
 * local u32 and written out in one 32-bit store.
 */
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
				    unsigned length)
{
	u32 align_buffer;
	u32 *to_u32_ptr;
	u16 *from_u16_ptr, *to_u16_ptr;

	to_u32_ptr = dest_ptr;
	from_u16_ptr = src_ptr;
	align_buffer = 0;

	for (; length > 3; length -= 4) {
		/* Stage two 16-bit halves into the aligned word */
		to_u16_ptr = (u16 *)&align_buffer;
		*to_u16_ptr++ = *from_u16_ptr++;
		*to_u16_ptr++ = *from_u16_ptr++;

		/* This barrier resolves occasional issues seen around
		 * cases where the data is not properly flushed out
		 * from the processor store buffers to the destination
		 * memory locations.
		 */
		wmb();

		/* Output a word */
		*to_u32_ptr++ = align_buffer;
	}
	if (length) {
		u8 *from_u8_ptr, *to_u8_ptr;

		/* Set up to write the remaining (1-3) bytes, again via a
		 * single full-word store */
		align_buffer = 0;
		to_u8_ptr = (u8 *) &align_buffer;
		from_u8_ptr = (u8 *) from_u16_ptr;

		/* Copy the remaining bytes into the staging word */
		for (; length > 0; length--)
			*to_u8_ptr++ = *from_u8_ptr++;

		/* This barrier resolves occasional issues seen around
		 * cases where the data is not properly flushed out
		 * from the processor store buffers to the destination
		 * memory locations.
		 */
		wmb();
		*to_u32_ptr = align_buffer;
	}
}
251
252
253
254
255
256
257
258
259
260
261static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
262 unsigned length)
263{
264 u16 *to_u16_ptr, *from_u16_ptr;
265 u32 *from_u32_ptr;
266 u32 align_buffer;
267
268 from_u32_ptr = src_ptr;
269 to_u16_ptr = (u16 *) dest_ptr;
270
271 for (; length > 3; length -= 4) {
272
273 align_buffer = *from_u32_ptr++;
274 from_u16_ptr = (u16 *)&align_buffer;
275
276
277 *to_u16_ptr++ = *from_u16_ptr++;
278 *to_u16_ptr++ = *from_u16_ptr++;
279 }
280
281 if (length) {
282 u8 *to_u8_ptr, *from_u8_ptr;
283
284
285 to_u8_ptr = (u8 *) to_u16_ptr;
286 align_buffer = *from_u32_ptr++;
287 from_u8_ptr = (u8 *) &align_buffer;
288
289
290 for (; length > 0; length--)
291 *to_u8_ptr = *from_u8_ptr;
292 }
293}
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
311 unsigned int byte_count)
312{
313 u32 reg_data;
314 void __iomem *addr;
315
316
317 addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
318
319
320 if (byte_count > ETH_FRAME_LEN)
321 byte_count = ETH_FRAME_LEN;
322
323
324 reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
325 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
326 XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
327
328
329 if (drvdata->tx_ping_pong != 0)
330 drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
331 } else if (drvdata->tx_ping_pong != 0) {
332
333
334
335 addr = (void __iomem __force *)((u32 __force)addr ^
336 XEL_BUFFER_OFFSET);
337 reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
338
339 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
340 XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
341 return -1;
342 } else
343 return -1;
344
345
346 xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
347
348 __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
349 addr + XEL_TPLR_OFFSET);
350
351
352
353
354
355 reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
356 reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
357 __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
358
359 return 0;
360}
361
362
363
364
365
366
367
368
369
370
371
372static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
373{
374 void __iomem *addr;
375 u16 length, proto_type;
376 u32 reg_data;
377
378
379 addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
380
381
382 reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
383
384 if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
385 if (drvdata->rx_ping_pong != 0)
386 drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET;
387 } else {
388
389
390
391
392 if (drvdata->rx_ping_pong != 0)
393 addr = (void __iomem __force *)((u32 __force)addr ^
394 XEL_BUFFER_OFFSET);
395 else
396 return 0;
397
398
399 reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
400 if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
401 XEL_RSR_RECV_DONE_MASK)
402 return 0;
403 }
404
405
406 proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
407 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
408 XEL_RPLR_LENGTH_MASK);
409
410
411
412 if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
413
414 if (proto_type == ETH_P_IP) {
415 length = ((ntohl(__raw_readl(addr +
416 XEL_HEADER_IP_LENGTH_OFFSET +
417 XEL_RXBUFF_OFFSET)) >>
418 XEL_HEADER_SHIFT) &
419 XEL_RPLR_LENGTH_MASK);
420 length += ETH_HLEN + ETH_FCS_LEN;
421
422 } else if (proto_type == ETH_P_ARP)
423 length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
424 else
425
426
427 length = ETH_FRAME_LEN + ETH_FCS_LEN;
428 } else
429
430 length = proto_type + ETH_HLEN + ETH_FCS_LEN;
431
432
433 xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
434 data, length);
435
436
437 reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
438 reg_data &= ~XEL_RSR_RECV_DONE_MASK;
439 __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
440
441 return length;
442}
443
444
445
446
447
448
449
450
451
452
453
454
/**
 * xemaclite_update_address - Update the MAC address in the device
 * @drvdata:	Pointer to the Emaclite device private data
 * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
 *
 * Programs the station MAC address by writing it into the next Tx buffer
 * with the PROG bits set in the TSR, then polls for completion. Tx should
 * be idle when this is called (it reuses the Tx buffer).
 */
static void xemaclite_update_address(struct net_local *drvdata,
				     u8 *address_ptr)
{
	void __iomem *addr;
	u32 reg_data;

	/* Determine the expected Tx buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);

	__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);

	/* Update the MAC address in the EmacLite */
	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
	__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);

	/* Wait for EmacLite to finish with the MAC address update.
	 * NOTE(review): this poll has no timeout and will spin forever if
	 * the hardware never clears XEL_TSR_PROG_MAC_ADDR - confirm this
	 * is acceptable for this device.
	 */
	while ((__raw_readl(addr + XEL_TSR_OFFSET) &
		XEL_TSR_PROG_MAC_ADDR) != 0)
		;
}
477
478
479
480
481
482
483
484
485
486
487
488
489static int xemaclite_set_mac_address(struct net_device *dev, void *address)
490{
491 struct net_local *lp = netdev_priv(dev);
492 struct sockaddr *addr = address;
493
494 if (netif_running(dev))
495 return -EBUSY;
496
497 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
498 xemaclite_update_address(lp, dev->dev_addr);
499 return 0;
500}
501
502
503
504
505
506
507
/**
 * xemaclite_tx_timeout - Callback for Tx Timeout
 * @dev:	Pointer to the network device
 *
 * This function is called when Tx time out occurs for Emaclite device:
 * interrupts are cycled off/on, any deferred skb is dropped, and the
 * queue is restarted.
 */
static void xemaclite_tx_timeout(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
		TX_TIMEOUT * 1000UL / HZ);

	dev->stats.tx_errors++;

	/* Reset the device under the lock so xemaclite_send() can't race */
	spin_lock_irqsave(&lp->reset_lock, flags);

	/* Shouldn't really be necessary, but shouldn't hurt */
	netif_stop_queue(dev);

	xemaclite_disable_interrupts(lp);
	xemaclite_enable_interrupts(lp);

	if (lp->deferred_skb) {
		/* Drop the pending frame; it never completed */
		dev_kfree_skb(lp->deferred_skb);
		lp->deferred_skb = NULL;
		dev->stats.tx_errors++;
	}

	/* To exclude tx timeout */
	netif_trans_update(dev); /* prevent tx timeout */

	/* We're all ready to go. Start the queue */
	netif_wake_queue(dev);
	spin_unlock_irqrestore(&lp->reset_lock, flags);
}
540
541
542
543
544
545
546
547
548
549
550
551
552static void xemaclite_tx_handler(struct net_device *dev)
553{
554 struct net_local *lp = netdev_priv(dev);
555
556 dev->stats.tx_packets++;
557 if (lp->deferred_skb) {
558 if (xemaclite_send_data(lp,
559 (u8 *) lp->deferred_skb->data,
560 lp->deferred_skb->len) != 0)
561 return;
562 else {
563 dev->stats.tx_bytes += lp->deferred_skb->len;
564 dev_kfree_skb_irq(lp->deferred_skb);
565 lp->deferred_skb = NULL;
566 netif_trans_update(dev);
567 netif_wake_queue(dev);
568 }
569 }
570}
571
572
573
574
575
576
577
578
/**
 * xemaclite_rx_handler - Interrupt handler for frames received
 * @dev:	Pointer to the network device
 *
 * This function allocates memory for a socket buffer, fills it with data
 * received and hands the skb over to the stack.
 */
static void xemaclite_rx_handler(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int align;
	u32 len;

	/* Worst-case frame size; actual length comes from the hardware */
	len = ETH_FRAME_LEN + ETH_FCS_LEN;
	skb = netdev_alloc_skb(dev, len + ALIGNMENT);
	if (!skb) {
		/* Couldn't get memory. */
		dev->stats.rx_dropped++;
		dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n");
		return;
	}

	/* A new skb should have the data halfword aligned, but this code is
	 * here just in case it isn't... Calculate how many bytes we should
	 * reserve to get the data to start on a word boundary. */
	align = BUFFER_ALIGN(skb->data);
	if (align)
		skb_reserve(skb, align);

	/* Reserve 2 bytes so the IP header lands word-aligned after the
	 * 14-byte Ethernet header */
	skb_reserve(skb, 2);

	len = xemaclite_recv_data(lp, (u8 *) skb->data);

	if (!len) {
		/* No frame was actually available */
		dev->stats.rx_errors++;
		dev_kfree_skb_irq(skb);
		return;
	}

	skb_put(skb, len);	/* Tell the skb how much data we got */

	skb->protocol = eth_type_trans(skb, dev);
	skb_checksum_none_assert(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	if (!skb_defer_rx_timestamp(skb))
		netif_rx(skb);	/* Send the packet upstream */
}
625
626
627
628
629
630
631
632
633
/**
 * xemaclite_interrupt - Interrupt handler for this driver
 * @irq:	Irq of the Emaclite device
 * @dev_id:	Void pointer to the network device instance used as callback
 *		reference
 *
 * This function handles the Tx and Rx interrupts of the EmacLite device,
 * checking both ping and pong buffers.
 *
 * Return:	IRQ_HANDLED, always.
 */
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
	bool tx_complete = false;
	struct net_device *dev = dev_id;
	struct net_local *lp = netdev_priv(dev);
	void __iomem *base_addr = lp->base_addr;
	u32 tx_status;

	/* Check if there is Rx Data available in either buffer */
	if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
			 XEL_RSR_RECV_DONE_MASK) ||
	    (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
			 & XEL_RSR_RECV_DONE_MASK))

		xemaclite_rx_handler(dev);

	/* Check if the Transmission for the first buffer is completed */
	tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
	    (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {

		/* Clear the software ACTIVE flag set by send_data() */
		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
		__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);

		tx_complete = true;
	}

	/* Check if the Transmission for the second buffer is completed */
	tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
	    (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {

		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
		__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
			     XEL_TSR_OFFSET);

		tx_complete = true;
	}

	/* If there was a Tx interrupt, call the Tx Handler */
	if (tx_complete != 0)
		xemaclite_tx_handler(dev);

	return IRQ_HANDLED;
}
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
/**
 * xemaclite_mdio_wait - Wait for the MDIO interface to be ready
 * @lp:		Pointer to the Emaclite device private data
 *
 * Polls the MDIO status bit until the interface is no longer busy.
 *
 * Return:	0 once idle, or -ETIMEDOUT (with a WARN) after roughly
 *		2 jiffies of polling.
 */
static int xemaclite_mdio_wait(struct net_local *lp)
{
	unsigned long end = jiffies + 2;

	/* wait for the MDIO interface to not be busy or timeout
	 * after some time.
	 */
	while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
	       XEL_MDIOCTRL_MDIOSTS_MASK) {
		if (time_before_eq(end, jiffies)) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}
711
712
713
714
715
716
717
718
719
720
721
722
723
/**
 * xemaclite_mdio_read - Read from a given MII management register
 * @bus:	the mii_bus struct
 * @phy_id:	the phy address
 * @reg:	register number to read from
 *
 * This function waits till the device is ready to accept a new MDIO
 * request, then writes the phy address to the MDIO Address register
 * and reads data from the MDIO Read Data register when it's available.
 *
 * Return:	Value read from the MII management register, or -ETIMEDOUT
 *		if the MDIO interface stays busy.
 */
static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct net_local *lp = bus->priv;
	u32 ctrl_reg;
	u32 rc;

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	/* Write the PHY address, register number and set the OP bit in the
	 * MDIO Address register. Set the Status bit in the MDIO Control
	 * register to start a MDIO read transaction.
	 */
	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
	__raw_writel(XEL_MDIOADDR_OP_MASK |
		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
		     lp->base_addr + XEL_MDIOADDR_OFFSET);
	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
		     lp->base_addr + XEL_MDIOCTRL_OFFSET);

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);

	dev_dbg(&lp->ndev->dev,
		"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
		phy_id, reg, rc);

	return rc;
}
755
756
757
758
759
760
761
762
763
764
765
/**
 * xemaclite_mdio_write - Write to a given MII management register
 * @bus:	the mii_bus struct
 * @phy_id:	the phy address
 * @reg:	register number to write to
 * @val:	value to write to the register number specified by reg
 *
 * This function waits till the device is ready to accept a new MDIO
 * request and then writes the val to the MDIO Write Data register.
 *
 * Return:	0 on success, -ETIMEDOUT if the MDIO interface stays busy.
 */
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
				u16 val)
{
	struct net_local *lp = bus->priv;
	u32 ctrl_reg;

	dev_dbg(&lp->ndev->dev,
		"xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
		phy_id, reg, val);

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	/* Write the PHY address, register number and clear the OP bit in the
	 * MDIO Address register, write the value into the MDIO Write Data
	 * register, then set the Status bit in the MDIO Control register to
	 * start a MDIO write transaction.
	 */
	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
	__raw_writel(~XEL_MDIOADDR_OP_MASK &
		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
		     lp->base_addr + XEL_MDIOADDR_OFFSET);
	__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
		     lp->base_addr + XEL_MDIOCTRL_OFFSET);

	return 0;
}
794
795
796
797
798
799
800
801
802
803
804
805static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
806{
807 struct mii_bus *bus;
808 int rc;
809 struct resource res;
810 struct device_node *np = of_get_parent(lp->phy_node);
811 struct device_node *npp;
812
813
814
815
816 if (!np) {
817 dev_err(dev, "Failed to register mdio bus.\n");
818 return -ENODEV;
819 }
820 npp = of_get_parent(np);
821
822 of_address_to_resource(npp, 0, &res);
823 if (lp->ndev->mem_start != res.start) {
824 struct phy_device *phydev;
825 phydev = of_phy_find_device(lp->phy_node);
826 if (!phydev)
827 dev_info(dev,
828 "MDIO of the phy is not registered yet\n");
829 else
830 put_device(&phydev->mdio.dev);
831 return 0;
832 }
833
834
835
836
837 __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
838 lp->base_addr + XEL_MDIOCTRL_OFFSET);
839
840 bus = mdiobus_alloc();
841 if (!bus) {
842 dev_err(dev, "Failed to allocate mdiobus\n");
843 return -ENOMEM;
844 }
845
846 snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
847 (unsigned long long)res.start);
848 bus->priv = lp;
849 bus->name = "Xilinx Emaclite MDIO";
850 bus->read = xemaclite_mdio_read;
851 bus->write = xemaclite_mdio_write;
852 bus->parent = dev;
853
854 lp->mii_bus = bus;
855
856 rc = of_mdiobus_register(bus, np);
857 if (rc) {
858 dev_err(dev, "Failed to register mdio bus.\n");
859 goto err_register;
860 }
861
862 return 0;
863
864err_register:
865 mdiobus_free(bus);
866 return rc;
867}
868
869
870
871
872
873
874
875
876static void xemaclite_adjust_link(struct net_device *ndev)
877{
878 struct net_local *lp = netdev_priv(ndev);
879 struct phy_device *phy = lp->phy_dev;
880 int link_state;
881
882
883 link_state = phy->speed | (phy->duplex << 1) | phy->link;
884
885 if (lp->last_link != link_state) {
886 lp->last_link = link_state;
887 phy_print_status(phy);
888 }
889}
890
891
892
893
894
895
896
897
898
/**
 * xemaclite_open - Open the network device
 * @dev:	Pointer to the network device
 *
 * This function sets the MAC address, requests an IRQ and enables interrupts
 * for the Emaclite device and starts the Tx queue.
 * It also connects to the phy device, if a phy-handle was found in the
 * device tree.
 *
 * Return:	0 on success, negative errno on failure.
 */
static int xemaclite_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int retval;

	/* Just to be safe, stop the device first */
	xemaclite_disable_interrupts(lp);

	if (lp->phy_node) {
		u32 bmcr;

		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     xemaclite_adjust_link, 0,
					     PHY_INTERFACE_MODE_MII);
		if (!lp->phy_dev) {
			dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}

		/* EmacLite doesn't support giga-bit speeds */
		lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
		lp->phy_dev->advertising = lp->phy_dev->supported;

		/* Don't advertise 1000BASE-T Full/Half duplex speeds */
		phy_write(lp->phy_dev, MII_CTRL1000, 0);

		/* Advertise only 10 and 100mbps full/half duplex speeds */
		phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
			  ADVERTISE_CSMA);

		/* Restart auto negotiation */
		bmcr = phy_read(lp->phy_dev, MII_BMCR);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		phy_write(lp->phy_dev, MII_BMCR, bmcr);

		phy_start(lp->phy_dev);
	}

	/* Set the MAC address each time opened */
	xemaclite_update_address(lp, dev->dev_addr);

	/* Grab the IRQ */
	retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
	if (retval) {
		dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
			dev->irq);
		/* Undo the phy connection made above */
		if (lp->phy_dev)
			phy_disconnect(lp->phy_dev);
		lp->phy_dev = NULL;

		return retval;
	}

	/* Enable Interrupts */
	xemaclite_enable_interrupts(lp);

	/* We're ready to go */
	netif_start_queue(dev);

	return 0;
}
960
961
962
963
964
965
966
967
968
/**
 * xemaclite_close - Close the network device
 * @dev:	Pointer to the network device
 *
 * This function stops the Tx queue, disables interrupts and frees the IRQ
 * for the Emaclite device. It also disconnects the phy device associated
 * with the Emaclite device.
 *
 * Return:	0, always.
 */
static int xemaclite_close(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	xemaclite_disable_interrupts(lp);
	free_irq(dev->irq, dev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	return 0;
}
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
/**
 * xemaclite_send - Transmit a frame
 * @orig_skb:	Pointer to the socket buffer to be transmitted
 * @dev:	Pointer to the network device
 *
 * This function checks if the Tx buffer of the Emaclite device is free to send
 * data. If so, it fills the Tx buffer with data from the socket buffer,
 * updates the stats and frees the socket buffer. Tx completion is signaled
 * by an interrupt. If the Tx buffer isn't free, the socket buffer is deferred
 * and the Tx queue is stopped so that the deferred socket buffer can be
 * transmitted when the Emaclite device is free to transmit data.
 *
 * Return:	0, always.
 */
static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *new_skb;
	unsigned int len;
	unsigned long flags;

	len = orig_skb->len;

	new_skb = orig_skb;

	/* The lock excludes the timeout-reset path in xemaclite_tx_timeout */
	spin_lock_irqsave(&lp->reset_lock, flags);
	if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
		/* If the Emaclite Tx buffer is busy, stop the Tx queue and
		 * defer the skb for transmission during the ISR, after the
		 * current transmission is complete */
		netif_stop_queue(dev);
		lp->deferred_skb = new_skb;
		/* Take the time stamp now, since we can't do this in an ISR. */
		skb_tx_timestamp(new_skb);
		spin_unlock_irqrestore(&lp->reset_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&lp->reset_lock, flags);

	skb_tx_timestamp(new_skb);

	dev->stats.tx_bytes += len;
	dev_consume_skb_any(new_skb);

	return 0;
}
1030
1031
1032
1033
1034
1035
1036
1037
/**
 * xemaclite_remove_ndev - Release the network device
 * @ndev:	Pointer to the network device to be freed, may be NULL
 *
 * Frees the net_device (and the private data embedded in it); a NULL
 * @ndev is silently ignored.
 */
static void xemaclite_remove_ndev(struct net_device *ndev)
{
	if (ndev)
		free_netdev(ndev);
}
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055static bool get_bool(struct platform_device *ofdev, const char *s)
1056{
1057 u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
1058
1059 if (p) {
1060 return (bool)*p;
1061 } else {
1062 dev_warn(&ofdev->dev, "Parameter %s not found,"
1063 "defaulting to false\n", s);
1064 return false;
1065 }
1066}
1067
1068static struct net_device_ops xemaclite_netdev_ops;
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
/**
 * xemaclite_of_probe - Probe method for the Emaclite device.
 * @ofdev:	Pointer to OF device structure
 *
 * This function probes for the Emaclite device in the device tree. It
 * initializes the driver data structure and the hardware, sets the MAC
 * address and registers the network device.
 *
 * Return:	0, if the driver is bound to the Emaclite device, or
 *		a negative error if there is failure.
 */
static int xemaclite_of_probe(struct platform_device *ofdev)
{
	struct resource *res;
	struct net_device *ndev = NULL;
	struct net_local *lp = NULL;
	struct device *dev = &ofdev->dev;
	const void *mac_address;

	int rc = 0;

	dev_info(dev, "Device Tree Probing\n");

	/* Create an ethernet device instance */
	ndev = alloc_etherdev(sizeof(struct net_local));
	if (!ndev)
		return -ENOMEM;

	dev_set_drvdata(dev, ndev);
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Get IRQ for the device */
	res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "no IRQ found\n");
		rc = -ENXIO;
		goto error;
	}

	ndev->irq = res->start;

	/* Map the device registers; devm_ioremap_resource also validates
	 * that the MEM resource exists. */
	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(lp->base_addr)) {
		rc = PTR_ERR(lp->base_addr);
		goto error;
	}

	ndev->mem_start = res->start;
	ndev->mem_end = res->end;

	spin_lock_init(&lp->reset_lock);
	lp->next_tx_buf_to_use = 0x0;
	lp->next_rx_buf_to_use = 0x0;
	lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
	lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
	mac_address = of_get_mac_address(ofdev->dev.of_node);

	if (mac_address) {
		/* Set the MAC address from the device tree. */
		memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
	} else {
		dev_warn(dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}

	/* Clear the Tx CSRs in case this is a restart */
	__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
	__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);

	/* Set the MAC address in the EmacLite device */
	xemaclite_update_address(lp, ndev->dev_addr);

	lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	rc = xemaclite_mdio_setup(lp, &ofdev->dev);
	if (rc)
		dev_warn(&ofdev->dev, "error registering MDIO bus\n");

	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);

	ndev->netdev_ops = &xemaclite_netdev_ops;
	ndev->flags &= ~IFF_MULTICAST;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* Finally, register the device.
	 * NOTE(review): on this error path the phy_node reference taken by
	 * of_parse_phandle() and any MDIO bus registered by
	 * xemaclite_mdio_setup() are not released - confirm whether
	 * cleanup is required here.
	 */
	rc = register_netdev(ndev);
	if (rc) {
		dev_err(dev,
			"Cannot register network device, aborting\n");
		goto error;
	}

	dev_info(dev,
		 "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
		 (unsigned int __force)ndev->mem_start,
		 (unsigned int __force)lp->base_addr, ndev->irq);
	return 0;

error:
	xemaclite_remove_ndev(ndev);
	return rc;
}
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
/**
 * xemaclite_of_remove - Unbind the driver from the Emaclite device
 * @of_dev:	Pointer to OF device structure
 *
 * This function is called if a device is physically removed from the system
 * or if the driver module is being unloaded. It frees any resources
 * allocated to the device.
 *
 * Return:	0, always.
 */
static int xemaclite_of_remove(struct platform_device *of_dev)
{
	struct net_device *ndev = platform_get_drvdata(of_dev);

	struct net_local *lp = netdev_priv(ndev);

	/* Un-register the mii_bus, if one was registered.
	 * NOTE(review): nothing visible in this file sets has_mdio to true;
	 * confirm it is set where the MDIO bus is registered, otherwise the
	 * bus is never unregistered/freed here.
	 */
	if (lp->has_mdio) {
		mdiobus_unregister(lp->mii_bus);
		mdiobus_free(lp->mii_bus);
		lp->mii_bus = NULL;
	}

	unregister_netdev(ndev);

	/* Drop the phy-handle reference taken at probe time */
	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	xemaclite_remove_ndev(ndev);

	return 0;
}
1211
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll the device for pending Rx/Tx work with its IRQ masked, so netpoll
 * (e.g. netconsole) can make progress with interrupts disabled. */
static void
xemaclite_poll_controller(struct net_device *ndev)
{
	disable_irq(ndev->irq);
	xemaclite_interrupt(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif
1221
/* Callbacks the network core uses to operate this device
 * (forward-declared earlier so probe can reference it). */
static struct net_device_ops xemaclite_netdev_ops = {
	.ndo_open = xemaclite_open,
	.ndo_stop = xemaclite_close,
	.ndo_start_xmit = xemaclite_send,
	.ndo_set_mac_address = xemaclite_set_mac_address,
	.ndo_tx_timeout = xemaclite_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xemaclite_poll_controller,
#endif
};
1232
1233
/* Match table for OF platform binding */
static const struct of_device_id xemaclite_of_match[] = {
	{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
	{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
	{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
	{ .compatible = "xlnx,xps-ethernetlite-3.00.a", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
1244
/* Platform driver glue: binds probe/remove to the OF match table above */
static struct platform_driver xemaclite_of_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = xemaclite_of_match,
	},
	.probe = xemaclite_of_probe,
	.remove = xemaclite_of_remove,
};

module_platform_driver(xemaclite_of_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
MODULE_LICENSE("GPL");
1259