1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/module.h>
16#include <linux/uaccess.h>
17#include <linux/init.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/skbuff.h>
21#include <linux/io.h>
22
23#include <linux/of_device.h>
24#include <linux/of_platform.h>
25
#define DRIVER_NAME "xilinx_emaclite"

/* Register offsets of the transmit path (ping buffer) */
#define XEL_TXBUFF_OFFSET 0x0		/* Transmit buffer */
#define XEL_GIER_OFFSET 0x07F8		/* Global Interrupt Enable register */
#define XEL_TSR_OFFSET 0x07FC		/* Transmit Status register */
#define XEL_TPLR_OFFSET 0x07F4		/* Transmit Packet Length register */

/* Register offsets of the receive path (ping buffer) */
#define XEL_RXBUFF_OFFSET 0x1000	/* Receive buffer */
#define XEL_RPLR_OFFSET 0x100C		/* Receive Packet Length register */
#define XEL_RSR_OFFSET 0x17FC		/* Receive Status register */

/* Distance between the ping and the pong buffer of either path */
#define XEL_BUFFER_OFFSET 0x0800

/* Global Interrupt Enable register bit */
#define XEL_GIER_GIE_MASK 0x80000000	/* Global interrupt enable */

/* Transmit Status register bits */
#define XEL_TSR_XMIT_BUSY_MASK 0x00000001	/* Transmit in progress */
#define XEL_TSR_PROGRAM_MASK 0x00000002		/* Program the MAC address */
#define XEL_TSR_XMIT_IE_MASK 0x00000008		/* Transmit interrupt enable */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000	/* Software flag: buffer in use
						 * (not a hardware bit) */

/* Writing BUSY together with PROGRAM makes the device load the buffer
 * contents as its MAC address instead of transmitting them.
 */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)

/* Receive Status register bits */
#define XEL_RSR_RECV_DONE_MASK 0x00000001	/* Receive complete */
#define XEL_RSR_RECV_IE_MASK 0x00000008		/* Receive interrupt enable */

/* Transmit Packet Length register: low 16 bits hold the frame length */
#define XEL_TPLR_LENGTH_MASK 0x0000FFFF

/* Receive Packet Length register: low 16 bits hold the frame length */
#define XEL_RPLR_LENGTH_MASK 0x0000FFFF

#define XEL_HEADER_OFFSET 12		/* Offset of the type field in a frame */
#define XEL_HEADER_SHIFT 16		/* Shift to extract it from a BE word */

#define XEL_ARP_PACKET_SIZE 28			/* ARP payload size in bytes */
#define XEL_HEADER_IP_LENGTH_OFFSET 16		/* Offset of the word holding
						 * the IP total-length field */

#define TX_TIMEOUT (60*HZ)	/* Transmission watchdog timeout */
#define ALIGNMENT 4

/* Number of bytes needed to move "adr" up to the next 4-byte boundary.
 * BUGFIX: the argument is now fully parenthesized; the original
 * ((u32) adr) cast only the first operand when the macro was invoked
 * with an expression such as BUFFER_ALIGN(p + 2).
 * NOTE(review): the u32 cast truncates pointers on 64-bit builds - fine
 * for the 32-bit platforms this driver targets, but worth confirming.
 */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)(adr))) % ALIGNMENT)
78
79
80
81
82
83
84
85
86
87
88
89
90
/*
 * struct net_local - per-device private data for the EmacLite driver
 */
struct net_local {

	struct net_device *ndev;	/* back-pointer to our net device */

	bool tx_ping_pong;	/* Tx path has a second (pong) buffer */
	bool rx_ping_pong;	/* Rx path has a second (pong) buffer */
	u32 next_tx_buf_to_use;	/* next Tx buffer offset (0 or XEL_BUFFER_OFFSET) */
	u32 next_rx_buf_to_use;	/* next Rx buffer offset (0 or XEL_BUFFER_OFFSET) */
	void __iomem *base_addr;	/* ioremapped base of the device registers */

	spinlock_t reset_lock;	/* serializes xmit against the Tx-timeout reset */
	struct sk_buff *deferred_skb;	/* frame waiting for a free Tx buffer */
};
104
105
106
107
108
109
110
111
112
113
114
115
116
117static void xemaclite_enable_interrupts(struct net_local *drvdata)
118{
119 u32 reg_data;
120
121
122 reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
123 out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
124 reg_data | XEL_TSR_XMIT_IE_MASK);
125
126
127
128 if (drvdata->tx_ping_pong != 0) {
129 reg_data = in_be32(drvdata->base_addr +
130 XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
131 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
132 XEL_TSR_OFFSET,
133 reg_data | XEL_TSR_XMIT_IE_MASK);
134 }
135
136
137 out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
138 XEL_RSR_RECV_IE_MASK);
139
140
141
142 if (drvdata->rx_ping_pong != 0) {
143 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
144 XEL_RSR_OFFSET,
145 XEL_RSR_RECV_IE_MASK);
146 }
147
148
149 out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK);
150}
151
152
153
154
155
156
157
158
/**
 * xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device
 * @drvdata: Pointer to the Emaclite device private data
 *
 * Clears the Tx and Rx interrupt-enable bits in the ping (and, when
 * configured, pong) status registers.
 *
 * NOTE(review): the first write stores XEL_GIER_GIE_MASK into the Global
 * Interrupt Enable register, which would *set* the GIE bit rather than
 * clear it; disabling globally would normally write 0 here. Confirm
 * against the EmacLite register specification before changing.
 */
static void xemaclite_disable_interrupts(struct net_local *drvdata)
{
	u32 reg_data;

	/* Write the Global Interrupt Enable register (see NOTE above) */
	out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK);

	/* Disable the Tx interrupt on the ping buffer */
	reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
	out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
		 reg_data & (~XEL_TSR_XMIT_IE_MASK));

	/* Disable the Tx interrupt on the pong buffer, when the
	 * hardware has one */
	if (drvdata->tx_ping_pong != 0) {
		reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
				   XEL_TSR_OFFSET);
		out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
			 XEL_TSR_OFFSET,
			 reg_data & (~XEL_TSR_XMIT_IE_MASK));
	}

	/* Disable the Rx interrupt on the ping buffer */
	reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET);
	out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
		 reg_data & (~XEL_RSR_RECV_IE_MASK));

	/* Disable the Rx interrupt on the pong buffer, when the
	 * hardware has one */
	if (drvdata->rx_ping_pong != 0) {

		reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
				   XEL_RSR_OFFSET);
		out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
			 XEL_RSR_OFFSET,
			 reg_data & (~XEL_RSR_RECV_IE_MASK));
	}
}
197
198
199
200
201
202
203
204
205
206
207static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
208 unsigned length)
209{
210 u32 align_buffer;
211 u32 *to_u32_ptr;
212 u16 *from_u16_ptr, *to_u16_ptr;
213
214 to_u32_ptr = dest_ptr;
215 from_u16_ptr = (u16 *) src_ptr;
216 align_buffer = 0;
217
218 for (; length > 3; length -= 4) {
219 to_u16_ptr = (u16 *) ((void *) &align_buffer);
220 *to_u16_ptr++ = *from_u16_ptr++;
221 *to_u16_ptr++ = *from_u16_ptr++;
222
223
224 *to_u32_ptr++ = align_buffer;
225 }
226 if (length) {
227 u8 *from_u8_ptr, *to_u8_ptr;
228
229
230 align_buffer = 0;
231 to_u8_ptr = (u8 *) &align_buffer;
232 from_u8_ptr = (u8 *) from_u16_ptr;
233
234
235 for (; length > 0; length--)
236 *to_u8_ptr++ = *from_u8_ptr++;
237
238 *to_u32_ptr = align_buffer;
239 }
240}
241
242
243
244
245
246
247
248
249
250
251static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
252 unsigned length)
253{
254 u16 *to_u16_ptr, *from_u16_ptr;
255 u32 *from_u32_ptr;
256 u32 align_buffer;
257
258 from_u32_ptr = src_ptr;
259 to_u16_ptr = (u16 *) dest_ptr;
260
261 for (; length > 3; length -= 4) {
262
263 align_buffer = *from_u32_ptr++;
264 from_u16_ptr = (u16 *)&align_buffer;
265
266
267 *to_u16_ptr++ = *from_u16_ptr++;
268 *to_u16_ptr++ = *from_u16_ptr++;
269 }
270
271 if (length) {
272 u8 *to_u8_ptr, *from_u8_ptr;
273
274
275 to_u8_ptr = (u8 *) to_u16_ptr;
276 align_buffer = *from_u32_ptr++;
277 from_u8_ptr = (u8 *) &align_buffer;
278
279
280 for (; length > 0; length--)
281 *to_u8_ptr = *from_u8_ptr;
282 }
283}
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
/**
 * xemaclite_send_data - Send an Ethernet frame
 * @drvdata: Pointer to the Emaclite device private data
 * @data: Pointer to the data to be transmitted
 * @byte_count: Frame size in bytes (clipped to ETH_FRAME_LEN)
 *
 * Picks a free transmit buffer (the expected one, or the other one when
 * ping-pong is configured and the expected buffer is busy), copies the
 * frame into it and starts transmission by setting the busy bit in the
 * Tx Status register.
 *
 * Return: 0 on success, -1 when no transmit buffer is available.
 */
static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
			       unsigned int byte_count)
{
	u32 reg_data;
	void __iomem *addr;

	/* Determine the expected TX buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	/* If the frame is larger than the maximum, clip it */
	if (byte_count > ETH_FRAME_LEN)
		byte_count = ETH_FRAME_LEN;

	/* Check if the expected buffer is available */
	reg_data = in_be32(addr + XEL_TSR_OFFSET);
	if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
	     XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {

		/* Switch to the next buffer, when ping-pong is configured */
		if (drvdata->tx_ping_pong != 0)
			drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
	} else if (drvdata->tx_ping_pong != 0) {
		/* The expected buffer is full: try the other one, when it
		 * is configured in hardware.
		 * NOTE(review): the (u32) pointer cast assumes a 32-bit
		 * address space - confirm for 64-bit builds. */
		addr = (void __iomem __force *)((u32 __force)addr ^
						 XEL_BUFFER_OFFSET);
		reg_data = in_be32(addr + XEL_TSR_OFFSET);

		if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
		     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
			return -1;	/* Both buffers busy */
	} else
		return -1;	/* Single buffer busy */

	/* Write the frame into the chosen buffer */
	xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);

	/* Tell the device how long the frame is */
	out_be32(addr + XEL_TPLR_OFFSET, (byte_count & XEL_TPLR_LENGTH_MASK));

	/* Update the Tx Status register to indicate that there is a
	 * frame to send. Also set the software-only XMIT_ACTIVE flag,
	 * which the interrupt handler uses to detect that a frame has
	 * completed transmission. */
	reg_data = in_be32(addr + XEL_TSR_OFFSET);
	reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
	out_be32(addr + XEL_TSR_OFFSET, reg_data);

	return 0;
}
350
351
352
353
354
355
356
357
358
359
360
/**
 * xemaclite_recv_data - Receive a frame
 * @drvdata: Pointer to the Emaclite device private data
 * @data: Address where the received frame is to be stored
 *
 * Checks the expected (then, with ping-pong, the other) receive buffer
 * for a completed frame. Since the hardware does not report the frame
 * length, it is derived from the Ethernet type/length field found in the
 * buffer. The frame is copied into @data and the buffer is acknowledged.
 *
 * Return: Total number of bytes received, or 0 when no frame was ready.
 */
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
{
	void __iomem *addr;
	u16 length, proto_type;
	u32 reg_data;

	/* Determine the expected buffer address */
	addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);

	/* Verify which buffer has valid data */
	reg_data = in_be32(addr + XEL_RSR_OFFSET);

	if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
		if (drvdata->rx_ping_pong != 0)
			drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET;
	} else {
		/* The instance is out of sync: try the other buffer when it
		 * is configured, return 0 otherwise. next_rx_buf_to_use is
		 * deliberately left unchanged so it self-corrects on a
		 * subsequent call.
		 * NOTE(review): the (u32) pointer cast assumes a 32-bit
		 * address space - confirm for 64-bit builds. */
		if (drvdata->rx_ping_pong != 0)
			addr = (void __iomem __force *)((u32 __force)addr ^
							 XEL_BUFFER_OFFSET);
		else
			return 0;	/* No data was available */

		/* Verify that the other buffer has valid data */
		reg_data = in_be32(addr + XEL_RSR_OFFSET);
		if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
		     XEL_RSR_RECV_DONE_MASK)
			return 0;	/* No data was available */
	}

	/* Get the type/length field of the frame that arrived */
	proto_type = ((in_be32(addr + XEL_HEADER_OFFSET +
			XEL_RXBUFF_OFFSET) >> XEL_HEADER_SHIFT) &
			XEL_RPLR_LENGTH_MASK);

	/* Values above the maximum frame size are an EtherType (IP, ARP,
	 * ...); values at or below it are an 802.3 length field */
	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {

		if (proto_type == ETH_P_IP) {
			/* IP frame: take the total length from the IP header */
			length = ((in_be32(addr +
					XEL_HEADER_IP_LENGTH_OFFSET +
					XEL_RXBUFF_OFFSET) >>
					XEL_HEADER_SHIFT) &
					XEL_RPLR_LENGTH_MASK);
			length += ETH_HLEN + ETH_FCS_LEN;

		} else if (proto_type == ETH_P_ARP)
			length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
		else
			/* Field contains a type other than IP or ARP: use
			 * the maximum frame size and let the user parse it */
			length = ETH_FRAME_LEN + ETH_FCS_LEN;
	} else
		/* Use the length from the frame, plus header and trailer */
		length = proto_type + ETH_HLEN + ETH_FCS_LEN;

	/* Copy the frame out of the EmacLite buffer */
	xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
				data, length);

	/* Acknowledge the frame */
	reg_data = in_be32(addr + XEL_RSR_OFFSET);
	reg_data &= ~XEL_RSR_RECV_DONE_MASK;
	out_be32(addr + XEL_RSR_OFFSET, reg_data);

	return length;
}
432
433
434
435
436
437
438
439
440
441
442
443
/**
 * xemaclite_set_mac_address - Set the MAC address in the EmacLite device
 * @drvdata: Pointer to the Emaclite device private data
 * @address_ptr: Pointer to the MAC address (ETH_ALEN bytes)
 *
 * Writes the address into the next transmit buffer and tells the device
 * to program it (PROG_MAC_ADDR instead of transmitting), then waits for
 * the device to finish.
 *
 * NOTE(review): the wait loop spins with no timeout and no cpu_relax();
 * if the hardware never clears the bits this hangs. Callers must ensure
 * no transmission is in progress while the address is programmed.
 */
static void xemaclite_set_mac_address(struct net_local *drvdata,
				      u8 *address_ptr)
{
	void __iomem *addr;
	u32 reg_data;

	/* Determine the expected Tx buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);

	out_be32(addr + XEL_TPLR_OFFSET, ETH_ALEN);

	/* Tell the device to program the MAC address */
	reg_data = in_be32(addr + XEL_TSR_OFFSET);
	out_be32(addr + XEL_TSR_OFFSET, reg_data | XEL_TSR_PROG_MAC_ADDR);

	/* Wait for the EmacLite to finish the MAC address update */
	while ((in_be32(addr + XEL_TSR_OFFSET) &
		XEL_TSR_PROG_MAC_ADDR) != 0)
		;
}
466
467
468
469
470
471
472
/**
 * xemaclite_tx_timeout - Callback for Tx timeout
 * @dev: Pointer to the network device
 *
 * Invoked by the network stack when a transmission has not completed
 * within the watchdog interval: counts the error, re-arms the interrupt
 * logic, drops any deferred frame and restarts the queue.
 */
static void xemaclite_tx_timeout(struct net_device *dev)
{
	struct net_local *lp = (struct net_local *) netdev_priv(dev);
	unsigned long flags;

	dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
		TX_TIMEOUT * 1000UL / HZ);

	dev->stats.tx_errors++;

	/* Serialize against the transmit path */
	spin_lock_irqsave(&lp->reset_lock, flags);

	/* Shouldn't really be necessary, but shouldn't hurt */
	netif_stop_queue(dev);

	/* Re-arm the interrupt logic as a lightweight "reset" */
	xemaclite_disable_interrupts(lp);
	xemaclite_enable_interrupts(lp);

	/* Drop the frame that was waiting for a buffer - it is lost */
	if (lp->deferred_skb) {
		dev_kfree_skb(lp->deferred_skb);
		lp->deferred_skb = NULL;
		dev->stats.tx_errors++;
	}

	/* Push trans_start far into the past so the watchdog does not
	 * immediately fire again */
	dev->trans_start = 0xffffffff - TX_TIMEOUT - TX_TIMEOUT;

	/* We're all ready to go - start the queue again */
	netif_wake_queue(dev);
	spin_unlock_irqrestore(&lp->reset_lock, flags);
}
505
506
507
508
509
510
511
512
513
514
515
516
517static void xemaclite_tx_handler(struct net_device *dev)
518{
519 struct net_local *lp = (struct net_local *) netdev_priv(dev);
520
521 dev->stats.tx_packets++;
522 if (lp->deferred_skb) {
523 if (xemaclite_send_data(lp,
524 (u8 *) lp->deferred_skb->data,
525 lp->deferred_skb->len) != 0)
526 return;
527 else {
528 dev->stats.tx_bytes += lp->deferred_skb->len;
529 dev_kfree_skb_irq(lp->deferred_skb);
530 lp->deferred_skb = NULL;
531 dev->trans_start = jiffies;
532 netif_wake_queue(dev);
533 }
534 }
535}
536
537
538
539
540
541
542
543
544static void xemaclite_rx_handler(struct net_device *dev)
545{
546 struct net_local *lp = (struct net_local *) netdev_priv(dev);
547 struct sk_buff *skb;
548 unsigned int align;
549 u32 len;
550
551 len = ETH_FRAME_LEN + ETH_FCS_LEN;
552 skb = dev_alloc_skb(len + ALIGNMENT);
553 if (!skb) {
554
555 dev->stats.rx_dropped++;
556 dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n");
557 return;
558 }
559
560
561
562
563
564
565 align = BUFFER_ALIGN(skb->data);
566 if (align)
567 skb_reserve(skb, align);
568
569 skb_reserve(skb, 2);
570
571 len = xemaclite_recv_data(lp, (u8 *) skb->data);
572
573 if (!len) {
574 dev->stats.rx_errors++;
575 dev_kfree_skb_irq(skb);
576 return;
577 }
578
579 skb_put(skb, len);
580 skb->dev = dev;
581
582 skb->protocol = eth_type_trans(skb, dev);
583 skb->ip_summed = CHECKSUM_NONE;
584
585 dev->stats.rx_packets++;
586 dev->stats.rx_bytes += len;
587
588 netif_rx(skb);
589}
590
591
592
593
594
595
596
597
598
599static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
600{
601 bool tx_complete = 0;
602 struct net_device *dev = dev_id;
603 struct net_local *lp = (struct net_local *) netdev_priv(dev);
604 void __iomem *base_addr = lp->base_addr;
605 u32 tx_status;
606
607
608 if ((in_be32(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) ||
609 (in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
610 & XEL_RSR_RECV_DONE_MASK))
611
612 xemaclite_rx_handler(dev);
613
614
615 tx_status = in_be32(base_addr + XEL_TSR_OFFSET);
616 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
617 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
618
619 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
620 out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
621
622 tx_complete = 1;
623 }
624
625
626 tx_status = in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
627 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
628 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
629
630 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
631 out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
632 tx_status);
633
634 tx_complete = 1;
635 }
636
637
638 if (tx_complete != 0)
639 xemaclite_tx_handler(dev);
640
641 return IRQ_HANDLED;
642}
643
644
645
646
647
648
649
650
651static int xemaclite_open(struct net_device *dev)
652{
653 struct net_local *lp = (struct net_local *) netdev_priv(dev);
654 int retval;
655
656
657 xemaclite_disable_interrupts(lp);
658
659
660 xemaclite_set_mac_address(lp, dev->dev_addr);
661
662
663 retval = request_irq(dev->irq, &xemaclite_interrupt, 0, dev->name, dev);
664 if (retval) {
665 dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
666 dev->irq);
667 return retval;
668 }
669
670
671 xemaclite_enable_interrupts(lp);
672
673
674 netif_start_queue(dev);
675
676 return 0;
677}
678
679
680
681
682
683
684
685
686static int xemaclite_close(struct net_device *dev)
687{
688 struct net_local *lp = (struct net_local *) netdev_priv(dev);
689
690 netif_stop_queue(dev);
691 xemaclite_disable_interrupts(lp);
692 free_irq(dev->irq, dev);
693
694 return 0;
695}
696
697
698
699
700
701
702
703
704
705
706
707static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
708{
709 return &dev->stats;
710}
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
727{
728 struct net_local *lp = (struct net_local *) netdev_priv(dev);
729 struct sk_buff *new_skb;
730 unsigned int len;
731 unsigned long flags;
732
733 len = orig_skb->len;
734
735 new_skb = orig_skb;
736
737 spin_lock_irqsave(&lp->reset_lock, flags);
738 if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
739
740
741
742 netif_stop_queue(dev);
743 lp->deferred_skb = new_skb;
744 spin_unlock_irqrestore(&lp->reset_lock, flags);
745 return 0;
746 }
747 spin_unlock_irqrestore(&lp->reset_lock, flags);
748
749 dev->stats.tx_bytes += len;
750 dev_kfree_skb(new_skb);
751 dev->trans_start = jiffies;
752
753 return 0;
754}
755
756
757
758
759
760
761
762
763
764
765
766
767static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
768{
769 struct net_local *lp = (struct net_local *) netdev_priv(dev);
770 struct hw_addr_data *hw_addr = (struct hw_addr_data *) &rq->ifr_hwaddr;
771
772 switch (cmd) {
773 case SIOCETHTOOL:
774 return -EIO;
775
776 case SIOCSIFHWADDR:
777 dev_err(&lp->ndev->dev, "SIOCSIFHWADDR\n");
778
779
780 copy_from_user((void __force *) dev->dev_addr,
781 (void __user __force *) hw_addr,
782 IFHWADDRLEN);
783 xemaclite_set_mac_address(lp, dev->dev_addr);
784 break;
785 default:
786 return -EOPNOTSUPP;
787 }
788
789 return 0;
790}
791
792
793
794
795
796
797
798
799static void xemaclite_remove_ndev(struct net_device *ndev)
800{
801 if (ndev) {
802 struct net_local *lp = (struct net_local *) netdev_priv(ndev);
803
804 if (lp->base_addr)
805 iounmap((void __iomem __force *) (lp->base_addr));
806 free_netdev(ndev);
807 }
808}
809
810
811
812
813
814
815
816
817
818
819
820static bool get_bool(struct of_device *ofdev, const char *s)
821{
822 u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
823
824 if (p) {
825 return (bool)*p;
826 } else {
827 dev_warn(&ofdev->dev, "Parameter %s not found,"
828 "defaulting to false\n", s);
829 return 0;
830 }
831}
832
833static struct net_device_ops xemaclite_netdev_ops;
834
835
836
837
838
839
840
841
842
843
844
845
846
/**
 * xemaclite_of_probe - Probe method for the Emaclite device
 * @ofdev: Pointer to the OF device structure
 * @match: Pointer to the entry in xemaclite_of_match that matched
 *
 * Reads the register window, IRQ, ping-pong capabilities and MAC address
 * from the device tree, maps the device and registers the network device.
 *
 * Return: 0 on success, a negative errno (or the of_address_to_resource()
 * error) on failure.
 *
 * NOTE(review): when of_irq_to_resource() fails, the NO_IRQ value itself
 * is returned; on platforms where NO_IRQ == 0 that would report success -
 * confirm and return a proper -ENODEV style error if so.
 */
static int __devinit xemaclite_of_probe(struct of_device *ofdev,
					const struct of_device_id *match)
{
	struct resource r_irq;	/* Interrupt resource */
	struct resource r_mem;	/* IO memory resource */
	struct net_device *ndev = NULL;
	struct net_local *lp = NULL;
	struct device *dev = &ofdev->dev;
	const void *mac_address;

	int rc = 0;

	dev_info(dev, "Device Tree Probing\n");

	/* Get iospace for the device */
	rc = of_address_to_resource(ofdev->node, 0, &r_mem);
	if (rc) {
		dev_err(dev, "invalid address\n");
		return rc;
	}

	/* Get IRQ for the device (see NOTE above) */
	rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
	if (rc == NO_IRQ) {
		dev_err(dev, "no IRQ found\n");
		return rc;
	}

	/* Create an ethernet device instance */
	ndev = alloc_etherdev(sizeof(struct net_local));
	if (!ndev) {
		dev_err(dev, "Could not allocate network device\n");
		return -ENOMEM;
	}

	dev_set_drvdata(dev, ndev);

	ndev->irq = r_irq.start;
	ndev->mem_start = r_mem.start;
	ndev->mem_end = r_mem.end;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Claim the device's register window */
	if (!request_mem_region(ndev->mem_start,
				ndev->mem_end - ndev->mem_start + 1,
				DRIVER_NAME)) {
		dev_err(dev, "Couldn't lock memory region at %p\n",
			(void *)ndev->mem_start);
		rc = -EBUSY;
		goto error2;
	}

	/* Get the virtual base address for the device */
	lp->base_addr = ioremap(r_mem.start, r_mem.end - r_mem.start + 1);
	if (NULL == lp->base_addr) {
		dev_err(dev, "EmacLite: Could not allocate iomem\n");
		rc = -EIO;
		goto error1;
	}

	spin_lock_init(&lp->reset_lock);
	lp->next_tx_buf_to_use = 0x0;
	lp->next_rx_buf_to_use = 0x0;
	lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
	lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
	mac_address = of_get_mac_address(ofdev->node);

	if (mac_address)
		/* Set the MAC address from the device tree */
		memcpy(ndev->dev_addr, mac_address, 6);
	else
		dev_warn(dev, "No MAC address found\n");

	/* Clear the Tx status registers in case this is a restart */
	out_be32(lp->base_addr + XEL_TSR_OFFSET, 0);
	out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);

	/* Program the MAC address into the EmacLite device */
	xemaclite_set_mac_address(lp, ndev->dev_addr);

	dev_info(dev,
		 "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
		 ndev->dev_addr[0], ndev->dev_addr[1],
		 ndev->dev_addr[2], ndev->dev_addr[3],
		 ndev->dev_addr[4], ndev->dev_addr[5]);

	ndev->netdev_ops = &xemaclite_netdev_ops;
	ndev->flags &= ~IFF_MULTICAST;	/* device has no multicast filtering */
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* Finally, register the device */
	rc = register_netdev(ndev);
	if (rc) {
		dev_err(dev,
			"Cannot register network device, aborting\n");
		goto error1;
	}

	dev_info(dev,
		 "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
		 (unsigned int __force)ndev->mem_start,
		 (unsigned int __force)lp->base_addr, ndev->irq);
	return 0;

error1:	/* The memory region was claimed; release it, then fall through */
	release_mem_region(ndev->mem_start, r_mem.end - r_mem.start + 1);

error2:	/* Only the netdev itself has been allocated */
	xemaclite_remove_ndev(ndev);
	return rc;
}
959
960
961
962
963
964
965
966
967
968
969
970static int __devexit xemaclite_of_remove(struct of_device *of_dev)
971{
972 struct device *dev = &of_dev->dev;
973 struct net_device *ndev = dev_get_drvdata(dev);
974
975 unregister_netdev(ndev);
976
977 release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
978
979 xemaclite_remove_ndev(ndev);
980
981 dev_set_drvdata(dev, NULL);
982
983 return 0;
984}
985
/* net_device operations implemented by this driver */
static struct net_device_ops xemaclite_netdev_ops = {
	.ndo_open = xemaclite_open,
	.ndo_stop = xemaclite_close,
	.ndo_start_xmit = xemaclite_send,
	.ndo_do_ioctl = xemaclite_ioctl,
	.ndo_tx_timeout = xemaclite_tx_timeout,
	.ndo_get_stats = xemaclite_get_stats,
};
994
995
/* Device-tree "compatible" strings this driver binds to */
static struct of_device_id xemaclite_of_match[] __devinitdata = {
	{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
	{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
	{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
1005
/* OF platform driver glue */
static struct of_platform_driver xemaclite_of_driver = {
	.name = DRIVER_NAME,
	.match_table = xemaclite_of_match,
	.probe = xemaclite_of_probe,
	.remove = __devexit_p(xemaclite_of_remove),
};
1012
1013
1014
1015
1016
1017
1018static int __init xemaclite_init(void)
1019{
1020
1021 return of_register_platform_driver(&xemaclite_of_driver);
1022}
1023
1024
1025
1026
1027static void __exit xemaclite_cleanup(void)
1028{
1029 of_unregister_platform_driver(&xemaclite_of_driver);
1030}
1031
1032module_init(xemaclite_init);
1033module_exit(xemaclite_cleanup);
1034
1035MODULE_AUTHOR("Xilinx, Inc.");
1036MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
1037MODULE_LICENSE("GPL");
1038