/*
 * Xilinx Axi Ethernet device driver
 *
 * This file implements the Axi Ethernet MAC driver: the net_device
 * operations (open/stop/transmit), Axi DMA buffer descriptor ring
 * management, interrupt and DMA error handling, multicast filtering,
 * MAC address programming and the ethtool interface.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init(). It is called when the Axi Ethernet stop routine
 * runs and from the error path of axienet_dma_bd_init(), so it must cope
 * with rings that were never allocated.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	if (lp->rx_bd_v) {
		/* Unmap and free the Rx buffers attached to the ring
		 * before releasing the ring itself.
		 */
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (lp->rx_bd_v[i].sw_id_offset));
		}
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success; -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptor rings */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p,
					 GFP_KERNEL);
	if (!lp->tx_bd_v) {
		dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
			"descriptors");
		goto out;
	}

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p,
					 GFP_KERNEL);
	if (!lp->rx_bd_v) {
		dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
			"descriptors");
		goto out;
	}

	/* Link each Tx descriptor to the next one, wrapping at the end to
	 * form a circular ring.
	 */
	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	/* Build the Rx ring the same way and attach a freshly allocated
	 * skb to every descriptor.
	 */
	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			goto out;
		}

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will it start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
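
/*
 * Note on the handshake used above and in axienet_start_xmit() /
 * axienet_recv(): the Axi DMA engine processes descriptors from the
 * current-descriptor register up to and including the tail-descriptor
 * register. With the RS bit set, rewriting the tail pointer is what
 * actually kicks off new transfers, which is why the transmit and
 * receive paths below each end by updating XAXIDMA_TX_TDESC_OFFSET /
 * XAXIDMA_RX_TDESC_OFFSET.
 */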

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_random_addr(ndev->dev_addr);

	/* Set up unicast MAC address filter set its mac address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}
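
/*
 * The low byte of the FMI register selects which of the
 * XAE_MULTICAST_CAM_TABLE_NUM address-filter entries the subsequent
 * AF0/AF1 writes target; that is why axienet_set_multicast_list()
 * rewrites FMI with the entry index before each pair of address
 * register writes.
 */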

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option table.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA "
				"reset timeout!\n");
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev, "axienet_device_reset descriptor "
			"allocation failed\n");
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev, "Speed other than 10, 100 "
					"or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			dev_err(&ndev->dev, "Error setting Axi Ethernet "
				"mac speed\n");
		}
	}
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		lp->tx_bd_ci = (lp->tx_bd_ci + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success; NETDEV_TX_BUSY, if any of the descriptors are busy.
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * It returns 0 if a BD or group of BDs can be allocated for transmission.
 * If the BD or any of the BDs are not free the function returns a busy
 * status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be transmitted.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success; NETDEV_TX_BUSY, if any of the
 * descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates the AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;

	return NETDEV_TX_OK;
}
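
/*
 * Ring bookkeeping: tx_bd_tail is the producer index, advanced in
 * axienet_start_xmit() as descriptors are filled, while tx_bd_ci is the
 * consumer index, advanced in axienet_start_xmit_done() as the DMA
 * engine completes descriptors. Both wrap modulo TX_BD_NUM, so the two
 * indices chase each other around the circular descriptor ring.
 */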

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);

		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		/* Attach a fresh skb to the descriptor and hand it back
		 * to the DMA engine.
		 */
		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb) {
			dev_err(&ndev->dev, "no memory for new sk_buff\n");
			return;
		}
		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		lp->rx_bd_ci = (lp->rx_bd_ci + 1) % RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}
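
/*
 * On a DMA error both handlers mask the Tx and Rx channel interrupts
 * before scheduling the error tasklet: axienet_dma_err_handler() resets
 * the whole core, which affects both channels, so neither handler
 * should fire again until the rings have been reinitialized.
 */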

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY
 * device. It also allocates interrupt service routines, enables the
 * interrupt lines and ISR handling. The Axi Ethernet core is reset through
 * the Axi DMA core. Buffer descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success; -EBUSY if the interface is up; -EINVAL if the
 * requested mtu is out of range for the hardware.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;
	if (lp->jumbo_support) {
		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	} else {
		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements the Rx/Tx ISR contents for netpoll use cases such as
 * netconsole: the interrupt lines are disabled and the handlers invoked
 * directly.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev:	Pointer to net_device structure
 * @ecmd:	Pointer to ethtool_cmd structure
 *
 * Return: 0 on success; -ENODEV if no PHY is connected.
 *
 * This implements the ethtool command for getting PHY settings. It calls the
 * relevant PHY ethtool API. Issue "ethtool ethX" under a linux prompt to
 * execute this function.
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_gset(phydev, ecmd);
}

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev:	Pointer to net_device structure
 * @ecmd:	Pointer to ethtool_cmd structure
 *
 * Return: 0 on success; -ENODEV if no PHY is connected.
 *
 * This implements the ethtool command for setting various PHY settings. It
 * calls the relevant PHY ethtool API. Issue e.g. "ethtool -s ethX speed 1000"
 * under a linux prompt to execute this function.
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_sset(phydev, ecmd);
}

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements the ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under a linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	memset(ed, 0, sizeof(struct ethtool_drvinfo));
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * Return: the total register dump length of the AxiEthernet core.
 *
 * This is the helper function for ethtool get_regs_len. It precedes
 * ethtool get_regs so that the dump buffer can be sized correctly.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in the AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements the ethtool command for getting the Axi Ethernet register
 * dump. Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements the ethtool command for getting the Axi Ethernet pause
 * frame setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);
	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * Return: 0 on success; -EFAULT if the device is running.
 *
 * This implements the ethtool command for enabling flow control in the Tx
 * and Rx paths. Issue "ethtool -A ethX tx on|off" under a linux prompt to
 * execute this function.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying "
		       "configuration\n", ndev->name);
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get the DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * Return: 0 always.
 *
 * This implements the ethtool command for getting the DMA interrupt
 * coalescing count on the Tx and Rx paths. Issue "ethtool -c ethX" under a
 * linux prompt to execute this function.
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set the DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * Return: 0 on success; -EFAULT if the device is running; -EOPNOTSUPP if an
 * unsupported coalescing parameter is requested.
 *
 * This implements the ethtool command for setting the DMA interrupt
 * coalescing count on the Tx and Rx paths. Issue "ethtool -C ethX
 * rx-frames 5" under a linux prompt to execute this function.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying "
		       "configuration\n", ndev->name);
		return -EFAULT;
	}

	/* Only the frame-count thresholds are supported; reject every other
	 * coalescing parameter.
	 */
	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}
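
/*
 * The updated coalesce counts are only written to the DMA control
 * registers by axienet_dma_bd_init(), i.e. they take effect on the next
 * device reset/open; this is why the interface must be down when they
 * are changed.
 */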

static struct ethtool_ops axienet_ethtool_ops = {
	.get_settings   = axienet_ethtools_get_settings,
	.set_settings   = axienet_ethtools_set_settings,
	.get_drvinfo    = axienet_ethtools_get_drvinfo,
	.get_regs_len   = axienet_ethtools_get_regs_len,
	.get_regs       = axienet_ethtools_get_regs,
	.get_link       = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce   = axienet_ethtools_get_coalesce,
	.set_coalesce   = axienet_ethtools_set_coalesce,
};

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data:	Data passed (the axienet_local structure, cast to unsigned long)
 *
 * Resets the Axi DMA and Axi Ethernet devices and reconfigures the Tx/Rx
 * BD rings so that the hardware can restart from a clean state.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	/* Unmap and drop any Tx buffers that were still in flight, then
	 * scrub both descriptor rings.
	 */
	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will it start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_of_probe - Axi Ethernet probe function.
 * @op:		Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the
 * Ethernet device, parses the device tree to populate fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_of_probe(struct platform_device *op)
{
	__be32 *p;
	int size, ret = 0;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *addr;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	ether_setup(ndev);
	dev_set_drvdata(&op->dev, ndev);

	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	lp->regs = of_iomap(op->dev.of_node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
		ret = -ENOMEM;
		goto nodev;
	}
	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx memory. Typically the size must be at least 16384
	 * bytes (0x4000) before the jumbo option can be enabled. Here we
	 * check the memory allocated for Rx/Tx in the hardware from the
	 * device tree and set the flags accordingly.
	 */
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
	if (p) {
		if ((be32_to_cpup(p)) >= 0x4000)
			lp->jumbo_support = 1;
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
				       NULL);
	if (p)
		lp->temac_type = be32_to_cpup(p);
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
	if (p)
		lp->phy_type = be32_to_cpup(p);

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
	if (!np) {
		dev_err(&op->dev, "could not find DMA node\n");
		ret = -ENODEV;
		goto err_iounmap;
	}
	lp->dma_regs = of_iomap(np, 0);
	if (lp->dma_regs) {
		dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
	} else {
		dev_err(&op->dev, "unable to map DMA registers\n");
		of_node_put(np);
		ret = -ENOMEM;
		goto err_iounmap;
	}
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&op->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto err_iounmap_2;
	}

	/* Retrieve the MAC address */
	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
	if ((!addr) || (size != 6)) {
		dev_err(&op->dev, "could not find MAC address\n");
		ret = -ENODEV;
		goto err_iounmap_2;
	}
	axienet_set_mac_address(ndev, (void *) addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
	ret = axienet_mdio_setup(lp, op->dev.of_node);
	if (ret)
		dev_warn(&op->dev, "error registering MDIO bus\n");

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto err_iounmap_2;
	}

	return 0;

err_iounmap_2:
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
err_iounmap:
	iounmap(lp->regs);
nodev:
	free_netdev(ndev);
	ndev = NULL;
	return ret;
}

static int axienet_of_remove(struct platform_device *op)
{
	struct net_device *ndev = dev_get_drvdata(&op->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	dev_set_drvdata(&op->dev, NULL);

	iounmap(lp->regs);
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_of_driver = {
	.probe = axienet_of_probe,
	.remove = axienet_of_remove,
	.driver = {
		 .owner = THIS_MODULE,
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_of_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");