/* Gianfar Ethernet Driver
 *
 * Copyright Freescale Semiconductor, Inc.
 */
60#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61#define DEBUG
62
63#include <linux/kernel.h>
64#include <linux/string.h>
65#include <linux/errno.h>
66#include <linux/unistd.h>
67#include <linux/slab.h>
68#include <linux/interrupt.h>
69#include <linux/delay.h>
70#include <linux/netdevice.h>
71#include <linux/etherdevice.h>
72#include <linux/skbuff.h>
73#include <linux/if_vlan.h>
74#include <linux/spinlock.h>
75#include <linux/mm.h>
76#include <linux/of_address.h>
77#include <linux/of_irq.h>
78#include <linux/of_mdio.h>
79#include <linux/of_platform.h>
80#include <linux/ip.h>
81#include <linux/tcp.h>
82#include <linux/udp.h>
83#include <linux/in.h>
84#include <linux/net_tstamp.h>
85
86#include <asm/io.h>
87#ifdef CONFIG_PPC
88#include <asm/reg.h>
89#include <asm/mpc85xx.h>
90#endif
91#include <asm/irq.h>
92#include <linux/uaccess.h>
93#include <linux/module.h>
94#include <linux/dma-mapping.h>
95#include <linux/crc32.h>
96#include <linux/mii.h>
97#include <linux/phy.h>
98#include <linux/phy_fixed.h>
99#include <linux/of.h>
100#include <linux/of_net.h>
101
102#include "gianfar.h"
103
104#define TX_TIMEOUT (5*HZ)
105
106MODULE_AUTHOR("Freescale Semiconductor, Inc");
107MODULE_DESCRIPTION("Gianfar Ethernet Driver");
108MODULE_LICENSE("GPL");
109
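/* Initialize a single Rx buffer descriptor: record the DMA buffer address,
 * mark the BD empty (ready for hardware), and set the WRAP flag on the last
 * descriptor of the ring.
 */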
110static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
111 dma_addr_t buf)
112{
113 u32 lstatus;
114
115 bdp->bufPtr = cpu_to_be32(buf);
116
117 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
118 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
119 lstatus |= BD_LFLAG(RXBD_WRAP);
120
121 gfar_wmb();
122
123 bdp->lstatus = cpu_to_be32(lstatus);
124}
125
126static void gfar_init_tx_rx_base(struct gfar_private *priv)
127{
128 struct gfar __iomem *regs = priv->gfargrp[0].regs;
129 u32 __iomem *baddr;
130 int i;
131
	baddr = &regs->tbase0;
133 for (i = 0; i < priv->num_tx_queues; i++) {
134 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
135 baddr += 2;
136 }
137
	baddr = &regs->rbase0;
139 for (i = 0; i < priv->num_rx_queues; i++) {
140 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
141 baddr += 2;
142 }
143}
144
145static void gfar_init_rqprm(struct gfar_private *priv)
146{
147 struct gfar __iomem *regs = priv->gfargrp[0].regs;
148 u32 __iomem *baddr;
149 int i;
150
	baddr = &regs->rqprm0;
152 for (i = 0; i < priv->num_rx_queues; i++) {
153 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
154 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
155 baddr++;
156 }
157}
158
159static void gfar_rx_offload_en(struct gfar_private *priv)
160{
161
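	/* The hardware prepends an Rx frame control block (FCB) to each frame
	 * whenever checksum offload, VLAN extraction, hardware Rx timestamping
	 * or the packet filer is enabled.
	 */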
162 priv->uses_rxfcb = 0;
163
164 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
165 priv->uses_rxfcb = 1;
166
167 if (priv->hwts_rx_en || priv->rx_filer_enable)
168 priv->uses_rxfcb = 1;
169}
170
171static void gfar_mac_rx_config(struct gfar_private *priv)
172{
173 struct gfar __iomem *regs = priv->gfargrp[0].regs;
174 u32 rctrl = 0;
175
176 if (priv->rx_filer_enable) {
177 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
178
179 if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
183 }
184
185
186 if (priv->ndev->flags & IFF_PROMISC)
187 rctrl |= RCTRL_PROM;
188
189 if (priv->ndev->features & NETIF_F_RXCSUM)
190 rctrl |= RCTRL_CHECKSUMMING;
191
192 if (priv->extended_hash)
193 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
194
195 if (priv->padding) {
196 rctrl &= ~RCTRL_PAL_MASK;
197 rctrl |= RCTRL_PADDING(priv->padding);
198 }
199
200
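	/* Enable HW time stamping if requested from user space */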
201 if (priv->hwts_rx_en)
202 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
203
204 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
205 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
206
207
	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
216}
217
218static void gfar_mac_tx_config(struct gfar_private *priv)
219{
220 struct gfar __iomem *regs = priv->gfargrp[0].regs;
221 u32 tctrl = 0;
222
223 if (priv->ndev->features & NETIF_F_IP_CSUM)
224 tctrl |= TCTRL_INIT_CSUM;
225
226 if (priv->prio_sched_en)
227 tctrl |= TCTRL_TXSCHED_PRIO;
228 else {
229 tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
232 }
233
234 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
235 tctrl |= TCTRL_VLINS;
236
	gfar_write(&regs->tctrl, tctrl);
238}
239
240static void gfar_configure_coalescing(struct gfar_private *priv,
241 unsigned long tx_mask, unsigned long rx_mask)
242{
243 struct gfar __iomem *regs = priv->gfargrp[0].regs;
244 u32 __iomem *baddr;
245
246 if (priv->mode == MQ_MG_MODE) {
247 int i = 0;
248
		baddr = &regs->txic0;
250 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
251 gfar_write(baddr + i, 0);
252 if (likely(priv->tx_queue[i]->txcoalescing))
253 gfar_write(baddr + i, priv->tx_queue[i]->txic);
254 }
255
		baddr = &regs->rxic0;
257 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
258 gfar_write(baddr + i, 0);
259 if (likely(priv->rx_queue[i]->rxcoalescing))
260 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
261 }
262 } else {
263
264
265
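		/* Backward compatible case: even with multiple queues enabled,
		 * only a single pair of coalescing registers (txic/rxic)
		 * exists to program.
		 */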
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
273 }
274}
275
276static void gfar_configure_coalescing_all(struct gfar_private *priv)
277{
278 gfar_configure_coalescing(priv, 0xFF, 0xFF);
279}
280
281static struct net_device_stats *gfar_get_stats(struct net_device *dev)
282{
283 struct gfar_private *priv = netdev_priv(dev);
284 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
285 unsigned long tx_packets = 0, tx_bytes = 0;
286 int i;
287
288 for (i = 0; i < priv->num_rx_queues; i++) {
289 rx_packets += priv->rx_queue[i]->stats.rx_packets;
290 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
291 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
292 }
293
294 dev->stats.rx_packets = rx_packets;
295 dev->stats.rx_bytes = rx_bytes;
296 dev->stats.rx_dropped = rx_dropped;
297
298 for (i = 0; i < priv->num_tx_queues; i++) {
299 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
300 tx_packets += priv->tx_queue[i]->stats.tx_packets;
301 }
302
303 dev->stats.tx_bytes = tx_bytes;
304 dev->stats.tx_packets = tx_packets;
305
306 return &dev->stats;
307}
308
322
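/* Set the appropriate hash bit for the given addr.
 *
 * The algorithm: compute the CRC-32 of the (multicast) address and use the
 * hash_width most significant bits of the result as an index into the group
 * address hash table.  The table is spread across the hash_regs[] registers:
 * the upper bits of the index select the register and the low 5 bits select
 * the bit within that register (bit 31 being the first entry).
 */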
323static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
324{
325 u32 tempval;
326 struct gfar_private *priv = netdev_priv(dev);
327 u32 result = ether_crc(ETH_ALEN, addr);
328 int width = priv->hash_width;
329 u8 whichbit = (result >> (32 - width)) & 0x1f;
330 u8 whichreg = result >> (32 - width + 5);
331 u32 value = (1 << (31-whichbit));
332
333 tempval = gfar_read(priv->hash_regs[whichreg]);
334 tempval |= value;
335 gfar_write(priv->hash_regs[whichreg], tempval);
336}
337
338
339
340
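/* There are multiple MAC address register pairs on some controllers.
 * This function sets the numth pair to the given address.
 */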
341static void gfar_set_mac_for_addr(struct net_device *dev, int num,
342 const u8 *addr)
343{
344 struct gfar_private *priv = netdev_priv(dev);
345 struct gfar __iomem *regs = priv->gfargrp[0].regs;
346 u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;
348
349 macptr += num*2;
350
351
352
353
354
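	/* For a station address of 0x12345678ABCD in transmission order (BE),
	 * MACnADDR1 is set to 0xCDAB7856 and MACnADDR2 is set to 0x34120000.
	 */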
355 tempval = (addr[5] << 24) | (addr[4] << 16) |
356 (addr[3] << 8) | addr[2];
357
358 gfar_write(macptr, tempval);
359
360 tempval = (addr[1] << 24) | (addr[0] << 16);
361
362 gfar_write(macptr+1, tempval);
363}
364
365static int gfar_set_mac_addr(struct net_device *dev, void *p)
366{
367 eth_mac_addr(dev, p);
368
369 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
370
371 return 0;
372}
373
374static void gfar_ints_disable(struct gfar_private *priv)
375{
376 int i;
377 for (i = 0; i < priv->num_grps; i++) {
378 struct gfar __iomem *regs = priv->gfargrp[i].regs;
379
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
384 }
385}
386
387static void gfar_ints_enable(struct gfar_private *priv)
388{
389 int i;
390 for (i = 0; i < priv->num_grps; i++) {
391 struct gfar __iomem *regs = priv->gfargrp[i].regs;
392
		gfar_write(&regs->imask, IMASK_DEFAULT);
394 }
395}
396
397static int gfar_alloc_tx_queues(struct gfar_private *priv)
398{
399 int i;
400
401 for (i = 0; i < priv->num_tx_queues; i++) {
402 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
403 GFP_KERNEL);
404 if (!priv->tx_queue[i])
405 return -ENOMEM;
406
407 priv->tx_queue[i]->tx_skbuff = NULL;
408 priv->tx_queue[i]->qindex = i;
409 priv->tx_queue[i]->dev = priv->ndev;
410 spin_lock_init(&(priv->tx_queue[i]->txlock));
411 }
412 return 0;
413}
414
415static int gfar_alloc_rx_queues(struct gfar_private *priv)
416{
417 int i;
418
419 for (i = 0; i < priv->num_rx_queues; i++) {
420 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
421 GFP_KERNEL);
422 if (!priv->rx_queue[i])
423 return -ENOMEM;
424
425 priv->rx_queue[i]->qindex = i;
426 priv->rx_queue[i]->ndev = priv->ndev;
427 }
428 return 0;
429}
430
431static void gfar_free_tx_queues(struct gfar_private *priv)
432{
433 int i;
434
435 for (i = 0; i < priv->num_tx_queues; i++)
436 kfree(priv->tx_queue[i]);
437}
438
439static void gfar_free_rx_queues(struct gfar_private *priv)
440{
441 int i;
442
443 for (i = 0; i < priv->num_rx_queues; i++)
444 kfree(priv->rx_queue[i]);
445}
446
447static void unmap_group_regs(struct gfar_private *priv)
448{
449 int i;
450
451 for (i = 0; i < MAXGROUPS; i++)
452 if (priv->gfargrp[i].regs)
453 iounmap(priv->gfargrp[i].regs);
454}
455
456static void free_gfar_dev(struct gfar_private *priv)
457{
458 int i, j;
459
460 for (i = 0; i < priv->num_grps; i++)
461 for (j = 0; j < GFAR_NUM_IRQS; j++) {
462 kfree(priv->gfargrp[i].irqinfo[j]);
463 priv->gfargrp[i].irqinfo[j] = NULL;
464 }
465
466 free_netdev(priv->ndev);
467}
468
469static void disable_napi(struct gfar_private *priv)
470{
471 int i;
472
473 for (i = 0; i < priv->num_grps; i++) {
474 napi_disable(&priv->gfargrp[i].napi_rx);
475 napi_disable(&priv->gfargrp[i].napi_tx);
476 }
477}
478
479static void enable_napi(struct gfar_private *priv)
480{
481 int i;
482
483 for (i = 0; i < priv->num_grps; i++) {
484 napi_enable(&priv->gfargrp[i].napi_rx);
485 napi_enable(&priv->gfargrp[i].napi_tx);
486 }
487}
488
489static int gfar_parse_group(struct device_node *np,
490 struct gfar_private *priv, const char *model)
491{
492 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
493 int i;
494
495 for (i = 0; i < GFAR_NUM_IRQS; i++) {
496 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
497 GFP_KERNEL);
498 if (!grp->irqinfo[i])
499 return -ENOMEM;
500 }
501
502 grp->regs = of_iomap(np, 0);
503 if (!grp->regs)
504 return -ENOMEM;
505
506 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
507
508
509 if (model && strcasecmp(model, "FEC")) {
510 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
511 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
512 if (!gfar_irq(grp, TX)->irq ||
513 !gfar_irq(grp, RX)->irq ||
514 !gfar_irq(grp, ER)->irq)
515 return -EINVAL;
516 }
517
518 grp->priv = priv;
519 spin_lock_init(&grp->grplock);
520 if (priv->mode == MQ_MG_MODE) {
521 u32 rxq_mask, txq_mask;
522 int ret;
523
524 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
525 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
526
527 ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
528 if (!ret) {
529 grp->rx_bit_map = rxq_mask ?
530 rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
531 }
532
533 ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
534 if (!ret) {
535 grp->tx_bit_map = txq_mask ?
536 txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
537 }
538
539 if (priv->poll_mode == GFAR_SQ_POLLING) {
540
541 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
542 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
543 }
544 } else {
545 grp->rx_bit_map = 0xFF;
546 grp->tx_bit_map = 0xFF;
547 }
548
549
550
551
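	/* bit_map's MSB is q0 (from q0 to q7) but for_each_set_bit parses
	 * the sequence from right to left, so we have to reverse it.
	 */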
552 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
553 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
554
555
556
557
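	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * and also assign queues to this group.
	 */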
558 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
559 if (!grp->rx_queue)
560 grp->rx_queue = priv->rx_queue[i];
561 grp->num_rx_queues++;
562 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
563 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
564 priv->rx_queue[i]->grp = grp;
565 }
566
567 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
568 if (!grp->tx_queue)
569 grp->tx_queue = priv->tx_queue[i];
570 grp->num_tx_queues++;
571 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
572 priv->tqueue |= (TQUEUE_EN0 >> i);
573 priv->tx_queue[i]->grp = grp;
574 }
575
576 priv->num_grps++;
577
578 return 0;
579}
580
581static int gfar_of_group_count(struct device_node *np)
582{
583 struct device_node *child;
584 int num = 0;
585
586 for_each_available_child_of_node(np, child)
587 if (of_node_name_eq(child, "queue-group"))
588 num++;
589
590 return num;
591}
592
593
594
595
596static phy_interface_t gfar_get_interface(struct net_device *dev)
597{
598 struct gfar_private *priv = netdev_priv(dev);
599 struct gfar __iomem *regs = priv->gfargrp[0].regs;
600 u32 ecntrl;
601
	ecntrl = gfar_read(&regs->ecntrl);
603
604 if (ecntrl & ECNTRL_SGMII_MODE)
605 return PHY_INTERFACE_MODE_SGMII;
606
607 if (ecntrl & ECNTRL_TBI_MODE) {
608 if (ecntrl & ECNTRL_REDUCED_MODE)
609 return PHY_INTERFACE_MODE_RTBI;
610 else
611 return PHY_INTERFACE_MODE_TBI;
612 }
613
614 if (ecntrl & ECNTRL_REDUCED_MODE) {
615 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
616 return PHY_INTERFACE_MODE_RMII;
617 }
618 else {
619 phy_interface_t interface = priv->interface;
620
621
622
623
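			/* RGMII-ID (internal delay) cannot be detected from the
			 * registers, so honor what the device tree or platform
			 * code specified.
			 */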
624 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
625 return PHY_INTERFACE_MODE_RGMII_ID;
626
627 return PHY_INTERFACE_MODE_RGMII;
628 }
629 }
630
631 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
632 return PHY_INTERFACE_MODE_GMII;
633
634 return PHY_INTERFACE_MODE_MII;
635}
636
637static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
638{
639 const char *model;
640 const void *mac_addr;
641 int err = 0, i;
642 phy_interface_t interface;
643 struct net_device *dev = NULL;
644 struct gfar_private *priv = NULL;
645 struct device_node *np = ofdev->dev.of_node;
646 struct device_node *child = NULL;
647 u32 stash_len = 0;
648 u32 stash_idx = 0;
649 unsigned int num_tx_qs, num_rx_qs;
650 unsigned short mode, poll_mode;
651
652 if (!np)
653 return -ENODEV;
654
655 if (of_device_is_compatible(np, "fsl,etsec2")) {
656 mode = MQ_MG_MODE;
657 poll_mode = GFAR_SQ_POLLING;
658 } else {
659 mode = SQ_SG_MODE;
660 poll_mode = GFAR_SQ_POLLING;
661 }
662
663 if (mode == SQ_SG_MODE) {
664 num_tx_qs = 1;
665 num_rx_qs = 1;
666 } else {
667
668 unsigned int num_grps = gfar_of_group_count(np);
669
670 if (num_grps == 0 || num_grps > MAXGROUPS) {
671 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
672 num_grps);
673 pr_err("Cannot do alloc_etherdev, aborting\n");
674 return -EINVAL;
675 }
676
677 if (poll_mode == GFAR_SQ_POLLING) {
678 num_tx_qs = num_grps;
679 num_rx_qs = num_grps;
680 } else {
681 u32 tx_queues, rx_queues;
682 int ret;
683
684
685 ret = of_property_read_u32(np, "fsl,num_tx_queues",
686 &tx_queues);
687 num_tx_qs = ret ? 1 : tx_queues;
688
689 ret = of_property_read_u32(np, "fsl,num_rx_queues",
690 &rx_queues);
691 num_rx_qs = ret ? 1 : rx_queues;
692 }
693 }
694
695 if (num_tx_qs > MAX_TX_QS) {
696 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
697 num_tx_qs, MAX_TX_QS);
698 pr_err("Cannot do alloc_etherdev, aborting\n");
699 return -EINVAL;
700 }
701
702 if (num_rx_qs > MAX_RX_QS) {
703 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
704 num_rx_qs, MAX_RX_QS);
705 pr_err("Cannot do alloc_etherdev, aborting\n");
706 return -EINVAL;
707 }
708
709 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
710 dev = *pdev;
711 if (NULL == dev)
712 return -ENOMEM;
713
714 priv = netdev_priv(dev);
715 priv->ndev = dev;
716
717 priv->mode = mode;
718 priv->poll_mode = poll_mode;
719
720 priv->num_tx_queues = num_tx_qs;
721 netif_set_real_num_rx_queues(dev, num_rx_qs);
722 priv->num_rx_queues = num_rx_qs;
723
724 err = gfar_alloc_tx_queues(priv);
725 if (err)
726 goto tx_alloc_failed;
727
728 err = gfar_alloc_rx_queues(priv);
729 if (err)
730 goto rx_alloc_failed;
731
732 err = of_property_read_string(np, "model", &model);
733 if (err) {
734 pr_err("Device model property missing, aborting\n");
735 goto rx_alloc_failed;
736 }
737
738
739 INIT_LIST_HEAD(&priv->rx_list.list);
740 priv->rx_list.count = 0;
741 mutex_init(&priv->rx_queue_access);
742
743 for (i = 0; i < MAXGROUPS; i++)
744 priv->gfargrp[i].regs = NULL;
745
746
747 if (priv->mode == MQ_MG_MODE) {
748 for_each_available_child_of_node(np, child) {
749 if (!of_node_name_eq(child, "queue-group"))
750 continue;
751
752 err = gfar_parse_group(child, priv, model);
753 if (err)
754 goto err_grp_init;
755 }
756 } else {
757 err = gfar_parse_group(np, priv, model);
758 if (err)
759 goto err_grp_init;
760 }
761
762 if (of_property_read_bool(np, "bd-stash")) {
763 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
764 priv->bd_stash_en = 1;
765 }
766
767 err = of_property_read_u32(np, "rx-stash-len", &stash_len);
768
769 if (err == 0)
770 priv->rx_stash_size = stash_len;
771
772 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
773
774 if (err == 0)
775 priv->rx_stash_index = stash_idx;
776
777 if (stash_len || stash_idx)
778 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
779
780 mac_addr = of_get_mac_address(np);
781
782 if (!IS_ERR(mac_addr))
783 ether_addr_copy(dev->dev_addr, mac_addr);
784
785 if (model && !strcasecmp(model, "TSEC"))
786 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
787 FSL_GIANFAR_DEV_HAS_COALESCE |
788 FSL_GIANFAR_DEV_HAS_RMON |
789 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
790
791 if (model && !strcasecmp(model, "eTSEC"))
792 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
793 FSL_GIANFAR_DEV_HAS_COALESCE |
794 FSL_GIANFAR_DEV_HAS_RMON |
795 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
796 FSL_GIANFAR_DEV_HAS_CSUM |
797 FSL_GIANFAR_DEV_HAS_VLAN |
798 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
799 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
800 FSL_GIANFAR_DEV_HAS_TIMER |
801 FSL_GIANFAR_DEV_HAS_RX_FILER;
802
803
804
805
806
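	/* Use the PHY connection type from the device tree if one is
	 * specified there; otherwise fall back to reading the controller
	 * registers via gfar_get_interface().
	 */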
807 err = of_get_phy_mode(np, &interface);
808 if (!err)
809 priv->interface = interface;
810 else
811 priv->interface = gfar_get_interface(dev);
812
813 if (of_find_property(np, "fsl,magic-packet", NULL))
814 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
815
816 if (of_get_property(np, "fsl,wake-on-filer", NULL))
817 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
818
819 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
820
821
822
823
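	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */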
824 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
825 err = of_phy_register_fixed_link(np);
826 if (err)
827 goto err_grp_init;
828
829 priv->phy_node = of_node_get(np);
830 }
831
832
833 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
834
835 return 0;
836
837err_grp_init:
838 unmap_group_regs(priv);
839rx_alloc_failed:
840 gfar_free_rx_queues(priv);
841tx_alloc_failed:
842 gfar_free_tx_queues(priv);
843 free_gfar_dev(priv);
844 return err;
845}
846
847static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
848 u32 class)
849{
850 u32 rqfpr = FPR_FILER_MASK;
851 u32 rqfcr = 0x0;
852
853 rqfar--;
854 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
855 priv->ftp_rqfpr[rqfar] = rqfpr;
856 priv->ftp_rqfcr[rqfar] = rqfcr;
857 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
858
859 rqfar--;
860 rqfcr = RQFCR_CMP_NOMATCH;
861 priv->ftp_rqfpr[rqfar] = rqfpr;
862 priv->ftp_rqfcr[rqfar] = rqfcr;
863 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
864
865 rqfar--;
866 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
867 rqfpr = class;
868 priv->ftp_rqfcr[rqfar] = rqfcr;
869 priv->ftp_rqfpr[rqfar] = rqfpr;
870 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
871
872 rqfar--;
873 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
874 rqfpr = class;
875 priv->ftp_rqfcr[rqfar] = rqfcr;
876 priv->ftp_rqfpr[rqfar] = rqfpr;
877 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
878
879 return rqfar;
880}
881
882static void gfar_init_filer_table(struct gfar_private *priv)
883{
884 int i = 0x0;
885 u32 rqfar = MAX_FILER_IDX;
886 u32 rqfcr = 0x0;
887 u32 rqfpr = FPR_FILER_MASK;
888
889
890 rqfcr = RQFCR_CMP_MATCH;
891 priv->ftp_rqfcr[rqfar] = rqfcr;
892 priv->ftp_rqfpr[rqfar] = rqfpr;
893 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
894
895 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
896 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
897 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
898 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
899 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
900 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
901
902
903 priv->cur_filer_idx = rqfar;
904
905
906 rqfcr = RQFCR_CMP_NOMATCH;
907 for (i = 0; i < rqfar; i++) {
908 priv->ftp_rqfcr[i] = rqfcr;
909 priv->ftp_rqfpr[i] = rqfpr;
910 gfar_write_filer(priv, i, rqfcr, rqfpr);
911 }
912}
913
914#ifdef CONFIG_PPC
915static void __gfar_detect_errata_83xx(struct gfar_private *priv)
916{
917 unsigned int pvr = mfspr(SPRN_PVR);
918 unsigned int svr = mfspr(SPRN_SVR);
919 unsigned int mod = (svr >> 16) & 0xfff6;
920 unsigned int rev = svr & 0xffff;
921
922
923 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
924 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
925 priv->errata |= GFAR_ERRATA_74;
926
927
928 if ((pvr == 0x80850010 && mod == 0x80b0) ||
929 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
930 priv->errata |= GFAR_ERRATA_76;
931
932
933 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
934 priv->errata |= GFAR_ERRATA_12;
935}
936
937static void __gfar_detect_errata_85xx(struct gfar_private *priv)
938{
939 unsigned int svr = mfspr(SPRN_SVR);
940
941 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
942 priv->errata |= GFAR_ERRATA_12;
943
944 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
945 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
946 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
947 priv->errata |= GFAR_ERRATA_76;
948}
949#endif
950
951static void gfar_detect_errata(struct gfar_private *priv)
952{
953 struct device *dev = &priv->ofdev->dev;
954
955
956 priv->errata |= GFAR_ERRATA_A002;
957
958#ifdef CONFIG_PPC
959 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
960 __gfar_detect_errata_85xx(priv);
961 else
962 __gfar_detect_errata_83xx(priv);
963#endif
964
965 if (priv->errata)
966 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
967 priv->errata);
968}
969
970static void gfar_init_addr_hash_table(struct gfar_private *priv)
971{
972 struct gfar __iomem *regs = priv->gfargrp[0].regs;
973
974 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
975 priv->extended_hash = 1;
976 priv->hash_width = 9;
977
		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
1007 }
1008}
1009
1010static int __gfar_is_rx_idle(struct gfar_private *priv)
1011{
1012 u32 res;
1013
1014
1015
1016
1017 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1018 return 0;
1019
1020
1021
1022
1023
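	/* Read the eTSEC register at offset 0xD1C; if the Rx state bits in
	 * the upper and lower halves match, the eTSEC Rx is assumed to be
	 * idle and the Rx path can be safely reset.
	 */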
1024 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1025 res &= 0x7f807f80;
1026 if ((res & 0xffff) == (res >> 16))
1027 return 1;
1028
1029 return 0;
1030}
1031
1032
1033static void gfar_halt_nodisable(struct gfar_private *priv)
1034{
1035 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1036 u32 tempval;
1037 unsigned int timeout;
1038 int stopped;
1039
1040 gfar_ints_disable(priv);
1041
1042 if (gfar_is_dma_stopped(priv))
1043 return;
1044
1045
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);
1049
1050retry:
1051 timeout = 1000;
1052 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1053 cpu_relax();
1054 timeout--;
1055 }
1056
1057 if (!timeout)
1058 stopped = gfar_is_dma_stopped(priv);
1059
1060 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1061 !__gfar_is_rx_idle(priv))
1062 goto retry;
1063}
1064
1065
1066static void gfar_halt(struct gfar_private *priv)
1067{
1068 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1069 u32 tempval;
1070
1071
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);
1074
1075 mdelay(10);
1076
1077 gfar_halt_nodisable(priv);
1078
1079
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
1083}
1084
1085static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1086{
1087 struct txbd8 *txbdp;
1088 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1089 int i, j;
1090
1091 txbdp = tx_queue->tx_bd_base;
1092
1093 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1094 if (!tx_queue->tx_skbuff[i])
1095 continue;
1096
1097 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1098 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1099 txbdp->lstatus = 0;
1100 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1101 j++) {
1102 txbdp++;
1103 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1104 be16_to_cpu(txbdp->length),
1105 DMA_TO_DEVICE);
1106 }
1107 txbdp++;
1108 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1109 tx_queue->tx_skbuff[i] = NULL;
1110 }
1111 kfree(tx_queue->tx_skbuff);
1112 tx_queue->tx_skbuff = NULL;
1113}
1114
1115static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1116{
1117 int i;
1118
1119 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1120
1121 dev_kfree_skb(rx_queue->skb);
1122
1123 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1124 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1125
1126 rxbdp->lstatus = 0;
1127 rxbdp->bufPtr = 0;
1128 rxbdp++;
1129
1130 if (!rxb->page)
1131 continue;
1132
1133 dma_unmap_page(rx_queue->dev, rxb->dma,
1134 PAGE_SIZE, DMA_FROM_DEVICE);
1135 __free_page(rxb->page);
1136
1137 rxb->page = NULL;
1138 }
1139
1140 kfree(rx_queue->rx_buff);
1141 rx_queue->rx_buff = NULL;
1142}
1143
1144
1145
1146
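/* If there are any tx skbs or rx buffers still around, free them.
 * Then free the tx_skbuff and rx_buff arrays and the BD rings.
 */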
1147static void free_skb_resources(struct gfar_private *priv)
1148{
1149 struct gfar_priv_tx_q *tx_queue = NULL;
1150 struct gfar_priv_rx_q *rx_queue = NULL;
1151 int i;
1152
1153
1154 for (i = 0; i < priv->num_tx_queues; i++) {
1155 struct netdev_queue *txq;
1156
1157 tx_queue = priv->tx_queue[i];
1158 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1159 if (tx_queue->tx_skbuff)
1160 free_skb_tx_queue(tx_queue);
1161 netdev_tx_reset_queue(txq);
1162 }
1163
1164 for (i = 0; i < priv->num_rx_queues; i++) {
1165 rx_queue = priv->rx_queue[i];
1166 if (rx_queue->rx_buff)
1167 free_skb_rx_queue(rx_queue);
1168 }
1169
1170 dma_free_coherent(priv->dev,
1171 sizeof(struct txbd8) * priv->total_tx_ring_size +
1172 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1173 priv->tx_queue[0]->tx_bd_base,
1174 priv->tx_queue[0]->tx_bd_dma_base);
1175}
1176
1177void stop_gfar(struct net_device *dev)
1178{
1179 struct gfar_private *priv = netdev_priv(dev);
1180
1181 netif_tx_stop_all_queues(dev);
1182
1183 smp_mb__before_atomic();
1184 set_bit(GFAR_DOWN, &priv->state);
1185 smp_mb__after_atomic();
1186
1187 disable_napi(priv);
1188
1189
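	/* disable ints and gracefully shut down Rx/Tx DMA */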
1190 gfar_halt(priv);
1191
1192 phy_stop(dev->phydev);
1193
1194 free_skb_resources(priv);
1195}
1196
1197static void gfar_start(struct gfar_private *priv)
1198{
1199 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1200 u32 tempval;
1201 int i = 0;
1202
	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
1228
1229 gfar_ints_enable(priv);
1230
1231 netif_trans_update(priv->ndev);
1232}
1233
1234static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
1235{
1236 struct page *page;
1237 dma_addr_t addr;
1238
1239 page = dev_alloc_page();
1240 if (unlikely(!page))
1241 return false;
1242
1243 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1244 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
1245 __free_page(page);
1246
1247 return false;
1248 }
1249
1250 rxb->dma = addr;
1251 rxb->page = page;
1252 rxb->page_offset = 0;
1253
1254 return true;
1255}
1256
1257static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1258{
1259 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1260 struct gfar_extra_stats *estats = &priv->extra_stats;
1261
1262 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1263 atomic64_inc(&estats->rx_alloc_err);
1264}
1265
1266static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1267 int alloc_cnt)
1268{
1269 struct rxbd8 *bdp;
1270 struct gfar_rx_buff *rxb;
1271 int i;
1272
1273 i = rx_queue->next_to_use;
1274 bdp = &rx_queue->rx_bd_base[i];
1275 rxb = &rx_queue->rx_buff[i];
1276
1277 while (alloc_cnt--) {
1278
1279 if (unlikely(!rxb->page)) {
1280 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1281 gfar_rx_alloc_err(rx_queue);
1282 break;
1283 }
1284 }
1285
1286
1287 gfar_init_rxbdp(rx_queue, bdp,
1288 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
1289
1290
1291 bdp++;
1292 rxb++;
1293
1294 if (unlikely(++i == rx_queue->rx_ring_size)) {
1295 i = 0;
1296 bdp = rx_queue->rx_bd_base;
1297 rxb = rx_queue->rx_buff;
1298 }
1299 }
1300
1301 rx_queue->next_to_use = i;
1302 rx_queue->next_to_alloc = i;
1303}
1304
1305static void gfar_init_bds(struct net_device *ndev)
1306{
1307 struct gfar_private *priv = netdev_priv(ndev);
1308 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1309 struct gfar_priv_tx_q *tx_queue = NULL;
1310 struct gfar_priv_rx_q *rx_queue = NULL;
1311 struct txbd8 *txbdp;
1312 u32 __iomem *rfbptr;
1313 int i, j;
1314
1315 for (i = 0; i < priv->num_tx_queues; i++) {
1316 tx_queue = priv->tx_queue[i];
1317
1318 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
1319 tx_queue->dirty_tx = tx_queue->tx_bd_base;
1320 tx_queue->cur_tx = tx_queue->tx_bd_base;
1321 tx_queue->skb_curtx = 0;
1322 tx_queue->skb_dirtytx = 0;
1323
1324
1325 txbdp = tx_queue->tx_bd_base;
1326 for (j = 0; j < tx_queue->tx_ring_size; j++) {
1327 txbdp->lstatus = 0;
1328 txbdp->bufPtr = 0;
1329 txbdp++;
1330 }
1331
1332
1333 txbdp--;
1334 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
1335 TXBD_WRAP);
1336 }
1337
	rfbptr = &regs->rfbptr0;
1339 for (i = 0; i < priv->num_rx_queues; i++) {
1340 rx_queue = priv->rx_queue[i];
1341
1342 rx_queue->next_to_clean = 0;
1343 rx_queue->next_to_use = 0;
1344 rx_queue->next_to_alloc = 0;
1345
1346
1347
1348
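		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */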
1349 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1350
1351 rx_queue->rfbptr = rfbptr;
1352 rfbptr += 2;
1353 }
1354}
1355
1356static int gfar_alloc_skb_resources(struct net_device *ndev)
1357{
1358 void *vaddr;
1359 dma_addr_t addr;
1360 int i, j;
1361 struct gfar_private *priv = netdev_priv(ndev);
1362 struct device *dev = priv->dev;
1363 struct gfar_priv_tx_q *tx_queue = NULL;
1364 struct gfar_priv_rx_q *rx_queue = NULL;
1365
1366 priv->total_tx_ring_size = 0;
1367 for (i = 0; i < priv->num_tx_queues; i++)
1368 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
1369
1370 priv->total_rx_ring_size = 0;
1371 for (i = 0; i < priv->num_rx_queues; i++)
1372 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
1373
1374
1375 vaddr = dma_alloc_coherent(dev,
1376 (priv->total_tx_ring_size *
1377 sizeof(struct txbd8)) +
1378 (priv->total_rx_ring_size *
1379 sizeof(struct rxbd8)),
1380 &addr, GFP_KERNEL);
1381 if (!vaddr)
1382 return -ENOMEM;
1383
1384 for (i = 0; i < priv->num_tx_queues; i++) {
1385 tx_queue = priv->tx_queue[i];
1386 tx_queue->tx_bd_base = vaddr;
1387 tx_queue->tx_bd_dma_base = addr;
1388 tx_queue->dev = ndev;
1389
1390 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1391 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1392 }
1393
1394
1395 for (i = 0; i < priv->num_rx_queues; i++) {
1396 rx_queue = priv->rx_queue[i];
1397 rx_queue->rx_bd_base = vaddr;
1398 rx_queue->rx_bd_dma_base = addr;
1399 rx_queue->ndev = ndev;
1400 rx_queue->dev = dev;
1401 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1402 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1403 }
1404
1405
1406 for (i = 0; i < priv->num_tx_queues; i++) {
1407 tx_queue = priv->tx_queue[i];
1408 tx_queue->tx_skbuff =
1409 kmalloc_array(tx_queue->tx_ring_size,
1410 sizeof(*tx_queue->tx_skbuff),
1411 GFP_KERNEL);
1412 if (!tx_queue->tx_skbuff)
1413 goto cleanup;
1414
1415 for (j = 0; j < tx_queue->tx_ring_size; j++)
1416 tx_queue->tx_skbuff[j] = NULL;
1417 }
1418
1419 for (i = 0; i < priv->num_rx_queues; i++) {
1420 rx_queue = priv->rx_queue[i];
1421 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
1422 sizeof(*rx_queue->rx_buff),
1423 GFP_KERNEL);
1424 if (!rx_queue->rx_buff)
1425 goto cleanup;
1426 }
1427
1428 gfar_init_bds(ndev);
1429
1430 return 0;
1431
1432cleanup:
1433 free_skb_resources(priv);
1434 return -ENOMEM;
1435}
1436
1437
1438int startup_gfar(struct net_device *ndev)
1439{
1440 struct gfar_private *priv = netdev_priv(ndev);
1441 int err;
1442
1443 gfar_mac_reset(priv);
1444
1445 err = gfar_alloc_skb_resources(ndev);
1446 if (err)
1447 return err;
1448
1449 gfar_init_tx_rx_base(priv);
1450
1451 smp_mb__before_atomic();
1452 clear_bit(GFAR_DOWN, &priv->state);
1453 smp_mb__after_atomic();
1454
1455
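	/* Start Rx/Tx DMA and enable the interrupts */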
1456 gfar_start(priv);
1457
1458
1459 priv->oldlink = 0;
1460 priv->oldspeed = 0;
1461 priv->oldduplex = -1;
1462
1463 phy_start(ndev->phydev);
1464
1465 enable_napi(priv);
1466
1467 netif_tx_wake_all_queues(ndev);
1468
1469 return 0;
1470}
1471
1472static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
1473{
1474 struct net_device *ndev = priv->ndev;
1475 struct phy_device *phydev = ndev->phydev;
1476 u32 val = 0;
1477
1478 if (!phydev->duplex)
1479 return val;
1480
1481 if (!priv->pause_aneg_en) {
1482 if (priv->tx_pause_en)
1483 val |= MACCFG1_TX_FLOW;
1484 if (priv->rx_pause_en)
1485 val |= MACCFG1_RX_FLOW;
1486 } else {
1487 u16 lcl_adv, rmt_adv;
1488 u8 flowctrl;
1489
1490 rmt_adv = 0;
1491 if (phydev->pause)
1492 rmt_adv = LPA_PAUSE_CAP;
1493 if (phydev->asym_pause)
1494 rmt_adv |= LPA_PAUSE_ASYM;
1495
1496 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1497 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1498 if (flowctrl & FLOW_CTRL_TX)
1499 val |= MACCFG1_TX_FLOW;
1500 if (flowctrl & FLOW_CTRL_RX)
1501 val |= MACCFG1_RX_FLOW;
1502 }
1503
1504 return val;
1505}
1506
1507static noinline void gfar_update_link_state(struct gfar_private *priv)
1508{
1509 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1510 struct net_device *ndev = priv->ndev;
1511 struct phy_device *phydev = ndev->phydev;
1512 struct gfar_priv_rx_q *rx_queue = NULL;
1513 int i;
1514
1515 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
1516 return;
1517
1518 if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
1522 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
1523
1524 if (phydev->duplex != priv->oldduplex) {
1525 if (!(phydev->duplex))
1526 tempval &= ~(MACCFG2_FULL_DUPLEX);
1527 else
1528 tempval |= MACCFG2_FULL_DUPLEX;
1529
1530 priv->oldduplex = phydev->duplex;
1531 }
1532
1533 if (phydev->speed != priv->oldspeed) {
1534 switch (phydev->speed) {
1535 case 1000:
1536 tempval =
1537 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1538
1539 ecntrl &= ~(ECNTRL_R100);
1540 break;
1541 case 100:
1542 case 10:
1543 tempval =
1544 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1545
1546
1547
1548
1549 if (phydev->speed == SPEED_100)
1550 ecntrl |= ECNTRL_R100;
1551 else
1552 ecntrl &= ~(ECNTRL_R100);
1553 break;
1554 default:
1555 netif_warn(priv, link, priv->ndev,
1556 "Ack! Speed (%d) is not 10/100/1000!\n",
1557 phydev->speed);
1558 break;
1559 }
1560
1561 priv->oldspeed = phydev->speed;
1562 }
1563
1564 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1565 tempval1 |= gfar_get_flowctrl_cfg(priv);
1566
1567
1568 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
1569 for (i = 0; i < priv->num_rx_queues; i++) {
1570 u32 bdp_dma;
1571
1572 rx_queue = priv->rx_queue[i];
1573 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1574 gfar_write(rx_queue->rfbptr, bdp_dma);
1575 }
1576
1577 priv->tx_actual_en = 1;
1578 }
1579
1580 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
1581 priv->tx_actual_en = 0;
1582
		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);
1586
1587 if (!priv->oldlink)
1588 priv->oldlink = 1;
1589
1590 } else if (priv->oldlink) {
1591 priv->oldlink = 0;
1592 priv->oldspeed = 0;
1593 priv->oldduplex = -1;
1594 }
1595
1596 if (netif_msg_link(priv))
1597 phy_print_status(phydev);
1598}
1599
1600
1601
1602
1603
1604
1605
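/* Called every time the controller might need to be made aware of new link
 * state.  The PHY code conveys this information through variables in the
 * phydev structure, and this function converts those variables into the
 * appropriate register values.
 */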
1606static void adjust_link(struct net_device *dev)
1607{
1608 struct gfar_private *priv = netdev_priv(dev);
1609 struct phy_device *phydev = dev->phydev;
1610
1611 if (unlikely(phydev->link != priv->oldlink ||
1612 (phydev->link && (phydev->duplex != priv->oldduplex ||
1613 phydev->speed != priv->oldspeed))))
1614 gfar_update_link_state(priv);
1615}
1616
1617
1618
1619
1620
1621
1622
1623
1624
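/* Initialize the on-chip TBI interface used for the SGMII/SerDes link.
 * The TBI entity is accessed like a regular PHY over the MDIO bus, via the
 * device referenced by the tbi-handle node, and is configured here for
 * 1000Base-X autonegotiation.
 */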
1625static void gfar_configure_serdes(struct net_device *dev)
1626{
1627 struct gfar_private *priv = netdev_priv(dev);
1628 struct phy_device *tbiphy;
1629
1630 if (!priv->tbi_node) {
1631 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1632 "device tree specify a tbi-handle\n");
1633 return;
1634 }
1635
1636 tbiphy = of_phy_find_device(priv->tbi_node);
1637 if (!tbiphy) {
1638 dev_err(&dev->dev, "error: Could not get TBI device\n");
1639 return;
1640 }
1641
1642
1643
1644
1645
1646
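	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */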
1647 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1648 put_device(&tbiphy->mdio.dev);
1649 return;
1650 }
1651
1652
1653 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1654
1655 phy_write(tbiphy, MII_ADVERTISE,
1656 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1657 ADVERTISE_1000XPSE_ASYM);
1658
1659 phy_write(tbiphy, MII_BMCR,
1660 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1661 BMCR_SPEED1000);
1662
1663 put_device(&tbiphy->mdio.dev);
1664}
1665
1666
1667
1668
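/* Initializes the driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */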
1669static int init_phy(struct net_device *dev)
1670{
1671 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1672 struct gfar_private *priv = netdev_priv(dev);
1673 phy_interface_t interface = priv->interface;
1674 struct phy_device *phydev;
1675 struct ethtool_eee edata;
1676
1677 linkmode_set_bit_array(phy_10_100_features_array,
1678 ARRAY_SIZE(phy_10_100_features_array),
1679 mask);
1680 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1681 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1682 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1683 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1684
1685 priv->oldlink = 0;
1686 priv->oldspeed = 0;
1687 priv->oldduplex = -1;
1688
1689 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1690 interface);
1691 if (!phydev) {
1692 dev_err(&dev->dev, "could not attach to PHY\n");
1693 return -ENODEV;
1694 }
1695
1696 if (interface == PHY_INTERFACE_MODE_SGMII)
1697 gfar_configure_serdes(dev);
1698
1699
1700 linkmode_and(phydev->supported, phydev->supported, mask);
1701 linkmode_copy(phydev->advertising, phydev->supported);
1702
1703
1704 phy_support_asym_pause(phydev);
1705
1706
1707 memset(&edata, 0, sizeof(struct ethtool_eee));
1708 phy_ethtool_set_eee(phydev, &edata);
1709
1710 return 0;
1711}
1712
1713static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1714{
1715 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1716
1717 memset(fcb, 0, GMAC_FCB_LEN);
1718
1719 return fcb;
1720}
1721
1722static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1723 int fcb_length)
1724{
1725
1726
1727
1728
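	/* If we're here, it's an IP packet with a TCP or UDP payload.
	 * Ask the hardware to insert the checksum, based on the pseudo-header
	 * checksum (phcs) already computed by the stack.
	 */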
1729 u8 flags = TXFCB_DEFAULT;
1730
1731
1732
1733
1734 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1735 flags |= TXFCB_UDP;
1736 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1737 } else
1738 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1739
1740
1741
1742
1743
1744
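	/* l3os is the offset from the start of the frame to the IP header;
	 * l4os is the distance between the start of the IP header and the
	 * start of the TCP/UDP header.
	 */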
1745 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1746 fcb->l4os = skb_network_header_len(skb);
1747
1748 fcb->flags = flags;
1749}
1750
1751static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1752{
1753 fcb->flags |= TXFCB_VLN;
1754 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1755}
1756
1757static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1758 struct txbd8 *base, int ring_size)
1759{
1760 struct txbd8 *new_bd = bdp + stride;
1761
1762 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1763}
1764
1765static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1766 int ring_size)
1767{
1768 return skip_txbd(bdp, 1, base, ring_size);
1769}
1770
1771
1772static inline bool gfar_csum_errata_12(struct gfar_private *priv,
1773 unsigned long fcb_addr)
1774{
1775 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
1776 (fcb_addr % 0x20) > 0x18);
1777}
1778
1779
1780
1781
1782static inline bool gfar_csum_errata_76(struct gfar_private *priv,
1783 unsigned int len)
1784{
1785 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
1786 (len > 2500));
1787}
1788
1789
1790
1791
1792static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1793{
1794 struct gfar_private *priv = netdev_priv(dev);
1795 struct gfar_priv_tx_q *tx_queue = NULL;
1796 struct netdev_queue *txq;
1797 struct gfar __iomem *regs = NULL;
1798 struct txfcb *fcb = NULL;
1799 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1800 u32 lstatus;
1801 skb_frag_t *frag;
1802 int i, rq = 0;
1803 int do_tstamp, do_csum, do_vlan;
1804 u32 bufaddr;
1805 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1806
1807 rq = skb->queue_mapping;
1808 tx_queue = priv->tx_queue[rq];
1809 txq = netdev_get_tx_queue(dev, rq);
1810 base = tx_queue->tx_bd_base;
1811 regs = tx_queue->grp->regs;
1812
1813 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1814 do_vlan = skb_vlan_tag_present(skb);
1815 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1816 priv->hwts_tx_en;
1817
1818 if (do_csum || do_vlan)
1819 fcb_len = GMAC_FCB_LEN;
1820
1821
1822 if (unlikely(do_tstamp))
1823 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1824
1825
1826 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
1827 struct sk_buff *skb_new;
1828
1829 skb_new = skb_realloc_headroom(skb, fcb_len);
1830 if (!skb_new) {
1831 dev->stats.tx_errors++;
1832 dev_kfree_skb_any(skb);
1833 return NETDEV_TX_OK;
1834 }
1835
1836 if (skb->sk)
1837 skb_set_owner_w(skb_new, skb->sk);
1838 dev_consume_skb_any(skb);
1839 skb = skb_new;
1840 }
1841
1842
1843 nr_frags = skb_shinfo(skb)->nr_frags;
1844
1845
1846 if (unlikely(do_tstamp))
1847 nr_txbds = nr_frags + 2;
1848 else
1849 nr_txbds = nr_frags + 1;
1850
1851
1852 if (nr_txbds > tx_queue->num_txbdfree) {
1853
1854 netif_tx_stop_queue(txq);
1855 dev->stats.tx_fifo_errors++;
1856 return NETDEV_TX_BUSY;
1857 }
1858
1859
1860 bytes_sent = skb->len;
1861 tx_queue->stats.tx_bytes += bytes_sent;
1862
1863 GFAR_CB(skb)->bytes_sent = bytes_sent;
1864 tx_queue->stats.tx_packets++;
1865
1866 txbdp = txbdp_start = tx_queue->cur_tx;
1867 lstatus = be32_to_cpu(txbdp->lstatus);
1868
1869
1870 if (unlikely(do_tstamp)) {
1871 skb_push(skb, GMAC_TXPAL_LEN);
1872 memset(skb->data, 0, GMAC_TXPAL_LEN);
1873 }
1874
1875
1876 if (fcb_len) {
1877 fcb = gfar_add_fcb(skb);
1878 lstatus |= BD_LFLAG(TXBD_TOE);
1879 }
1880
1881
1882 if (do_csum) {
1883 gfar_tx_checksum(skb, fcb, fcb_len);
1884
1885 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1886 unlikely(gfar_csum_errata_76(priv, skb->len))) {
1887 __skb_pull(skb, GMAC_FCB_LEN);
1888 skb_checksum_help(skb);
1889 if (do_vlan || do_tstamp) {
1890
1891 fcb = gfar_add_fcb(skb);
1892 } else {
1893
1894 lstatus &= ~(BD_LFLAG(TXBD_TOE));
1895 fcb = NULL;
1896 }
1897 }
1898 }
1899
1900 if (do_vlan)
1901 gfar_tx_vlan(skb, fcb);
1902
1903 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1904 DMA_TO_DEVICE);
1905 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1906 goto dma_map_err;
1907
1908 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1909
1910
1911 if (unlikely(do_tstamp))
1912 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1913 tx_queue->tx_ring_size);
1914
1915 if (likely(!nr_frags)) {
1916 if (likely(!do_tstamp))
1917 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1918 } else {
1919 u32 lstatus_start = lstatus;
1920
1921
1922 frag = &skb_shinfo(skb)->frags[0];
1923 for (i = 0; i < nr_frags; i++, frag++) {
1924 unsigned int size;
1925
1926
1927 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1928
1929 size = skb_frag_size(frag);
1930
1931 lstatus = be32_to_cpu(txbdp->lstatus) | size |
1932 BD_LFLAG(TXBD_READY);
1933
1934
1935 if (i == nr_frags - 1)
1936 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1937
1938 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1939 size, DMA_TO_DEVICE);
1940 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1941 goto dma_map_err;
1942
1943
1944 txbdp->bufPtr = cpu_to_be32(bufaddr);
1945 txbdp->lstatus = cpu_to_be32(lstatus);
1946 }
1947
1948 lstatus = lstatus_start;
1949 }
1950
1951
1952
1953
1954
1955
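	/* If time stamping is requested, one additional TxBD must be set up.
	 * The first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN.  The second TxBD points to the actual frame data
	 * with the full frame length.
	 */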
1956 if (unlikely(do_tstamp)) {
1957 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1958
1959 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1960 bufaddr += fcb_len;
1961
1962 lstatus_ts |= BD_LFLAG(TXBD_READY) |
1963 (skb_headlen(skb) - fcb_len);
1964 if (!nr_frags)
1965 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1966
1967 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1968 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1969 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1970
1971
1972 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1973 fcb->ptp = 1;
1974 } else {
1975 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1976 }
1977
1978 netdev_tx_sent_queue(txq, bytes_sent);
1979
1980 gfar_wmb();
1981
1982 txbdp_start->lstatus = cpu_to_be32(lstatus);
1983
1984 gfar_wmb();
1985
1986 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1987
1988
1989
1990
1991 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1992 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1993
1994 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1995
1996
1997
1998
1999
2000
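	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree.  Note that we didn't grab the lock
	 * when we were reading num_txbdfree and checking for available
	 * space; that's because outside of this function it isn't decreased.
	 */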
2001 spin_lock_bh(&tx_queue->txlock);
2002
2003 tx_queue->num_txbdfree -= (nr_txbds);
2004 spin_unlock_bh(&tx_queue->txlock);
2005
2006
2007
2008
2009 if (!tx_queue->num_txbdfree) {
2010 netif_tx_stop_queue(txq);
2011
2012 dev->stats.tx_fifo_errors++;
2013 }
2014
	/* Kick the DMA: clear this queue's transmit-halt bit */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2017
2018 return NETDEV_TX_OK;
2019
2020dma_map_err:
2021 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2022 if (do_tstamp)
2023 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2024 for (i = 0; i < nr_frags; i++) {
2025 lstatus = be32_to_cpu(txbdp->lstatus);
2026 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2027 break;
2028
2029 lstatus &= ~BD_LFLAG(TXBD_READY);
2030 txbdp->lstatus = cpu_to_be32(lstatus);
2031 bufaddr = be32_to_cpu(txbdp->bufPtr);
2032 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2033 DMA_TO_DEVICE);
2034 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2035 }
2036 gfar_wmb();
2037 dev_kfree_skb_any(skb);
2038 return NETDEV_TX_OK;
2039}
2040
2041
2042static int gfar_set_mac_address(struct net_device *dev)
2043{
2044 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2045
2046 return 0;
2047}
2048
2049static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2050{
2051 struct gfar_private *priv = netdev_priv(dev);
2052
2053 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2054 cpu_relax();
2055
2056 if (dev->flags & IFF_UP)
2057 stop_gfar(dev);
2058
2059 dev->mtu = new_mtu;
2060
2061 if (dev->flags & IFF_UP)
2062 startup_gfar(dev);
2063
2064 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2065
2066 return 0;
2067}
2068
2069static void reset_gfar(struct net_device *ndev)
2070{
2071 struct gfar_private *priv = netdev_priv(ndev);
2072
2073 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2074 cpu_relax();
2075
2076 stop_gfar(ndev);
2077 startup_gfar(ndev);
2078
2079 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2080}
2081
2082
2083
2084
2085
2086
2087static void gfar_reset_task(struct work_struct *work)
2088{
2089 struct gfar_private *priv = container_of(work, struct gfar_private,
2090 reset_task);
2091 reset_gfar(priv->ndev);
2092}
2093
2094static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2095{
2096 struct gfar_private *priv = netdev_priv(dev);
2097
2098 dev->stats.tx_errors++;
2099 schedule_work(&priv->reset_task);
2100}
2101
2102static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
2103{
2104 struct hwtstamp_config config;
2105 struct gfar_private *priv = netdev_priv(netdev);
2106
2107 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2108 return -EFAULT;
2109
2110
2111 if (config.flags)
2112 return -EINVAL;
2113
2114 switch (config.tx_type) {
2115 case HWTSTAMP_TX_OFF:
2116 priv->hwts_tx_en = 0;
2117 break;
2118 case HWTSTAMP_TX_ON:
2119 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2120 return -ERANGE;
2121 priv->hwts_tx_en = 1;
2122 break;
2123 default:
2124 return -ERANGE;
2125 }
2126
2127 switch (config.rx_filter) {
2128 case HWTSTAMP_FILTER_NONE:
2129 if (priv->hwts_rx_en) {
2130 priv->hwts_rx_en = 0;
2131 reset_gfar(netdev);
2132 }
2133 break;
2134 default:
2135 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2136 return -ERANGE;
2137 if (!priv->hwts_rx_en) {
2138 priv->hwts_rx_en = 1;
2139 reset_gfar(netdev);
2140 }
2141 config.rx_filter = HWTSTAMP_FILTER_ALL;
2142 break;
2143 }
2144
2145 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2146 -EFAULT : 0;
2147}
2148
2149static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
2150{
2151 struct hwtstamp_config config;
2152 struct gfar_private *priv = netdev_priv(netdev);
2153
2154 config.flags = 0;
2155 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2156 config.rx_filter = (priv->hwts_rx_en ?
2157 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
2158
2159 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2160 -EFAULT : 0;
2161}
2162
2163static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2164{
2165 struct phy_device *phydev = dev->phydev;
2166
2167 if (!netif_running(dev))
2168 return -EINVAL;
2169
2170 if (cmd == SIOCSHWTSTAMP)
2171 return gfar_hwtstamp_set(dev, rq);
2172 if (cmd == SIOCGHWTSTAMP)
2173 return gfar_hwtstamp_get(dev, rq);
2174
2175 if (!phydev)
2176 return -ENODEV;
2177
2178 return phy_mii_ioctl(phydev, rq, cmd);
2179}
2180
2181
2182static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2183{
2184 struct net_device *dev = tx_queue->dev;
2185 struct netdev_queue *txq;
2186 struct gfar_private *priv = netdev_priv(dev);
2187 struct txbd8 *bdp, *next = NULL;
2188 struct txbd8 *lbdp = NULL;
2189 struct txbd8 *base = tx_queue->tx_bd_base;
2190 struct sk_buff *skb;
2191 int skb_dirtytx;
2192 int tx_ring_size = tx_queue->tx_ring_size;
2193 int frags = 0, nr_txbds = 0;
2194 int i;
2195 int howmany = 0;
2196 int tqi = tx_queue->qindex;
2197 unsigned int bytes_sent = 0;
2198 u32 lstatus;
2199 size_t buflen;
2200
2201 txq = netdev_get_tx_queue(dev, tqi);
2202 bdp = tx_queue->dirty_tx;
2203 skb_dirtytx = tx_queue->skb_dirtytx;
2204
2205 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2206 bool do_tstamp;
2207
2208 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2209 priv->hwts_tx_en;
2210
2211 frags = skb_shinfo(skb)->nr_frags;
2212
2213
2214
2215
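		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */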
2216 if (unlikely(do_tstamp))
2217 nr_txbds = frags + 2;
2218 else
2219 nr_txbds = frags + 1;
2220
2221 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2222
2223 lstatus = be32_to_cpu(lbdp->lstatus);
2224
2225
2226 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2227 (lstatus & BD_LENGTH_MASK))
2228 break;
2229
2230 if (unlikely(do_tstamp)) {
2231 next = next_txbd(bdp, base, tx_ring_size);
2232 buflen = be16_to_cpu(next->length) +
2233 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2234 } else
2235 buflen = be16_to_cpu(bdp->length);
2236
2237 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2238 buflen, DMA_TO_DEVICE);
2239
2240 if (unlikely(do_tstamp)) {
2241 struct skb_shared_hwtstamps shhwtstamps;
2242 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2243 ~0x7UL);
2244
2245 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2246 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2247 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2248 skb_tstamp_tx(skb, &shhwtstamps);
2249 gfar_clear_txbd_status(bdp);
2250 bdp = next;
2251 }
2252
2253 gfar_clear_txbd_status(bdp);
2254 bdp = next_txbd(bdp, base, tx_ring_size);
2255
2256 for (i = 0; i < frags; i++) {
2257 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2258 be16_to_cpu(bdp->length),
2259 DMA_TO_DEVICE);
2260 gfar_clear_txbd_status(bdp);
2261 bdp = next_txbd(bdp, base, tx_ring_size);
2262 }
2263
2264 bytes_sent += GFAR_CB(skb)->bytes_sent;
2265
2266 dev_kfree_skb_any(skb);
2267
2268 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2269
2270 skb_dirtytx = (skb_dirtytx + 1) &
2271 TX_RING_MOD_MASK(tx_ring_size);
2272
2273 howmany++;
2274 spin_lock(&tx_queue->txlock);
2275 tx_queue->num_txbdfree += nr_txbds;
2276 spin_unlock(&tx_queue->txlock);
2277 }
2278
2279
2280 if (tx_queue->num_txbdfree &&
2281 netif_tx_queue_stopped(txq) &&
2282 !(test_bit(GFAR_DOWN, &priv->state)))
2283 netif_wake_subqueue(priv->ndev, tqi);
2284
2285
2286 tx_queue->skb_dirtytx = skb_dirtytx;
2287 tx_queue->dirty_tx = bdp;
2288
2289 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2290}
2291
2292static void count_errors(u32 lstatus, struct net_device *ndev)
2293{
2294 struct gfar_private *priv = netdev_priv(ndev);
2295 struct net_device_stats *stats = &ndev->stats;
2296 struct gfar_extra_stats *estats = &priv->extra_stats;
2297
2298
2299 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2300 stats->rx_length_errors++;
2301
2302 atomic64_inc(&estats->rx_trunc);
2303
2304 return;
2305 }
2306
2307 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2308 stats->rx_length_errors++;
2309
2310 if (lstatus & BD_LFLAG(RXBD_LARGE))
2311 atomic64_inc(&estats->rx_large);
2312 else
2313 atomic64_inc(&estats->rx_short);
2314 }
2315 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2316 stats->rx_frame_errors++;
2317 atomic64_inc(&estats->rx_nonoctet);
2318 }
2319 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2320 atomic64_inc(&estats->rx_crcerr);
2321 stats->rx_crc_errors++;
2322 }
2323 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2324 atomic64_inc(&estats->rx_overrun);
2325 stats->rx_over_errors++;
2326 }
2327}
2328
2329static irqreturn_t gfar_receive(int irq, void *grp_id)
2330{
2331 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2332 unsigned long flags;
2333 u32 imask, ievent;
2334
2335 ievent = gfar_read(&grp->regs->ievent);
2336
2337 if (unlikely(ievent & IEVENT_FGPI)) {
2338 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2339 return IRQ_HANDLED;
2340 }
2341
2342 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2343 spin_lock_irqsave(&grp->grplock, flags);
2344 imask = gfar_read(&grp->regs->imask);
2345 imask &= IMASK_RX_DISABLED;
2346 gfar_write(&grp->regs->imask, imask);
2347 spin_unlock_irqrestore(&grp->grplock, flags);
2348 __napi_schedule(&grp->napi_rx);
2349 } else {
2350
2351
2352
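		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */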
2353 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2354 }
2355
2356 return IRQ_HANDLED;
2357}
2358
2359
2360static irqreturn_t gfar_transmit(int irq, void *grp_id)
2361{
2362 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2363 unsigned long flags;
2364 u32 imask;
2365
2366 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2367 spin_lock_irqsave(&grp->grplock, flags);
2368 imask = gfar_read(&grp->regs->imask);
2369 imask &= IMASK_TX_DISABLED;
2370 gfar_write(&grp->regs->imask, imask);
2371 spin_unlock_irqrestore(&grp->grplock, flags);
2372 __napi_schedule(&grp->napi_tx);
2373 } else {
2374
2375
2376
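		/* Clear IEVENT, so interrupts aren't called again
		 * for work that is already being processed.
		 */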
2377 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2378 }
2379
2380 return IRQ_HANDLED;
2381}
2382
2383static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2384 struct sk_buff *skb, bool first)
2385{
2386 int size = lstatus & BD_LENGTH_MASK;
2387 struct page *page = rxb->page;
2388
2389 if (likely(first)) {
2390 skb_put(skb, size);
2391 } else {
2392
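		/* The last fragment's length field holds the full frame length */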
2393 if (lstatus & BD_LFLAG(RXBD_LAST))
2394 size -= skb->len;
2395
2396 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2397 rxb->page_offset + RXBUF_ALIGNMENT,
2398 size, GFAR_RXB_TRUESIZE);
2399 }
2400
2401
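	/* The page can only be recycled if nobody else holds a reference
	 * to it and it is not a pfmemalloc (emergency reserve) page.
	 */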
2402 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2403 return false;
2404
2405
2406 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2407
2408 page_ref_inc(page);
2409
2410 return true;
2411}
2412
2413static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2414 struct gfar_rx_buff *old_rxb)
2415{
2416 struct gfar_rx_buff *new_rxb;
2417 u16 nta = rxq->next_to_alloc;
2418
2419 new_rxb = &rxq->rx_buff[nta];
2420
2421
2422 nta++;
2423 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2424
2425
2426 *new_rxb = *old_rxb;
2427
2428
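	/* Sync the half-page back for use by the device */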
2429 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2430 old_rxb->page_offset,
2431 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2432}
2433
2434static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2435 u32 lstatus, struct sk_buff *skb)
2436{
2437 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2438 struct page *page = rxb->page;
2439 bool first = false;
2440
2441 if (likely(!skb)) {
2442 void *buff_addr = page_address(page) + rxb->page_offset;
2443
2444 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2445 if (unlikely(!skb)) {
2446 gfar_rx_alloc_err(rx_queue);
2447 return NULL;
2448 }
2449 skb_reserve(skb, RXBUF_ALIGNMENT);
2450 first = true;
2451 }
2452
2453 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2454 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2455
2456 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2457
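		/* The other half of the page is still free: recycle it */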
2458 gfar_reuse_rx_page(rx_queue, rxb);
2459 } else {
2460
2461 dma_unmap_page(rx_queue->dev, rxb->dma,
2462 PAGE_SIZE, DMA_FROM_DEVICE);
2463 }
2464
2465
2466 rxb->page = NULL;
2467
2468 return skb;
2469}
2470
2471static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2472{
2473
2474
2475
2476
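	/* Tell the stack that checksumming is unnecessary only if the
	 * hardware validated both the IP and TCP/UDP checksums.
	 */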
2477 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2478 (RXFCB_CIP | RXFCB_CTU))
2479 skb->ip_summed = CHECKSUM_UNNECESSARY;
2480 else
2481 skb_checksum_none_assert(skb);
2482}
2483
2484
2485static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2486{
2487 struct gfar_private *priv = netdev_priv(ndev);
2488 struct rxfcb *fcb = NULL;
2489
2490
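	/* The frame control block, if present, sits at the start of the buffer */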
2491 fcb = (struct rxfcb *)skb->data;
2492
2493
2494
2495
2496 if (priv->uses_rxfcb)
2497 skb_pull(skb, GMAC_FCB_LEN);
2498
2499
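	/* Get the RX hardware timestamp, which the controller places in
	 * front of the frame data.
	 */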
2500 if (priv->hwts_rx_en) {
2501 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2502 u64 *ns = (u64 *) skb->data;
2503
2504 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2505 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2506 }
2507
2508 if (priv->padding)
2509 skb_pull(skb, priv->padding);
2510
2511
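	/* Trim off the FCS */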
2512 pskb_trim(skb, skb->len - ETH_FCS_LEN);
2513
2514 if (ndev->features & NETIF_F_RXCSUM)
2515 gfar_rx_checksum(skb, fcb);
2516
2517
2518
2519
2520
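	/* If the hardware extracted a VLAN tag into the FCB, attach it to the skb */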
2521 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2522 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2523 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2524 be16_to_cpu(fcb->vlctl));
2525}
2526
2527
2528
2529
2530
2531static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2532 int rx_work_limit)
2533{
2534 struct net_device *ndev = rx_queue->ndev;
2535 struct gfar_private *priv = netdev_priv(ndev);
2536 struct rxbd8 *bdp;
2537 int i, howmany = 0;
2538 struct sk_buff *skb = rx_queue->skb;
2539 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2540 unsigned int total_bytes = 0, total_pkts = 0;
2541
2542
2543 i = rx_queue->next_to_clean;
2544
2545 while (rx_work_limit--) {
2546 u32 lstatus;
2547
2548 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2549 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2550 cleaned_cnt = 0;
2551 }
2552
2553 bdp = &rx_queue->rx_bd_base[i];
2554 lstatus = be32_to_cpu(bdp->lstatus);
2555 if (lstatus & BD_LFLAG(RXBD_EMPTY))
2556 break;
2557
2558
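		/* Order the rest of the descriptor reads after the EMPTY check */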
2559 rmb();
2560
2561
2562 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2563 if (unlikely(!skb))
2564 break;
2565
2566 cleaned_cnt++;
2567 howmany++;
2568
2569 if (unlikely(++i == rx_queue->rx_ring_size))
2570 i = 0;
2571
2572 rx_queue->next_to_clean = i;
2573
2574
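		/* Keep collecting fragments until the BD marked as last in frame */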
2575 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2576 continue;
2577
2578 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2579 count_errors(lstatus, ndev);
2580
2581
2582 dev_kfree_skb(skb);
2583 skb = NULL;
2584 rx_queue->stats.rx_dropped++;
2585 continue;
2586 }
2587
2588 gfar_process_frame(ndev, skb);
2589
2590
2591 total_pkts++;
2592 total_bytes += skb->len;
2593
2594 skb_record_rx_queue(skb, rx_queue->qindex);
2595
2596 skb->protocol = eth_type_trans(skb, ndev);
2597
2598
2599 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2600
2601 skb = NULL;
2602 }
2603
2604
2605 rx_queue->skb = skb;
2606
2607 rx_queue->stats.rx_packets += total_pkts;
2608 rx_queue->stats.rx_bytes += total_bytes;
2609
2610 if (cleaned_cnt)
2611 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2612
2613
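	/* Update the last free RxBD pointer used for lossless flow control */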
2614 if (unlikely(priv->tx_actual_en)) {
2615 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2616
2617 gfar_write(rx_queue->rfbptr, bdp_dma);
2618 }
2619
2620 return howmany;
2621}
2622
2623static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2624{
2625 struct gfar_priv_grp *gfargrp =
2626 container_of(napi, struct gfar_priv_grp, napi_rx);
2627 struct gfar __iomem *regs = gfargrp->regs;
2628 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2629 int work_done = 0;
2630
2631
2632
2633
2634	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2635
2636 work_done = gfar_clean_rx_ring(rx_queue, budget);
2637
2638 if (work_done < budget) {
2639 u32 imask;
2640 napi_complete_done(napi, work_done);
2641
2642		gfar_write(&regs->rstat, gfargrp->rstat);
2643
2644 spin_lock_irq(&gfargrp->grplock);
2645		imask = gfar_read(&regs->imask);
2646		imask |= IMASK_RX_DEFAULT;
2647		gfar_write(&regs->imask, imask);
2648 spin_unlock_irq(&gfargrp->grplock);
2649 }
2650
2651 return work_done;
2652}
2653
2654static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2655{
2656 struct gfar_priv_grp *gfargrp =
2657 container_of(napi, struct gfar_priv_grp, napi_tx);
2658 struct gfar __iomem *regs = gfargrp->regs;
2659 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2660 u32 imask;
2661
2662
2663
2664
2665	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2666
2667
2668 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2669 gfar_clean_tx_ring(tx_queue);
2670
2671 napi_complete(napi);
2672
2673 spin_lock_irq(&gfargrp->grplock);
2674	imask = gfar_read(&regs->imask);
2675	imask |= IMASK_TX_DEFAULT;
2676	gfar_write(&regs->imask, imask);
2677 spin_unlock_irq(&gfargrp->grplock);
2678
2679 return 0;
2680}
2681
2682static int gfar_poll_rx(struct napi_struct *napi, int budget)
2683{
2684 struct gfar_priv_grp *gfargrp =
2685 container_of(napi, struct gfar_priv_grp, napi_rx);
2686 struct gfar_private *priv = gfargrp->priv;
2687 struct gfar __iomem *regs = gfargrp->regs;
2688 struct gfar_priv_rx_q *rx_queue = NULL;
2689 int work_done = 0, work_done_per_q = 0;
2690 int i, budget_per_q = 0;
2691 unsigned long rstat_rxf;
2692 int num_act_queues;
2693
2694
2695
2696
2697	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2698
2699	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2700
2701 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2702 if (num_act_queues)
2703 budget_per_q = budget/num_act_queues;
2704
2705 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2706
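		/* Skip queues that have no received frames pending */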
2707 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2708 continue;
2709
2710 rx_queue = priv->rx_queue[i];
2711 work_done_per_q =
2712 gfar_clean_rx_ring(rx_queue, budget_per_q);
2713 work_done += work_done_per_q;
2714
2715
2716 if (work_done_per_q < budget_per_q) {
2717
2718			gfar_write(&regs->rstat,
2719 RSTAT_CLEAR_RXF0 >> i);
2720 num_act_queues--;
2721
2722 if (!num_act_queues)
2723 break;
2724 }
2725 }
2726
2727 if (!num_act_queues) {
2728 u32 imask;
2729 napi_complete_done(napi, work_done);
2730
2731
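		/* Clear the halt bit in RSTAT */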
2732		gfar_write(&regs->rstat, gfargrp->rstat);
2733
2734 spin_lock_irq(&gfargrp->grplock);
2735		imask = gfar_read(&regs->imask);
2736		imask |= IMASK_RX_DEFAULT;
2737		gfar_write(&regs->imask, imask);
2738 spin_unlock_irq(&gfargrp->grplock);
2739 }
2740
2741 return work_done;
2742}
2743
2744static int gfar_poll_tx(struct napi_struct *napi, int budget)
2745{
2746 struct gfar_priv_grp *gfargrp =
2747 container_of(napi, struct gfar_priv_grp, napi_tx);
2748 struct gfar_private *priv = gfargrp->priv;
2749 struct gfar __iomem *regs = gfargrp->regs;
2750 struct gfar_priv_tx_q *tx_queue = NULL;
2751 int has_tx_work = 0;
2752 int i;
2753
2754
2755
2756
2757	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2758
2759 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2760 tx_queue = priv->tx_queue[i];
2761
2762 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2763 gfar_clean_tx_ring(tx_queue);
2764 has_tx_work = 1;
2765 }
2766 }
2767
2768 if (!has_tx_work) {
2769 u32 imask;
2770 napi_complete(napi);
2771
2772 spin_lock_irq(&gfargrp->grplock);
2773		imask = gfar_read(&regs->imask);
2774		imask |= IMASK_TX_DEFAULT;
2775		gfar_write(&regs->imask, imask);
2776 spin_unlock_irq(&gfargrp->grplock);
2777 }
2778
2779 return 0;
2780}
2781
2782
2783static irqreturn_t gfar_error(int irq, void *grp_id)
2784{
2785 struct gfar_priv_grp *gfargrp = grp_id;
2786 struct gfar __iomem *regs = gfargrp->regs;
2787	struct gfar_private *priv = gfargrp->priv;
2788 struct net_device *dev = priv->ndev;
2789
2790
2791	u32 events = gfar_read(&regs->ievent);
2792
2793
2794	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2795
2796
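	/* A Magic Packet wakeup is not an error, so don't treat it as one */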
2797 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2798 (events & IEVENT_MAG))
2799 events &= ~IEVENT_MAG;
2800
2801
2802 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2803 netdev_dbg(dev,
2804 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
2805			   events, gfar_read(&regs->imask));
2806
2807
2808 if (events & IEVENT_TXE) {
2809 dev->stats.tx_errors++;
2810
2811 if (events & IEVENT_LC)
2812 dev->stats.tx_window_errors++;
2813 if (events & IEVENT_CRL)
2814 dev->stats.tx_aborted_errors++;
2815 if (events & IEVENT_XFUN) {
2816 netif_dbg(priv, tx_err, dev,
2817 "TX FIFO underrun, packet dropped\n");
2818 dev->stats.tx_dropped++;
2819 atomic64_inc(&priv->extra_stats.tx_underrun);
2820
2821 schedule_work(&priv->reset_task);
2822 }
2823 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2824 }
2825 if (events & IEVENT_BSY) {
2826 dev->stats.rx_over_errors++;
2827 atomic64_inc(&priv->extra_stats.rx_bsy);
2828
2829 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
2830			  gfar_read(&regs->rstat));
2831 }
2832 if (events & IEVENT_BABR) {
2833 dev->stats.rx_errors++;
2834 atomic64_inc(&priv->extra_stats.rx_babr);
2835
2836 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2837 }
2838 if (events & IEVENT_EBERR) {
2839 atomic64_inc(&priv->extra_stats.eberr);
2840 netif_dbg(priv, rx_err, dev, "bus error\n");
2841 }
2842 if (events & IEVENT_RXC)
2843 netif_dbg(priv, rx_status, dev, "control frame\n");
2844
2845 if (events & IEVENT_BABT) {
2846 atomic64_inc(&priv->extra_stats.tx_babt);
2847 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2848 }
2849 return IRQ_HANDLED;
2850}
2851
2852
2853static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2854{
2855 struct gfar_priv_grp *gfargrp = grp_id;
2856
2857
2858 u32 events = gfar_read(&gfargrp->regs->ievent);
2859
2860
2861 if (events & IEVENT_RX_MASK)
2862 gfar_receive(irq, grp_id);
2863
2864
2865 if (events & IEVENT_TX_MASK)
2866 gfar_transmit(irq, grp_id);
2867
2868
2869 if (events & IEVENT_ERR_MASK)
2870 gfar_error(irq, grp_id);
2871
2872 return IRQ_HANDLED;
2873}
2874
2875#ifdef CONFIG_NET_POLL_CONTROLLER
2876
2877
2878
2879
2880static void gfar_netpoll(struct net_device *dev)
2881{
2882 struct gfar_private *priv = netdev_priv(dev);
2883 int i;
2884
2885
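	/* If the device has separate tx/rx/error interrupts per group,
	 * run the combined handler with all of them disabled.
	 */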
2886 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2887 for (i = 0; i < priv->num_grps; i++) {
2888 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2889
2890 disable_irq(gfar_irq(grp, TX)->irq);
2891 disable_irq(gfar_irq(grp, RX)->irq);
2892 disable_irq(gfar_irq(grp, ER)->irq);
2893 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2894 enable_irq(gfar_irq(grp, ER)->irq);
2895 enable_irq(gfar_irq(grp, RX)->irq);
2896 enable_irq(gfar_irq(grp, TX)->irq);
2897 }
2898 } else {
2899 for (i = 0; i < priv->num_grps; i++) {
2900 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2901
2902 disable_irq(gfar_irq(grp, TX)->irq);
2903 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2904 enable_irq(gfar_irq(grp, TX)->irq);
2905 }
2906 }
2907}
2908#endif
2909
2910static void free_grp_irqs(struct gfar_priv_grp *grp)
2911{
2912 free_irq(gfar_irq(grp, TX)->irq, grp);
2913 free_irq(gfar_irq(grp, RX)->irq, grp);
2914 free_irq(gfar_irq(grp, ER)->irq, grp);
2915}
2916
2917static int register_grp_irqs(struct gfar_priv_grp *grp)
2918{
2919 struct gfar_private *priv = grp->priv;
2920 struct net_device *dev = priv->ndev;
2921 int err;
2922
2923
2924
2925
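	/* If the device has multiple interrupts, register for each of them.
	 * Otherwise, only register for the one.
	 */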
2926 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2927
2928
2929
2930 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2931 gfar_irq(grp, ER)->name, grp);
2932 if (err < 0) {
2933 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2934 gfar_irq(grp, ER)->irq);
2935
2936 goto err_irq_fail;
2937 }
2938 enable_irq_wake(gfar_irq(grp, ER)->irq);
2939
2940 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2941 gfar_irq(grp, TX)->name, grp);
2942 if (err < 0) {
2943 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2944 gfar_irq(grp, TX)->irq);
2945 goto tx_irq_fail;
2946 }
2947 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2948 gfar_irq(grp, RX)->name, grp);
2949 if (err < 0) {
2950 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2951 gfar_irq(grp, RX)->irq);
2952 goto rx_irq_fail;
2953 }
2954 enable_irq_wake(gfar_irq(grp, RX)->irq);
2955
2956 } else {
2957 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2958 gfar_irq(grp, TX)->name, grp);
2959 if (err < 0) {
2960 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2961 gfar_irq(grp, TX)->irq);
2962 goto err_irq_fail;
2963 }
2964 enable_irq_wake(gfar_irq(grp, TX)->irq);
2965 }
2966
2967 return 0;
2968
2969rx_irq_fail:
2970 free_irq(gfar_irq(grp, TX)->irq, grp);
2971tx_irq_fail:
2972 free_irq(gfar_irq(grp, ER)->irq, grp);
2973err_irq_fail:
2974 return err;
2975
2976}
2977
2978static void gfar_free_irq(struct gfar_private *priv)
2979{
2980 int i;
2981
2982
2983 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2984 for (i = 0; i < priv->num_grps; i++)
2985 free_grp_irqs(&priv->gfargrp[i]);
2986 } else {
2987 for (i = 0; i < priv->num_grps; i++)
2988 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2989 &priv->gfargrp[i]);
2990 }
2991}
2992
2993static int gfar_request_irq(struct gfar_private *priv)
2994{
2995 int err, i, j;
2996
2997 for (i = 0; i < priv->num_grps; i++) {
2998 err = register_grp_irqs(&priv->gfargrp[i]);
2999 if (err) {
3000 for (j = 0; j < i; j++)
3001 free_grp_irqs(&priv->gfargrp[j]);
3002 return err;
3003 }
3004 }
3005
3006 return 0;
3007}
3008
3009
3010
3011
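/* Bring the interface up: attach the PHY, request the IRQs and start
 * the controller.
 */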
3012static int gfar_enet_open(struct net_device *dev)
3013{
3014 struct gfar_private *priv = netdev_priv(dev);
3015 int err;
3016
3017 err = init_phy(dev);
3018 if (err)
3019 return err;
3020
3021 err = gfar_request_irq(priv);
3022 if (err)
3023 return err;
3024
3025 err = startup_gfar(dev);
3026 if (err)
3027 return err;
3028
3029 return err;
3030}
3031
3032
3033static int gfar_close(struct net_device *dev)
3034{
3035 struct gfar_private *priv = netdev_priv(dev);
3036
3037 cancel_work_sync(&priv->reset_task);
3038 stop_gfar(dev);
3039
3040
3041 phy_disconnect(dev->phydev);
3042
3043 gfar_free_irq(priv);
3044
3045 return 0;
3046}
3047
3048
3049
3050
3051static void gfar_clear_exact_match(struct net_device *dev)
3052{
3053 int idx;
3054 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3055
3056 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3057 gfar_set_mac_for_addr(dev, idx, zero_arr);
3058}
3059
3060
3061
3062
3063
3064
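/* Update the hash table and exact-match registers from the current
 * multicast list, and set the promiscuous/allmulti modes as requested.
 */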
3065static void gfar_set_multi(struct net_device *dev)
3066{
3067 struct netdev_hw_addr *ha;
3068 struct gfar_private *priv = netdev_priv(dev);
3069 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3070 u32 tempval;
3071
3072 if (dev->flags & IFF_PROMISC) {
3073
3074		tempval = gfar_read(&regs->rctrl);
3075		tempval |= RCTRL_PROM;
3076		gfar_write(&regs->rctrl, tempval);
3077 } else {
3078
3079		tempval = gfar_read(&regs->rctrl);
3080		tempval &= ~(RCTRL_PROM);
3081		gfar_write(&regs->rctrl, tempval);
3082 }
3083
3084 if (dev->flags & IFF_ALLMULTI) {
3085
3086		gfar_write(&regs->igaddr0, 0xffffffff);
3087		gfar_write(&regs->igaddr1, 0xffffffff);
3088		gfar_write(&regs->igaddr2, 0xffffffff);
3089		gfar_write(&regs->igaddr3, 0xffffffff);
3090		gfar_write(&regs->igaddr4, 0xffffffff);
3091		gfar_write(&regs->igaddr5, 0xffffffff);
3092		gfar_write(&regs->igaddr6, 0xffffffff);
3093		gfar_write(&regs->igaddr7, 0xffffffff);
3094		gfar_write(&regs->gaddr0, 0xffffffff);
3095		gfar_write(&regs->gaddr1, 0xffffffff);
3096		gfar_write(&regs->gaddr2, 0xffffffff);
3097		gfar_write(&regs->gaddr3, 0xffffffff);
3098		gfar_write(&regs->gaddr4, 0xffffffff);
3099		gfar_write(&regs->gaddr5, 0xffffffff);
3100		gfar_write(&regs->gaddr6, 0xffffffff);
3101		gfar_write(&regs->gaddr7, 0xffffffff);
3102 } else {
3103 int em_num;
3104 int idx;
3105
3106
3107		gfar_write(&regs->igaddr0, 0x0);
3108		gfar_write(&regs->igaddr1, 0x0);
3109		gfar_write(&regs->igaddr2, 0x0);
3110		gfar_write(&regs->igaddr3, 0x0);
3111		gfar_write(&regs->igaddr4, 0x0);
3112		gfar_write(&regs->igaddr5, 0x0);
3113		gfar_write(&regs->igaddr6, 0x0);
3114		gfar_write(&regs->igaddr7, 0x0);
3115		gfar_write(&regs->gaddr0, 0x0);
3116		gfar_write(&regs->gaddr1, 0x0);
3117		gfar_write(&regs->gaddr2, 0x0);
3118		gfar_write(&regs->gaddr3, 0x0);
3119		gfar_write(&regs->gaddr4, 0x0);
3120		gfar_write(&regs->gaddr5, 0x0);
3121		gfar_write(&regs->gaddr6, 0x0);
3122		gfar_write(&regs->gaddr7, 0x0);
3123
3124
3125
3126
3127
3128 if (priv->extended_hash) {
3129 em_num = GFAR_EM_NUM + 1;
3130 gfar_clear_exact_match(dev);
3131 idx = 1;
3132 } else {
3133 idx = 0;
3134 em_num = 0;
3135 }
3136
3137 if (netdev_mc_empty(dev))
3138 return;
3139
3140
3141 netdev_for_each_mc_addr(ha, dev) {
3142 if (idx < em_num) {
3143 gfar_set_mac_for_addr(dev, idx, ha->addr);
3144 idx++;
3145 } else
3146 gfar_set_hash_for_addr(dev, ha->addr);
3147 }
3148 }
3149}
3150
3151void gfar_mac_reset(struct gfar_private *priv)
3152{
3153 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3154 u32 tempval;
3155
3156
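	/* Reset the MAC */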
3157	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3158
3159
3160 udelay(3);
3161
3162
3163
3164
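	/* The soft reset bit is not self-clearing, so clear it before
	 * resuming normal operation.
	 */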
3165	gfar_write(&regs->maccfg1, 0);
3166
3167 udelay(3);
3168
3169 gfar_rx_offload_en(priv);
3170
3171
3172	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
3173	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3174
3175
3176	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3177
3178
3179 tempval = MACCFG2_INIT_SETTINGS;
3180
3181
3182
3183
3184
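	/* Work around the eTSEC-74 erratum by allowing huge frames and
	 * enabling the length check.
	 */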
3185 if (gfar_has_errata(priv, GFAR_ERRATA_74))
3186 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3187
3188	gfar_write(&regs->maccfg2, tempval);
3189
3190
3191	gfar_write(&regs->igaddr0, 0);
3192	gfar_write(&regs->igaddr1, 0);
3193	gfar_write(&regs->igaddr2, 0);
3194	gfar_write(&regs->igaddr3, 0);
3195	gfar_write(&regs->igaddr4, 0);
3196	gfar_write(&regs->igaddr5, 0);
3197	gfar_write(&regs->igaddr6, 0);
3198	gfar_write(&regs->igaddr7, 0);
3199
3200	gfar_write(&regs->gaddr0, 0);
3201	gfar_write(&regs->gaddr1, 0);
3202	gfar_write(&regs->gaddr2, 0);
3203	gfar_write(&regs->gaddr3, 0);
3204	gfar_write(&regs->gaddr4, 0);
3205	gfar_write(&regs->gaddr5, 0);
3206	gfar_write(&regs->gaddr6, 0);
3207	gfar_write(&regs->gaddr7, 0);
3208
3209 if (priv->extended_hash)
3210 gfar_clear_exact_match(priv->ndev);
3211
3212 gfar_mac_rx_config(priv);
3213
3214 gfar_mac_tx_config(priv);
3215
3216 gfar_set_mac_address(priv->ndev);
3217
3218 gfar_set_multi(priv->ndev);
3219
3220
3221 gfar_ints_disable(priv);
3222
3223
3224 gfar_configure_coalescing_all(priv);
3225}
3226
3227static void gfar_hw_init(struct gfar_private *priv)
3228{
3229 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3230 u32 attrs;
3231
3232
3233
3234
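	/* Stop the DMA engine now, in case it was left running */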
3235 gfar_halt(priv);
3236
3237 gfar_mac_reset(priv);
3238
3239
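	/* Zero out the RMON MIB counters if the device has them, and mask
	 * their interrupt sources (cam1/cam2).
	 */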
3240 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3241 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
3242
3243
3244		gfar_write(&regs->rmon.cam1, 0xffffffff);
3245		gfar_write(&regs->rmon.cam2, 0xffffffff);
3246 }
3247
3248
3249	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3250
3251
3252 attrs = ATTRELI_EL(priv->rx_stash_size) |
3253 ATTRELI_EI(priv->rx_stash_index);
3254
3255	gfar_write(&regs->attreli, attrs);
3256
3257
3258
3259
3260 attrs = ATTR_INIT_SETTINGS;
3261
3262 if (priv->bd_stash_en)
3263 attrs |= ATTR_BDSTASH;
3264
3265 if (priv->rx_stash_size != 0)
3266 attrs |= ATTR_BUFSTASH;
3267
3268	gfar_write(&regs->attr, attrs);
3269
3270
3271	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
3272	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
3273	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3274
3275
3276 if (priv->num_grps > 1)
3277 gfar_write_isrg(priv);
3278}
3279
3280static const struct net_device_ops gfar_netdev_ops = {
3281 .ndo_open = gfar_enet_open,
3282 .ndo_start_xmit = gfar_start_xmit,
3283 .ndo_stop = gfar_close,
3284 .ndo_change_mtu = gfar_change_mtu,
3285 .ndo_set_features = gfar_set_features,
3286 .ndo_set_rx_mode = gfar_set_multi,
3287 .ndo_tx_timeout = gfar_timeout,
3288 .ndo_do_ioctl = gfar_ioctl,
3289 .ndo_get_stats = gfar_get_stats,
3290 .ndo_change_carrier = fixed_phy_change_carrier,
3291 .ndo_set_mac_address = gfar_set_mac_addr,
3292 .ndo_validate_addr = eth_validate_addr,
3293#ifdef CONFIG_NET_POLL_CONTROLLER
3294 .ndo_poll_controller = gfar_netpoll,
3295#endif
3296};
3297
3298
3299
3300
3301static int gfar_probe(struct platform_device *ofdev)
3302{
3303 struct device_node *np = ofdev->dev.of_node;
3304 struct net_device *dev = NULL;
3305 struct gfar_private *priv = NULL;
3306 int err = 0, i;
3307
3308 err = gfar_of_init(ofdev, &dev);
3309
3310 if (err)
3311 return err;
3312
3313 priv = netdev_priv(dev);
3314 priv->ndev = dev;
3315 priv->ofdev = ofdev;
3316 priv->dev = &ofdev->dev;
3317 SET_NETDEV_DEV(dev, &ofdev->dev);
3318
3319 INIT_WORK(&priv->reset_task, gfar_reset_task);
3320
3321 platform_set_drvdata(ofdev, priv);
3322
3323 gfar_detect_errata(priv);
3324
3325
3326 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3327
3328
3329 dev->watchdog_timeo = TX_TIMEOUT;
3330
3331 dev->mtu = 1500;
3332 dev->min_mtu = 50;
3333 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3334 dev->netdev_ops = &gfar_netdev_ops;
3335 dev->ethtool_ops = &gfar_ethtool_ops;
3336
3337
3338 for (i = 0; i < priv->num_grps; i++) {
3339 if (priv->poll_mode == GFAR_SQ_POLLING) {
3340 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3341 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
3342 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3343 gfar_poll_tx_sq, 2);
3344 } else {
3345 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3346 gfar_poll_rx, GFAR_DEV_WEIGHT);
3347 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3348 gfar_poll_tx, 2);
3349 }
3350 }
3351
3352 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3353 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3354 NETIF_F_RXCSUM;
3355 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3356 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3357 }
3358
3359 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3360 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3361 NETIF_F_HW_VLAN_CTAG_RX;
3362 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3363 }
3364
3365 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3366
3367 gfar_init_addr_hash_table(priv);
3368
3369
3370
3371
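	/* Leave room for the RX timestamp that the controller inserts in
	 * the padding, plus alignment padding for the CPU.
	 */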
3372 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3373 priv->padding = 8 + DEFAULT_PADDING;
3374
3375 if (dev->features & NETIF_F_IP_CSUM ||
3376 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3377 dev->needed_headroom = GMAC_FCB_LEN;
3378
3379
3380 for (i = 0; i < priv->num_tx_queues; i++) {
3381 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3382 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3383 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3384 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3385 }
3386
3387 for (i = 0; i < priv->num_rx_queues; i++) {
3388 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3389 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3390 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3391 }
3392
3393
3394 priv->rx_filer_enable =
3395 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3396
3397 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
3398
3399 if (priv->num_tx_queues == 1)
3400 priv->prio_sched_en = 1;
3401
3402 set_bit(GFAR_DOWN, &priv->state);
3403
3404 gfar_hw_init(priv);
3405
3406
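	/* Carrier starts down, phylib will bring it up */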
3407 netif_carrier_off(dev);
3408
3409 err = register_netdev(dev);
3410
3411 if (err) {
3412 pr_err("%s: Cannot register net device, aborting\n", dev->name);
3413 goto register_fail;
3414 }
3415
3416 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3417 priv->wol_supported |= GFAR_WOL_MAGIC;
3418
3419 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3420 priv->rx_filer_enable)
3421 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3422
3423 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3424
3425
3426 for (i = 0; i < priv->num_grps; i++) {
3427 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3428 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3429 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3430 dev->name, "_g", '0' + i, "_tx");
3431 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3432 dev->name, "_g", '0' + i, "_rx");
3433 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3434 dev->name, "_g", '0' + i, "_er");
3435 } else
3436 strcpy(gfar_irq(grp, TX)->name, dev->name);
3437 }
3438
3439
3440 gfar_init_filer_table(priv);
3441
3442
3443 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3444
3445
3446
3447
3448 netdev_info(dev, "Running with NAPI enabled\n");
3449 for (i = 0; i < priv->num_rx_queues; i++)
3450 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3451 i, priv->rx_queue[i]->rx_ring_size);
3452 for (i = 0; i < priv->num_tx_queues; i++)
3453 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3454 i, priv->tx_queue[i]->tx_ring_size);
3455
3456 return 0;
3457
3458register_fail:
3459 if (of_phy_is_fixed_link(np))
3460 of_phy_deregister_fixed_link(np);
3461 unmap_group_regs(priv);
3462 gfar_free_rx_queues(priv);
3463 gfar_free_tx_queues(priv);
3464 of_node_put(priv->phy_node);
3465 of_node_put(priv->tbi_node);
3466 free_gfar_dev(priv);
3467 return err;
3468}
3469
3470static int gfar_remove(struct platform_device *ofdev)
3471{
3472 struct gfar_private *priv = platform_get_drvdata(ofdev);
3473 struct device_node *np = ofdev->dev.of_node;
3474
3475 of_node_put(priv->phy_node);
3476 of_node_put(priv->tbi_node);
3477
3478 unregister_netdev(priv->ndev);
3479
3480 if (of_phy_is_fixed_link(np))
3481 of_phy_deregister_fixed_link(np);
3482
3483 unmap_group_regs(priv);
3484 gfar_free_rx_queues(priv);
3485 gfar_free_tx_queues(priv);
3486 free_gfar_dev(priv);
3487
3488 return 0;
3489}
3490
3491#ifdef CONFIG_PM
3492
3493static void __gfar_filer_disable(struct gfar_private *priv)
3494{
3495 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3496 u32 temp;
3497
3498	temp = gfar_read(&regs->rctrl);
3499	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3500	gfar_write(&regs->rctrl, temp);
3501}
3502
3503static void __gfar_filer_enable(struct gfar_private *priv)
3504{
3505 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3506 u32 temp;
3507
3508	temp = gfar_read(&regs->rctrl);
3509	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3510	gfar_write(&regs->rctrl, temp);
3511}
3512
3513
3514static void gfar_filer_config_wol(struct gfar_private *priv)
3515{
3516 unsigned int i;
3517 u32 rqfcr;
3518
3519 __gfar_filer_disable(priv);
3520
3521
3522 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3523 for (i = 0; i <= MAX_FILER_IDX; i++)
3524 gfar_write_filer(priv, i, rqfcr, 0);
3525
3526 i = 0;
3527 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3528
3529 struct net_device *ndev = priv->ndev;
3530
3531 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3532 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3533 (ndev->dev_addr[1] << 8) |
3534 ndev->dev_addr[2];
3535
3536 rqfcr = (qindex << 10) | RQFCR_AND |
3537 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3538
3539 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3540
3541 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3542 (ndev->dev_addr[4] << 8) |
3543 ndev->dev_addr[5];
3544 rqfcr = (qindex << 10) | RQFCR_GPI |
3545 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3546 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3547 }
3548
3549 __gfar_filer_enable(priv);
3550}
3551
3552static void gfar_filer_restore_table(struct gfar_private *priv)
3553{
3554 u32 rqfcr, rqfpr;
3555 unsigned int i;
3556
3557 __gfar_filer_disable(priv);
3558
3559 for (i = 0; i <= MAX_FILER_IDX; i++) {
3560 rqfcr = priv->ftp_rqfcr[i];
3561 rqfpr = priv->ftp_rqfpr[i];
3562 gfar_write_filer(priv, i, rqfcr, rqfpr);
3563 }
3564
3565 __gfar_filer_enable(priv);
3566}
3567
3568
3569static void gfar_start_wol_filer(struct gfar_private *priv)
3570{
3571 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3572 u32 tempval;
3573 int i = 0;
3574
3575
3576	gfar_write(&regs->rqueue, priv->rqueue);
3577
3578
3579	tempval = gfar_read(&regs->dmactrl);
3580	tempval |= DMACTRL_INIT_SETTINGS;
3581	gfar_write(&regs->dmactrl, tempval);
3582
3583
3584	tempval = gfar_read(&regs->dmactrl);
3585	tempval &= ~DMACTRL_GRS;
3586	gfar_write(&regs->dmactrl, tempval);
3587
3588 for (i = 0; i < priv->num_grps; i++) {
3589 regs = priv->gfargrp[i].regs;
3590
3591		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3592
3593		gfar_write(&regs->imask, IMASK_FGPI);
3594 }
3595
3596
3597	tempval = gfar_read(&regs->maccfg1);
3598	tempval |= MACCFG1_RX_EN;
3599	gfar_write(&regs->maccfg1, tempval);
3600}
3601
3602static int gfar_suspend(struct device *dev)
3603{
3604 struct gfar_private *priv = dev_get_drvdata(dev);
3605 struct net_device *ndev = priv->ndev;
3606 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3607 u32 tempval;
3608 u16 wol = priv->wol_opts;
3609
3610 if (!netif_running(ndev))
3611 return 0;
3612
3613 disable_napi(priv);
3614 netif_tx_lock(ndev);
3615 netif_device_detach(ndev);
3616 netif_tx_unlock(ndev);
3617
3618 gfar_halt(priv);
3619
3620 if (wol & GFAR_WOL_MAGIC) {
3621
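		/* Enable the Magic Packet interrupt */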
3622		gfar_write(&regs->imask, IMASK_MAG);
3623
3624
3625		tempval = gfar_read(&regs->maccfg2);
3626		tempval |= MACCFG2_MPEN;
3627		gfar_write(&regs->maccfg2, tempval);
3628
3629
3630		tempval = gfar_read(&regs->maccfg1);
3631		tempval |= MACCFG1_RX_EN;
3632		gfar_write(&regs->maccfg1, tempval);
3633
3634 } else if (wol & GFAR_WOL_FILER_UCAST) {
3635 gfar_filer_config_wol(priv);
3636 gfar_start_wol_filer(priv);
3637
3638 } else {
3639 phy_stop(ndev->phydev);
3640 }
3641
3642 return 0;
3643}
3644
3645static int gfar_resume(struct device *dev)
3646{
3647 struct gfar_private *priv = dev_get_drvdata(dev);
3648 struct net_device *ndev = priv->ndev;
3649 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3650 u32 tempval;
3651 u16 wol = priv->wol_opts;
3652
3653 if (!netif_running(ndev))
3654 return 0;
3655
3656 if (wol & GFAR_WOL_MAGIC) {
3657
3658		tempval = gfar_read(&regs->maccfg2);
3659		tempval &= ~MACCFG2_MPEN;
3660		gfar_write(&regs->maccfg2, tempval);
3661
3662 } else if (wol & GFAR_WOL_FILER_UCAST) {
3663
3664 gfar_halt(priv);
3665 gfar_filer_restore_table(priv);
3666
3667 } else {
3668 phy_start(ndev->phydev);
3669 }
3670
3671 gfar_start(priv);
3672
3673 netif_device_attach(ndev);
3674 enable_napi(priv);
3675
3676 return 0;
3677}
3678
3679static int gfar_restore(struct device *dev)
3680{
3681 struct gfar_private *priv = dev_get_drvdata(dev);
3682 struct net_device *ndev = priv->ndev;
3683
3684 if (!netif_running(ndev)) {
3685 netif_device_attach(ndev);
3686
3687 return 0;
3688 }
3689
3690 gfar_init_bds(ndev);
3691
3692 gfar_mac_reset(priv);
3693
3694 gfar_init_tx_rx_base(priv);
3695
3696 gfar_start(priv);
3697
3698 priv->oldlink = 0;
3699 priv->oldspeed = 0;
3700 priv->oldduplex = -1;
3701
3702 if (ndev->phydev)
3703 phy_start(ndev->phydev);
3704
3705 netif_device_attach(ndev);
3706 enable_napi(priv);
3707
3708 return 0;
3709}
3710
3711static const struct dev_pm_ops gfar_pm_ops = {
3712 .suspend = gfar_suspend,
3713 .resume = gfar_resume,
3714 .freeze = gfar_suspend,
3715 .thaw = gfar_resume,
3716 .restore = gfar_restore,
3717};
3718
3719#define GFAR_PM_OPS (&gfar_pm_ops)
3720
3721#else
3722
3723#define GFAR_PM_OPS NULL
3724
3725#endif
3726
3727static const struct of_device_id gfar_match[] =
3728{
3729 {
3730 .type = "network",
3731 .compatible = "gianfar",
3732 },
3733 {
3734 .compatible = "fsl,etsec2",
3735 },
3736 {},
3737};
3738MODULE_DEVICE_TABLE(of, gfar_match);
3739
3740
3741static struct platform_driver gfar_driver = {
3742 .driver = {
3743 .name = "fsl-gianfar",
3744 .pm = GFAR_PM_OPS,
3745 .of_match_table = gfar_match,
3746 },
3747 .probe = gfar_probe,
3748 .remove = gfar_remove,
3749};
3750
3751module_platform_driver(gfar_driver);
3752