/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  F 3.9 (V)
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device.  Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev, unsigned int txqueue);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

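/* Set up one Rx buffer descriptor: store the buffer address and mark
 * the BD empty, setting the wrap bit on the last BD of the ring.
 */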
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

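/* Allocate one coherent DMA region holding all Tx and Rx BD rings,
 * plus the per-ring buffer bookkeeping arrays, then initialize them.
 */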
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;

		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

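/* Program the physical base addresses of the Tx/Rx BD rings into the
 * controller's tbase/rbase registers.
 */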
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

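/* Program per-queue Rx parameters: ring size and the lossless flow
 * control threshold.
 */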
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

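/* (Re)program the interrupt coalescing registers for the Tx/Rx queues
 * selected by tx_mask/rx_mask.
 */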
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask,
				      unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

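/* Mask all controller interrupts and ack any pending events */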
static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

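/* Parse a queue-group device tree node: map its registers, grab its
 * IRQs, and assign Rx/Tx queues to the group based on the bit maps.
 */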
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
				rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
				txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (!of_node_cmp(child->name, "queue-group"))
			num++;

	return num;
}

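/* Read the controller configuration from the device tree: queue/group
 * counts, MAC address, stashing options and PHY/TBI handles, then
 * allocate the net_device and per-queue structures accordingly.
 */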
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (of_node_cmp(child->name, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

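/* Handle SIOCSHWTSTAMP: enable/disable hardware Tx/Rx time stamping
 * (Rx changes require a controller reset to take effect).
 */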
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

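/* Program the default Rx filer table: a catch-all match rule, cluster
 * entries for IPv4/IPv6 over TCP/UDP, and no-match rules for the rest.
 */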
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicated the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P2010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

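/* Soft-reset the MAC and reprogram it with the current driver settings */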
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

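/* One-time hardware bring-up: halt DMA, reset the MAC, clear the RMON
 * MIB counters and program attribute/FIFO/interrupt-steering registers.
 */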
static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->min_mtu = 50;
	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
					  gfar_poll_tx_sq, 2);
		} else {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
					  gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes, and
	 * plus 2 padding bytes for the rest
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8 + DEFAULT_PADDING;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Always enable rx filer if available */
	priv->rx_filer_enable =
	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
		priv->wol_supported |= GFAR_WOL_MAGIC;

	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
	    priv->rx_filer_enable)
		priv->wol_supported |= GFAR_WOL_FILER_UCAST;

	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);
	struct device_node *np = ofdev->dev.of_node;

	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM

static void __gfar_filer_disable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
	gfar_write(&regs->rctrl, temp);
}

static void __gfar_filer_enable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, temp);
}

/* Filer rules implementing wol capabilities */
static void gfar_filer_config_wol(struct gfar_private *priv)
{
	unsigned int i;
	u32 rqfcr;

	__gfar_filer_disable(priv);

	/* clear the filer table, reject any packet by default */
	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
	for (i = 0; i <= MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, rqfcr, 0);

	i = 0;
	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
		/* unicast packet, accept it */
		struct net_device *ndev = priv->ndev;
		/* get the default rx queue index */
		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
				    (ndev->dev_addr[1] << 8) |
				     ndev->dev_addr[2];

		rqfcr = (qindex << 10) | RQFCR_AND |
			RQFCR_CMP_EXACT | RQFCR_PID_DAH;

		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);

		dest_mac_addr = (ndev->dev_addr[3] << 16) |
				(ndev->dev_addr[4] << 8) |
				 ndev->dev_addr[5];
		rqfcr = (qindex << 10) | RQFCR_GPI |
			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
	}

	__gfar_filer_enable(priv);
}

static void gfar_filer_restore_table(struct gfar_private *priv)
{
	u32 rqfcr, rqfpr;
	unsigned int i;

	__gfar_filer_disable(priv);

	for (i = 0; i <= MAX_FILER_IDX; i++) {
		rqfcr = priv->ftp_rqfcr[i];
		rqfpr = priv->ftp_rqfpr[i];
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}

	__gfar_filer_enable(priv);
}

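/* Start the controller in Rx-only mode with the wake-on-filer rules
 * active, so a matching unicast frame can wake the system.
 */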
static void gfar_start_wol_filer(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~DMACTRL_GRS;
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear RHLT, so that the DMA starts polling now */
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* enable the Filer General Purpose Interrupt */
		gfar_write(&regs->imask, IMASK_FGPI);
	}

	/* Enable Rx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= MACCFG1_RX_EN;
	gfar_write(&regs->maccfg1, tempval);
}

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	disable_napi(priv);
	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	gfar_halt(priv);

	if (wol & GFAR_WOL_MAGIC) {
		/* Enable interrupt on Magic Packet */
		gfar_write(&regs->imask, IMASK_MAG);

		/* Enable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval |= MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

		/* re-enable the Rx block */
		tempval = gfar_read(&regs->maccfg1);
		tempval |= MACCFG1_RX_EN;
		gfar_write(&regs->maccfg1, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		gfar_filer_config_wol(priv);
		gfar_start_wol_filer(priv);

	} else {
		phy_stop(ndev->phydev);
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	if (wol & GFAR_WOL_MAGIC) {
		/* Disable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		/* need to stop rx only, tx is already down */
		gfar_halt(priv);
		gfar_filer_restore_table(priv);

	} else {
		phy_start(ndev->phydev);
	}

	gfar_start(priv);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	gfar_init_bds(ndev);

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static const struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		}
		else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 * Returns -1 on failure.
 */
static int init_phy(struct net_device *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct gfar_private *priv = netdev_priv(dev);
	phy_interface_t interface;
	struct phy_device *phydev;
	struct ethtool_eee edata;

	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array),
			       mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	/* Add support for flow control */
	phy_support_asym_pause(phydev);

	/* disable EEE autoneg, EEE not supported by eTSEC */
	memset(&edata, 0, sizeof(struct ethtool_eee));
	phy_ethtool_set_eee(phydev, &edata);

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
		return;
	}

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);

	put_device(&tbiphy->mdio.dev);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(dev->phydev);

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	int i;

	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;

	if (rx_queue->skb)
		dev_kfree_skb(rx_queue->skb);

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;

		if (!rxb->page)
			continue;

		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rxb->page);

		rxb->page = NULL;
	}

	kfree(rx_queue->rx_buff);
	rx_queue->rx_buff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_buff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	netif_trans_update(priv->ndev); /* prevent tx timeout */
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, ER)->irq);

		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, RX)->irq);

	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, TX)->irq);
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;

}

static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}

static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}

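/* Bring the interface up: reset the MAC, allocate the rings, start the
 * DMA, PHY and NAPI, and wake the Tx queues.
 */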
2207int startup_gfar(struct net_device *ndev)
2208{
2209 struct gfar_private *priv = netdev_priv(ndev);
2210 int err;
2211
2212 gfar_mac_reset(priv);
2213
2214 err = gfar_alloc_skb_resources(ndev);
2215 if (err)
2216 return err;
2217
2218 gfar_init_tx_rx_base(priv);
2219
2220 smp_mb__before_atomic();
2221 clear_bit(GFAR_DOWN, &priv->state);
2222 smp_mb__after_atomic();
2223
2224
2225 gfar_start(priv);
2226
2227
2228 priv->oldlink = 0;
2229 priv->oldspeed = 0;
2230 priv->oldduplex = -1;
2231
2232 phy_start(ndev->phydev);
2233
2234 enable_napi(priv);
2235
2236 netif_tx_wake_all_queues(ndev);
2237
2238 return 0;
2239}
2240
2241
2242
2243
2244static int gfar_enet_open(struct net_device *dev)
2245{
2246 struct gfar_private *priv = netdev_priv(dev);
2247 int err;
2248
2249 err = init_phy(dev);
2250 if (err)
2251 return err;
2252
2253 err = gfar_request_irq(priv);
2254 if (err)
2255 return err;
2256
2257 err = startup_gfar(dev);
2258 if (err)
2259 return err;
2260
2261 return err;
2262}
2263
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
	} else
		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}

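/* This is called by the kernel when a frame is ready for transmission;
 * it is hooked up as the device's ndo_start_xmit handler.
 */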
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	skb_frag_t *frag;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		dev_consume_skb_any(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (likely(!nr_frags)) {
		if (likely(!do_tstamp))
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		u32 lstatus_start = lstatus;

		/* Place the fragment addresses and lengths into the TxBDs */
		frag = &skb_shinfo(skb)->frags[0];
		for (i = 0; i < nr_frags; i++, frag++) {
			unsigned int size;

			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			size = skb_frag_size(frag);

			lstatus = be32_to_cpu(txbdp->lstatus) | size |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
						   size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = lstatus_start;
	}

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;

		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);
		if (!nr_frags)
			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;

		/* Setup tx hardware time stamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow.
	 */
	spin_lock_bh(&tx_queue->txlock);
	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	spin_unlock_bh(&tx_queue->txlock);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop queues.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	return NETDEV_TX_OK;

dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}
	gfar_wmb();
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

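/* Stops the kernel queue, and halts the controller */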
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(dev->phydev);

	gfar_free_irq(priv);

	return 0;
}

/* Program the primary station address into the first exact-match
 * MAC address register pair.
 */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

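/* Changing the MTU requires a full restart: take the RESETTING bit,
 * stop the device if it is up, update the MTU, and bring it back up.
 */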
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

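/* Interrupt Handler for Transmit complete: reclaims completed TxBDs,
 * delivers tx timestamps, and wakes the queue if ring space freed up.
 */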
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
					  ~0x7UL);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

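/* Allocate a fresh page for the given rx buffer slot and DMA-map it. */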
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
		__free_page(page);

		return false;
	}

	rxb->dma = addr;
	rxb->page = page;
	rxb->page_offset = 0;

	return true;
}

static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
	struct gfar_extra_stats *estats = &priv->extra_stats;

	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
	atomic64_inc(&estats->rx_alloc_err);
}

static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt)
{
	struct rxbd8 *bdp;
	struct gfar_rx_buff *rxb;
	int i;

	i = rx_queue->next_to_use;
	bdp = &rx_queue->rx_bd_base[i];
	rxb = &rx_queue->rx_buff[i];

	while (alloc_cnt--) {
		/* try reuse page */
		if (unlikely(!rxb->page)) {
			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
				gfar_rx_alloc_err(rx_queue);
				break;
			}
		}

		/* Setup the new RxBD */
		gfar_init_rxbdp(rx_queue, bdp,
				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

		/* Update to the next pointer */
		bdp++;
		rxb++;

		if (unlikely(++i == rx_queue->rx_ring_size)) {
			i = 0;
			bdp = rx_queue->rx_bd_base;
			rxb = rx_queue->rx_buff;
		}
	}

	rx_queue->next_to_use = i;
	rx_queue->next_to_alloc = i;
}

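/* Bump the per-device error counters matching the rx error flags set
 * in the buffer descriptor status.
 */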
static void count_errors(u32 lstatus, struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (lstatus & BD_LFLAG(RXBD_LARGE))
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_over_errors++;
	}
}

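/* RX interrupt handler: mask further rx interrupts and hand the ring
 * off to NAPI for polling.
 */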
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask, ievent;

	ievent = gfar_read(&grp->regs->ievent);

	if (unlikely(ievent & IEVENT_FGPI)) {
		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
		return IRQ_HANDLED;
	}

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* TX interrupt handler: mask further tx interrupts and schedule the
 * NAPI tx poll.
 */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

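/* Attach the half-page rx buffer to the skb and decide whether the
 * page can be recycled; returns true if the driver keeps the page.
 */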
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
			     struct sk_buff *skb, bool first)
{
	int size = lstatus & BD_LENGTH_MASK;
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		/* the last fragment's length contains the full frame length */
		if (lstatus & BD_LFLAG(RXBD_LAST))
			size -= skb->len;

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset + RXBUF_ALIGNMENT,
				size, GFAR_RXB_TRUESIZE);
	}

	/* try reuse page */
	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* change offset to the other half */
	rxb->page_offset ^= GFAR_RXB_TRUESIZE;

	page_ref_inc(page);

	return true;
}

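/* Recycle a still-mapped rx page into the next slot to be allocated. */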
static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
			       struct gfar_rx_buff *old_rxb)
{
	struct gfar_rx_buff *new_rxb;
	u16 nta = rxq->next_to_alloc;

	new_rxb = &rxq->rx_buff[nta];

	/* find next buf that can reuse a page */
	nta++;
	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;

	/* copy page reference */
	*new_rxb = *old_rxb;

	/* sync for use by the device */
	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
					 old_rxb->page_offset,
					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
}

static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
					    u32 lstatus, struct sk_buff *skb)
{
	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
	struct page *page = rxb->page;
	bool first = false;

	if (likely(!skb)) {
		void *buff_addr = page_address(page) + rxb->page_offset;

		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			gfar_rx_alloc_err(rx_queue);
			return NULL;
		}
		skb_reserve(skb, RXBUF_ALIGNMENT);
		first = true;
	}

	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);

	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
		/* reuse the free half of the page */
		gfar_reuse_rx_page(rx_queue, rxb);
	} else {
		/* page cannot be reused, unmap it */
		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear rxb content */
	rxb->page = NULL;

	return skb;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, let the
	 * stack verify the checksum itself.
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}

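/* gfar_process_frame() -- strip the FCB, timestamp and padding from one
 * received frame and apply the checksum and VLAN offload results.
 */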
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (priv->uses_rxfcb)
		skb_pull(skb, GMAC_FCB_LEN);

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	/* Trim off the FCS */
	pskb_trim(skb, skb->len - ETH_FCS_LEN);

	if (ndev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *ndev = rx_queue->ndev;
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxbd8 *bdp;
	int i, howmany = 0;
	struct sk_buff *skb = rx_queue->skb;
	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
	unsigned int total_bytes = 0, total_pkts = 0;

	/* Get the first full descriptor */
	i = rx_queue->next_to_clean;

	while (rx_work_limit--) {
		u32 lstatus;

		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
			cleaned_cnt = 0;
		}

		bdp = &rx_queue->rx_bd_base[i];
		lstatus = be32_to_cpu(bdp->lstatus);
		if (lstatus & BD_LFLAG(RXBD_EMPTY))
			break;

		/* order rx buffer descriptor reads */
		rmb();

		/* fetch next to clean buffer from the ring */
		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
		if (unlikely(!skb))
			break;

		cleaned_cnt++;
		howmany++;

		if (unlikely(++i == rx_queue->rx_ring_size))
			i = 0;

		rx_queue->next_to_clean = i;

		/* fetch next buffer if not the last in frame */
		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
			continue;

		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
			count_errors(lstatus, ndev);

			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;
			continue;
		}

		gfar_process_frame(ndev, skb);

		/* Increment the number of packets */
		total_pkts++;
		total_bytes += skb->len;

		skb_record_rx_queue(skb, rx_queue->qindex);

		skb->protocol = eth_type_trans(skb, ndev);

		/* Send the packet up the stack */
		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

		skb = NULL;
	}

	/* Store incomplete frames for completion */
	rx_queue->skb = skb;

	rx_queue->stats.rx_packets += total_pkts;
	rx_queue->stats.rx_bytes += total_bytes;

	if (cleaned_cnt)
		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

	/* Update Last Free RxBD pointer for LFC */
	if (unlikely(priv->tx_actual_en)) {
		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);

		gfar_write(rx_queue->rfbptr, bdp_dma);
	}

	return howmany;
}

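/* NAPI rx poll for the single-queue case (one rx queue per group) */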
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;

		napi_complete_done(napi, work_done);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

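/* NAPI rx poll for the multi-queue case: split the budget between the
 * group's active rx queues.
 */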
static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;

		napi_complete_done(napi, work_done);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (!has_tx_work) {
		u32 imask;

		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     (phydev->link && (phydev->duplex != priv->oldduplex ||
				       phydev->speed != priv->oldspeed))))
		gfar_update_link_state(priv);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr.
 *
 * The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is entry 255.
 * The 3 most significant bits of the hash index select the gaddr
 * register, and the 5 remaining bits select the bit within that
 * register (assuming a big-endian machine).
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8)  |  addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the error if debugging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_over_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

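/* Resolve the MACCFG1 flow control bits, either from the statically
 * configured pause settings or from the pause autonegotiation result.
 */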
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}

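/* Propagate the PHY's negotiated link parameters (duplex, speed and
 * pause frames) into the MAC registers whenever they change.
 */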
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				u32 bdp_dma;

				rx_queue = priv->rx_queue[i];
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}

static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);