#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUEUE(_x)	((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
			       FEC_QUIRK_HAS_FRREG,
	}, {
		.name = "imx27-fec",
		.driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
			       FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
			       FEC_QUIRK_HAS_FRREG,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
			       FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
			       FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
			       FEC_QUIRK_HAS_RACC,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
			       FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
			       FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
			       FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
			       FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
	}, {
		.name = "imx6ul-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
			       FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
			       FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
			       FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
			       FEC_QUIRK_HAS_COALESCE,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
	IMX6UL_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/* Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated, but alignment requirements vary between
 * FEC variants; the worst case is 64 bytes, so round the maximum buffer
 * size down to a 64-byte boundary.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/* The 5270/5271/5280/5282/532x RX control register also contains maximum
 * frame size bits. Other FEC hardware does not, so we need to take that
 * into account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT		(2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

static int mii_cnt;

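/* Ring walking helpers: the descriptors live in one contiguous DMA region,
 * so "next" is a fixed-size step forward that wraps from the last
 * descriptor back to the base, and "prev" does the reverse.
 */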
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

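/* Free TX descriptors are those between the software head (bd.cur) and the
 * reclaim pointer (dirty_tx), minus one so the ring never appears completely
 * full; a negative distance wraps around by ring_size.
 */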
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}

static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	int i;
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32p(src);
}

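/* Dump the state of every descriptor in TX ring 0 for debugging, marking the
 * slots the software head (S) and the reclaim pointer (H) currently address.
 */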
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

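/* Map each paged fragment of the skb to its own TX descriptor. Buffers that
 * violate the controller's alignment requirement (or that must be
 * byte-swapped for FEC_QUIRK_SWAP_FRAME) are first copied into the per-ring
 * bounce buffers. Returns the last descriptor used, or an ERR_PTR after
 * unmapping everything mapped so far if a DMA mapping fails.
 */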
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}

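/* Queue one skb for transmission: the linear head goes into the first
 * descriptor, any fragments are chained behind it, and ownership of the
 * first descriptor is handed to the hardware last (after a wmb()) so the
 * controller never sees a half-initialised chain.
 */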
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {
		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	/* Make sure the updates to rest of the descriptor are performed
	 * before transferring ownership.
	 */
	wmb();

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp and tx_skbuff are performed before
	 * txq->bd.cur.
	 */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

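/* Fill one TX descriptor with a payload slice of a TSO segment; the caller
 * flags the slice that ends a TCP segment (last_tcp) and the very last slice
 * of the skb (is_last), which also raises the transmit-complete interrupt.
 */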
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

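/* Software TSO: carve the skb into gso_size'd segments using the kernel's
 * tso_* helpers, emitting one header descriptor (built in the pre-allocated
 * tso_hdrs area) plus one or more data descriptors per segment.
 */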
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start.  On ERR007885-affected parts, only
	 * re-arm when repeated reads of the descriptor-active register show
	 * it inactive, to work around the erratum.
	 */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

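/* ndo_start_xmit: pick the ring that matches the skb's queue mapping, hand
 * the skb to the TSO or the plain submit path, and stop the queue once the
 * ring drops to the stop threshold.
 */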
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}

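/* Writing any value to an RX "descriptor active" register tells the
 * controller to resume polling that receive ring.
 */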
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->bd.ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC.  The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		val = readl(fep->hwp + FEC_RACC);
		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;

			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);
}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	} else {
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}

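/* ndo_tx_timeout runs in a context where the controller cannot be safely
 * restarted, so just record the error and defer the actual restart to the
 * tx_timeout_work worker below, which can take rtnl_lock and sleep.
 */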
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

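/* Reclaim completed TX descriptors: walk from dirty_tx towards bd.cur,
 * unmap buffers, record errors and timestamps, free the skbs, and wake the
 * queue once enough descriptors are free again.
 */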
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;

	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUEUE(queue_id);

	txq = fep->tx_queue[queue_id];
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
		if (!skb)
			goto skb_done;

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
skb_done:
		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_tx_queue_stopped(nq)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}

static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;

	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
}

static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data,
						       FEC_ENET_RX_FRSIZE - fep->rx_align,
						       DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}

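/* Copybreak: for frames no larger than rx_copybreak, copy the payload into a
 * freshly allocated skb so the original DMA buffer can stay mapped and be
 * handed straight back to the hardware.
 */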
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}

/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb_new = NULL;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool is_copybreak;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	queue_id = FEC_ENET_GET_QUEUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);	/* Make room */
		data = skb->data;

		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

#if !defined(CONFIG_M5272)
		if (fep->quirks & FEC_QUIRK_HAS_RACC)
			data = skb_pull_inline(skb, 2);
#endif

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;
	return pkt_received;
}

static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int pkt_received = 0;
	u16 queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		int ret;

		ret = fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);

		if (ret < budget - pkt_received)
			clear_bit(queue_id, &fep->work_rx);

		pkt_received += ret;
	}
	return pkt_received;
}

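/* Translate interrupt status bits into per-queue work bits. The bits are
 * rotated so that rings 1 and 2 (presumably the AVB class A/B rings on
 * multi-queue parts) are walked before the best-effort ring 0; see
 * FEC_ENET_GET_QUEUE for the inverse mapping.
 */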
static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF_0)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF_0)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if ((fep->work_tx || fep->work_rx) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
			writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}
	return ret;
}

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete_done(napi, pkts);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}

/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (!IS_ERR(mac))
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int status_change = 0;

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_tx_wake_all_queues(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
	} else {
		if (fep->link) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret = 0;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	reinit_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		netdev_err(fep->netdev, "MDIO read timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			       u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	else
		ret = 0;

	reinit_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		netdev_err(fep->netdev, "MDIO write timeout\n");
		ret = -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

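/* Enable or disable the optional FEC clocks (enet_out, ptp, ref) as a group;
 * ptp_clk_on is tracked under ptp_clk_mutex because the PTP code must not
 * touch its registers while that clock is gated.
 */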
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_enet_out);
		if (ret)
			return ret;

		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}

		ret = clk_prepare_enable(fep->clk_ref);
		if (ret)
			goto failed_clk_ref;

		phy_reset_after_clk_enable(ndev->phydev);
	} else {
		clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	if (fep->clk_ptp) {
		mutex_lock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(fep->clk_ptp);
		fep->ptp_clk_on = false;
		mutex_unlock(&fep->ptp_clk_mutex);
	}
failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);

	return ret;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev) {
			netdev_err(ndev, "Unable to connect to phy\n");
			return -ENODEV;
		}
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_set_max_speed(phy_dev, 1000);
		phy_remove_link_mode(phy_dev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
#if !defined(CONFIG_M5272)
		phy_support_sym_pause(phy_dev);
#endif
	} else {
		phy_set_max_speed(phy_dev, 100);
	}

	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) too fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	err = of_mdiobus_register(fep->mii_bus, node);
	of_node_put(node);
	if (err)
		goto err_out_free_mdiobus;

	mii_cnt++;

	/* save fec0 mii_bus */
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}

static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		mdiobus_free(fep->mii_bus);
	}
}

static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}

static int fec_enet_get_regs_len(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *r;
	int s = 0;

	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
	if (r)
		s = resource_size(r);

	return s;
}

/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif

static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;

	regs->version = fec_enet_register_version;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		off = fec_enet_register_offset[i];

		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
			continue;

		off >>= 2;
		buf[off] = readl(&theregs[off]);
	}
}

static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}

2192#if !defined(CONFIG_M5272)
2193
2194static void fec_enet_get_pauseparam(struct net_device *ndev,
2195 struct ethtool_pauseparam *pause)
2196{
2197 struct fec_enet_private *fep = netdev_priv(ndev);
2198
2199 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2200 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2201 pause->rx_pause = pause->tx_pause;
2202}
2203
2204static int fec_enet_set_pauseparam(struct net_device *ndev,
2205 struct ethtool_pauseparam *pause)
2206{
2207 struct fec_enet_private *fep = netdev_priv(ndev);
2208
2209 if (!ndev->phydev)
2210 return -ENODEV;
2211
2212 if (pause->tx_pause != pause->rx_pause) {
2213 netdev_info(ndev,
2214 "hardware only support enable/disable both tx and rx");
2215 return -EINVAL;
2216 }
2217
2218 fep->pause_flag = 0;
2219
2220
2221 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2222 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2223
2224 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2225 pause->autoneg);
2226
2227 if (pause->autoneg) {
2228 if (netif_running(ndev))
2229 fec_stop(ndev);
2230 phy_start_aneg(ndev->phydev);
2231 }
2232 if (netif_running(ndev)) {
2233 napi_disable(&fep->napi);
2234 netif_tx_lock_bh(ndev);
2235 fec_restart(ndev);
2236 netif_tx_wake_all_queues(ndev);
2237 netif_tx_unlock_bh(ndev);
2238 napi_enable(&fep->napi);
2239 }
2240
2241 return 0;
2242}

static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};

#define FEC_STATS_SIZE	(ARRAY_SIZE(fec_stats) * sizeof(u64))

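/* Mirror the hardware MIB counters into ethtool_stats[] so that they can
 * still be reported while the interface is down and its clocks are gated.
 */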
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}

static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}

static void fec_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Disable MIB statistics counters */
	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		writel(0, fep->hwp + fec_stats[i].offset);

	/* Re-enable MIB statistics counters */
	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}

#else	/* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE	0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}

static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */

/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycles.
 * So, the ICTT value = X us / (cycle_ns * 64)
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

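	/* e.g. with a 66 MHz AHB clock, 100 us becomes
	 * 100 * (66000000 / 64000) / 1000 = 103 timer ticks
	 */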
	return us * (fep->itr_clk_rate / 64000) / 1000;
}

/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(tx_itr, fep->hwp + FEC_TXIC1);
		writel(rx_itr, fep->hwp + FEC_RXIC1);
		writel(tx_itr, fep->hwp + FEC_TXIC2);
		writel(rx_itr, fep->hwp + FEC_RXIC2);
	}
}

static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}

static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device *dev = &fep->pdev->dev;
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

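	/* ICFT is an 8-bit frame-count field and ICTT a 16-bit timer field,
	 * so reject requests that cannot be programmed into the hardware.
	 * Note that the new values from @ec are validated, not the
	 * previously stored ones.
	 */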
	if (ec->rx_max_coalesced_frames > 255) {
		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		dev_err(dev, "Tx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}

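/* Program the driver default thresholds through the regular set_coalesce
 * path (used via the forward declaration near the top of the file).
 */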
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}

static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}

static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}

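/* Undo fec_enet_alloc_buffers(): unmap and free every RX skb and free the
 * TX bounce buffers along with any skbs still held by the TX ring.
 */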
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}

static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(&fep->pdev->dev,
					  txq->bd.ring_size * TSO_HEADER_SIZE,
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(fep->tx_queue[i]);
}

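/* Allocate the per-queue software state and the DMA-coherent TSO header
 * storage; the descriptor rings themselves are carved out later in
 * fec_enet_init().
 */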
static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;

		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
					txq->bd.ring_size * TSO_HEADER_SIZE,
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}

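/* Fill one RX ring with freshly allocated, DMA-mapped skbs and hand every
 * descriptor to the hardware (BD_ENET_RX_EMPTY).
 */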
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

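/* Allocate the TX bounce buffers; TX descriptors start out empty and
 * owned by software.
 */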
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		if (fec_enet_alloc_rxq_buffers(ndev, i))
			return -ENOMEM;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fec_enet_alloc_txq_buffers(ndev, i))
			return -ENOMEM;
	return 0;
}

static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	bool reset_again;

	ret = pm_runtime_get_sync(&fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto clk_enable;

	/* During the first fec_enet_open call the PHY isn't probed at this
	 * point. Some PHYs need to be reset after the FEC clocks have been
	 * enabled (see phy_reset_after_clk_enable()), which can only happen
	 * once the PHY is available. Remember whether the reset is still
	 * pending and perform it after fec_enet_mii_probe() below.
	 */
	if (ndev->phydev && ndev->phydev->drv)
		reset_again = false;
	else
		reset_again = true;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Probe and connect to PHY when open the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	/* Now that the PHY is certainly available, do the reset that had
	 * to be postponed above.
	 */
	if (reset_again)
		phy_reset_after_clk_enable(ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	napi_enable(&fep->napi);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}

static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(ndev->phydev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	fec_enet_update_ethtool_stats(ndev);

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remember it might be necessary to
 * inhibit all multicast reception.  If the hash table contains all
 * ones because we flood all multicast packets, then whether a packet is
 * multicast or not is irrelevant.
 */
#define FEC_HASH_BITS	6	/* #bits in hash */

static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses in hash register */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = ether_crc_le(ndev->addr_len, ha->addr);

		/* only upper 6 bits (FEC_HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}

static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	/* Add netif status check here to avoid system hang in below case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * After ethx down, fec all clocks are gated off and then register
	 * access is no longer allowed.
	 */
	if (!netif_running(ndev))
		return 0;

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
	       (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
	       fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
	       fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif

static inline void fec_enet_set_netdev_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}
}

static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}

static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};

static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};

/*
 * XXX:  We need to clean up on failure exits here.
 */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);
	int ret;

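	/* Ring indexing relies on shifts by dsize_log2, so the descriptor
	 * size must be a power of two.
	 */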
	WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	/* Check mask of the streaming and coherent API */
	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
	if (ret < 0) {
		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
		return ret;
	}

	ret = fec_enet_alloc_queue(ndev);
	if (ret)
		return ret;

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base) {
		fec_enet_free_queue(ndev);
		return -ENOMEM;
	}

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerated csum */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(ndev);
	else
		fec_enet_update_ethtool_stats(ndev);

	return 0;
}

#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return 0;

	err = of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (phy_reset == -EPROBE_DEFER)
		return phy_reset;
	else if (!gpio_is_valid(phy_reset))
		return 0;

	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* valid reset duration should be less than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return err;
	}

	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	gpio_set_value_cansleep(phy_reset, !active_high);

	if (!phy_post_delay)
		return 0;

	if (phy_post_delay > 20)
		msleep(phy_post_delay);
	else
		usleep_range(phy_post_delay * 1000,
			     phy_post_delay * 1000 + 1000);

	return 0;
}
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
	return 0;
}
#endif /* CONFIG_OF */

static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);

	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}

static int fec_enet_get_irq_cnt(struct platform_device *pdev)
{
	int irq_cnt = platform_irq_count(pdev);

	if (irq_cnt > FEC_IRQ_NUM)
		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
	else if (irq_cnt == 2)
		irq_cnt = 1;	/* last for pps */
	else if (irq_cnt <= 0)
		irq_cnt = 1;	/* At least 1 irq is needed */
	return irq_cnt;
}

static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;
	char irq_name[8];
	int irq_cnt;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;
	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		goto failed_clk_ahb;

	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < irq_cnt; i++) {
		snprintf(irq_name, sizeof(irq_name), "int%d", i);
		irq = platform_get_irq_byname(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_reset:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
failed_regulator:
	clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}

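/* With Wake-on-LAN armed, the PHY regulator and pins stay powered during
 * system sleep so a magic packet can still be received.
 */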
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* SOC supply clock to phy, when clock is disabled, phy link down.
	 * SOC control phy regulator, when regulator is disabled, phy link down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			/* Leaving sleep mode: disarm the magic-packet logic */
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

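/* Runtime PM only gates the AHB and IPG bus clocks, typically between
 * MDIO accesses (see FEC_MDIO_PM_TIMEOUT).
 */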
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		return ret;
	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);
	return ret;
}

static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");