/*
 * Fast Ethernet Controller (FEC) driver for Freescale/NXP SoCs
 * (ColdFire and i.MX25/27/28/6x families).
 */
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/string.h>
28#include <linux/pm_runtime.h>
29#include <linux/ptrace.h>
30#include <linux/errno.h>
31#include <linux/ioport.h>
32#include <linux/slab.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <net/ip.h>
41#include <net/tso.h>
42#include <linux/tcp.h>
43#include <linux/udp.h>
44#include <linux/icmp.h>
45#include <linux/spinlock.h>
46#include <linux/workqueue.h>
47#include <linux/bitops.h>
48#include <linux/io.h>
49#include <linux/irq.h>
50#include <linux/clk.h>
51#include <linux/platform_device.h>
52#include <linux/mdio.h>
53#include <linux/phy.h>
54#include <linux/fec.h>
55#include <linux/of.h>
56#include <linux/of_device.h>
57#include <linux/of_gpio.h>
58#include <linux/of_mdio.h>
59#include <linux/of_net.h>
60#include <linux/regulator/consumer.h>
61#include <linux/if_vlan.h>
62#include <linux/pinctrl/consumer.h>
63#include <linux/prefetch.h>
64#include <soc/imx/cpuidle.h>
65
66#include <asm/cacheflush.h>
67
68#include "fec.h"
69
70static void set_multicast_list(struct net_device *ndev);
71static void fec_enet_itr_coal_init(struct net_device *ndev);
72
73#define DRIVER_NAME "fec"
74
#define FEC_ENET_GET_QUEUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
76
77
78#define FEC_ENET_FCE (1 << 5)
79#define FEC_ENET_RSEM_V 0x84
80#define FEC_ENET_RSFL_V 16
81#define FEC_ENET_RAEM_V 0x8
82#define FEC_ENET_RAFL_V 0x8
83#define FEC_ENET_OPD_V 0xFFF0
84#define FEC_MDIO_PM_TIMEOUT 100
85
86static struct platform_device_id fec_devtype[] = {
87 {
88
89 .name = DRIVER_NAME,
90 .driver_data = 0,
91 }, {
92 .name = "imx25-fec",
93 .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
94 }, {
95 .name = "imx27-fec",
96 .driver_data = FEC_QUIRK_MIB_CLEAR,
97 }, {
98 .name = "imx28-fec",
99 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
100 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
101 }, {
102 .name = "imx6q-fec",
103 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
104 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
105 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
106 FEC_QUIRK_HAS_RACC,
107 }, {
108 .name = "mvf600-fec",
109 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
110 }, {
111 .name = "imx6sx-fec",
112 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
113 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
114 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
115 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
116 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
117 }, {
118 .name = "imx6ul-fec",
119 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
120 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
121 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
122 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
123 FEC_QUIRK_HAS_COALESCE,
124 }, {
125
126 }
127};
128MODULE_DEVICE_TABLE(platform, fec_devtype);
129
130enum imx_fec_type {
131 IMX25_FEC = 1,
132 IMX27_FEC,
133 IMX28_FEC,
134 IMX6Q_FEC,
135 MVF600_FEC,
136 IMX6SX_FEC,
137 IMX6UL_FEC,
138};
139
140static const struct of_device_id fec_dt_ids[] = {
141 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
142 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
143 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
144 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
145 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
146 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
147 { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
148 { }
149};
150MODULE_DEVICE_TABLE(of, fec_dt_ids);
151
152static unsigned char macaddr[ETH_ALEN];
153module_param_array(macaddr, byte, NULL, 0);
154MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
155
156#if defined(CONFIG_M5272)
157
158
159
160
161#if defined(CONFIG_NETtel)
162#define FEC_FLASHMAC 0xf0006006
163#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
164#define FEC_FLASHMAC 0xf0006000
165#elif defined(CONFIG_CANCam)
166#define FEC_FLASHMAC 0xf0020000
#elif defined(CONFIG_M5272C3)
168#define FEC_FLASHMAC (0xffe04000 + 4)
169#elif defined(CONFIG_MOD5272)
170#define FEC_FLASHMAC 0xffc0406b
171#else
172#define FEC_FLASHMAC 0
173#endif
174#endif
175
176
177
178
179
180
181#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
182#define PKT_MINBUF_SIZE 64
183
184
185#define FEC_RACC_IPDIS (1 << 1)
186#define FEC_RACC_PRODIS (1 << 2)
187#define FEC_RACC_SHIFT16 BIT(7)
188#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
189
190
191#define FEC_MIB_CTRLSTAT_DISABLE BIT(31)
192
193
194
195
196
197
198#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
199 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
200 defined(CONFIG_ARM64)
201#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
202#else
203#define OPT_FRAME_SIZE 0
204#endif
205
206
207#define FEC_MMFR_ST (1 << 30)
208#define FEC_MMFR_OP_READ (2 << 28)
209#define FEC_MMFR_OP_WRITE (1 << 28)
210#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
211#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
212#define FEC_MMFR_TA (2 << 16)
213#define FEC_MMFR_DATA(v) (v & 0xffff)
214
215#define FEC_ECR_MAGICEN (1 << 2)
216#define FEC_ECR_SLEEP (1 << 3)
217
218#define FEC_MII_TIMEOUT 30000
219
220
221#define TX_TIMEOUT (2 * HZ)
222
223#define FEC_PAUSE_FLAG_AUTONEG 0x1
224#define FEC_PAUSE_FLAG_ENABLE 0x2
225#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
226#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
227#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
228
229#define COPYBREAK_DEFAULT 256
230
231
232#define FEC_MAX_TSO_SEGS 100
233#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
234
235#define IS_TSO_HEADER(txq, addr) \
236 ((addr >= txq->tso_hdrs_dma) && \
237 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
238
239static int mii_cnt;
240
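/* Buffer descriptor ring helpers.  The TX/RX rings are flat DMA-coherent
 * arrays of (optionally extended) descriptors, so walking the ring is plain
 * pointer arithmetic that wraps at bd->base/bd->last, and the free count is
 * the distance from the software "cur" pointer back to dirty_tx.
 */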
241static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
242 struct bufdesc_prop *bd)
243{
244 return (bdp >= bd->last) ? bd->base
245 : (struct bufdesc *)(((void *)bdp) + bd->dsize);
246}
247
248static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
249 struct bufdesc_prop *bd)
250{
251 return (bdp <= bd->base) ? bd->last
252 : (struct bufdesc *)(((void *)bdp) - bd->dsize);
253}
254
255static int fec_enet_get_bd_index(struct bufdesc *bdp,
256 struct bufdesc_prop *bd)
257{
258 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
259}
260
261static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
262{
263 int entries;
264
265 entries = (((const char *)txq->dirty_tx -
266 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
267
268 return entries >= 0 ? entries : entries + txq->bd.ring_size;
269}
270
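/* Byte-swap a buffer in place (or into a destination buffer), one 32-bit
 * word at a time.  Used on controllers with FEC_QUIRK_SWAP_FRAME (e.g.
 * i.MX28), whose DMA engine handles frame data in the opposite endianness.
 */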
271static void swap_buffer(void *bufaddr, int len)
272{
273 int i;
274 unsigned int *buf = bufaddr;
275
276 for (i = 0; i < len; i += 4, buf++)
277 swab32s(buf);
278}
279
280static void swap_buffer2(void *dst_buf, void *src_buf, int len)
281{
282 int i;
283 unsigned int *src = src_buf;
284 unsigned int *dst = dst_buf;
285
286 for (i = 0; i < len; i += 4, src++, dst++)
287 *dst = swab32p(src);
288}
289
290static void fec_dump(struct net_device *ndev)
291{
292 struct fec_enet_private *fep = netdev_priv(ndev);
293 struct bufdesc *bdp;
294 struct fec_enet_priv_tx_q *txq;
295 int index = 0;
296
297 netdev_info(ndev, "TX ring dump\n");
298 pr_info("Nr SC addr len SKB\n");
299
300 txq = fep->tx_queue[0];
301 bdp = txq->bd.base;
302
303 do {
304 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
305 index,
306 bdp == txq->bd.cur ? 'S' : ' ',
307 bdp == txq->dirty_tx ? 'H' : ' ',
308 fec16_to_cpu(bdp->cbd_sc),
309 fec32_to_cpu(bdp->cbd_bufaddr),
310 fec16_to_cpu(bdp->cbd_datlen),
311 txq->tx_skbuff[index]);
312 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
313 index++;
314 } while (bdp != txq->bd.base);
315}
316
317static inline bool is_ipv4_pkt(struct sk_buff *skb)
318{
319 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
320}
321
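/* For CHECKSUM_PARTIAL skbs, zero the IP header checksum and the L4
 * checksum field so the controller can insert both on transmit.
 */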
322static int
323fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
324{
325
326 if (skb->ip_summed != CHECKSUM_PARTIAL)
327 return 0;
328
329 if (unlikely(skb_cow_head(skb, 0)))
330 return -1;
331
332 if (is_ipv4_pkt(skb))
333 ip_hdr(skb)->check = 0;
334 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
335
336 return 0;
337}
338
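/* Map each paged fragment of the skb and fill one TX descriptor per
 * fragment, bouncing through tx_bounce[] when alignment or the swap-frame
 * quirk requires it.  Returns the last descriptor used, or an ERR_PTR()
 * after unmapping everything on a DMA mapping failure.
 */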
339static struct bufdesc *
340fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
341 struct sk_buff *skb,
342 struct net_device *ndev)
343{
344 struct fec_enet_private *fep = netdev_priv(ndev);
345 struct bufdesc *bdp = txq->bd.cur;
346 struct bufdesc_ex *ebdp;
347 int nr_frags = skb_shinfo(skb)->nr_frags;
348 int frag, frag_len;
349 unsigned short status;
350 unsigned int estatus = 0;
351 skb_frag_t *this_frag;
352 unsigned int index;
353 void *bufaddr;
354 dma_addr_t addr;
355 int i;
356
357 for (frag = 0; frag < nr_frags; frag++) {
358 this_frag = &skb_shinfo(skb)->frags[frag];
359 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
360 ebdp = (struct bufdesc_ex *)bdp;
361
362 status = fec16_to_cpu(bdp->cbd_sc);
363 status &= ~BD_ENET_TX_STATS;
364 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
365 frag_len = skb_shinfo(skb)->frags[frag].size;
366
367
368 if (frag == nr_frags - 1) {
369 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
370 if (fep->bufdesc_ex) {
371 estatus |= BD_ENET_TX_INT;
372 if (unlikely(skb_shinfo(skb)->tx_flags &
373 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
374 estatus |= BD_ENET_TX_TS;
375 }
376 }
377
378 if (fep->bufdesc_ex) {
379 if (fep->quirks & FEC_QUIRK_HAS_AVB)
380 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
381 if (skb->ip_summed == CHECKSUM_PARTIAL)
382 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
383 ebdp->cbd_bdu = 0;
384 ebdp->cbd_esc = cpu_to_fec32(estatus);
385 }
386
387 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
388
389 index = fec_enet_get_bd_index(bdp, &txq->bd);
390 if (((unsigned long) bufaddr) & fep->tx_align ||
391 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
392 memcpy(txq->tx_bounce[index], bufaddr, frag_len);
393 bufaddr = txq->tx_bounce[index];
394
395 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
396 swap_buffer(bufaddr, frag_len);
397 }
398
399 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
400 DMA_TO_DEVICE);
401 if (dma_mapping_error(&fep->pdev->dev, addr)) {
402 if (net_ratelimit())
403 netdev_err(ndev, "Tx DMA memory map failed\n");
404 goto dma_mapping_error;
405 }
406
407 bdp->cbd_bufaddr = cpu_to_fec32(addr);
408 bdp->cbd_datlen = cpu_to_fec16(frag_len);
409
410
411
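		/* Publish the buffer address and length before handing the
		 * descriptor to the hardware by setting the READY bit below.
		 */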
412 wmb();
413 bdp->cbd_sc = cpu_to_fec16(status);
414 }
415
416 return bdp;
417dma_mapping_error:
418 bdp = txq->bd.cur;
419 for (i = 0; i < frag; i++) {
420 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
421 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
422 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
423 }
424 return ERR_PTR(-ENOMEM);
425}
426
427static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
428 struct sk_buff *skb, struct net_device *ndev)
429{
430 struct fec_enet_private *fep = netdev_priv(ndev);
431 int nr_frags = skb_shinfo(skb)->nr_frags;
432 struct bufdesc *bdp, *last_bdp;
433 void *bufaddr;
434 dma_addr_t addr;
435 unsigned short status;
436 unsigned short buflen;
437 unsigned int estatus = 0;
438 unsigned int index;
439 int entries_free;
440
441 entries_free = fec_enet_get_free_txdesc_num(txq);
442 if (entries_free < MAX_SKB_FRAGS + 1) {
443 dev_kfree_skb_any(skb);
444 if (net_ratelimit())
445 netdev_err(ndev, "NOT enough BD for SG!\n");
446 return NETDEV_TX_OK;
447 }
448
449
450 if (fec_enet_clear_csum(skb, ndev)) {
451 dev_kfree_skb_any(skb);
452 return NETDEV_TX_OK;
453 }
454
455
456 bdp = txq->bd.cur;
457 last_bdp = bdp;
458 status = fec16_to_cpu(bdp->cbd_sc);
459 status &= ~BD_ENET_TX_STATS;
460
461
462 bufaddr = skb->data;
463 buflen = skb_headlen(skb);
464
465 index = fec_enet_get_bd_index(bdp, &txq->bd);
466 if (((unsigned long) bufaddr) & fep->tx_align ||
467 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
468 memcpy(txq->tx_bounce[index], skb->data, buflen);
469 bufaddr = txq->tx_bounce[index];
470
471 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
472 swap_buffer(bufaddr, buflen);
473 }
474
475
476 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
477 if (dma_mapping_error(&fep->pdev->dev, addr)) {
478 dev_kfree_skb_any(skb);
479 if (net_ratelimit())
480 netdev_err(ndev, "Tx DMA memory map failed\n");
481 return NETDEV_TX_OK;
482 }
483
484 if (nr_frags) {
485 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
486 if (IS_ERR(last_bdp)) {
487 dma_unmap_single(&fep->pdev->dev, addr,
488 buflen, DMA_TO_DEVICE);
489 dev_kfree_skb_any(skb);
490 return NETDEV_TX_OK;
491 }
492 } else {
493 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
494 if (fep->bufdesc_ex) {
495 estatus = BD_ENET_TX_INT;
496 if (unlikely(skb_shinfo(skb)->tx_flags &
497 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
498 estatus |= BD_ENET_TX_TS;
499 }
500 }
501 bdp->cbd_bufaddr = cpu_to_fec32(addr);
502 bdp->cbd_datlen = cpu_to_fec16(buflen);
503
504 if (fep->bufdesc_ex) {
505
506 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
507
508 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
509 fep->hwts_tx_en))
510 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
511
512 if (fep->quirks & FEC_QUIRK_HAS_AVB)
513 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
514
515 if (skb->ip_summed == CHECKSUM_PARTIAL)
516 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
517
518 ebdp->cbd_bdu = 0;
519 ebdp->cbd_esc = cpu_to_fec32(estatus);
520 }
521
522 index = fec_enet_get_bd_index(last_bdp, &txq->bd);
523
524 txq->tx_skbuff[index] = skb;
525
526
527
528
529 wmb();
530
531
532
533
534 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
535 bdp->cbd_sc = cpu_to_fec16(status);
536
537
538 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
539
540 skb_tx_timestamp(skb);
541
542
543
544
545 wmb();
546 txq->bd.cur = bdp;
547
548
549 writel(0, txq->bd.reg_desc_active);
550
551 return 0;
552}
553
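/* Fill one TX descriptor with a chunk of TSO payload.  last_tcp marks the
 * final chunk of the current segment (sets the LAST bit); is_last marks the
 * final chunk of the whole skb and is the only descriptor that requests a
 * transmit interrupt.
 */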
554static int
555fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
556 struct net_device *ndev,
557 struct bufdesc *bdp, int index, char *data,
558 int size, bool last_tcp, bool is_last)
559{
560 struct fec_enet_private *fep = netdev_priv(ndev);
561 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
562 unsigned short status;
563 unsigned int estatus = 0;
564 dma_addr_t addr;
565
566 status = fec16_to_cpu(bdp->cbd_sc);
567 status &= ~BD_ENET_TX_STATS;
568
569 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
570
571 if (((unsigned long) data) & fep->tx_align ||
572 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
573 memcpy(txq->tx_bounce[index], data, size);
574 data = txq->tx_bounce[index];
575
576 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
577 swap_buffer(data, size);
578 }
579
580 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
581 if (dma_mapping_error(&fep->pdev->dev, addr)) {
582 dev_kfree_skb_any(skb);
583 if (net_ratelimit())
584 netdev_err(ndev, "Tx DMA memory map failed\n");
585 return NETDEV_TX_BUSY;
586 }
587
588 bdp->cbd_datlen = cpu_to_fec16(size);
589 bdp->cbd_bufaddr = cpu_to_fec32(addr);
590
591 if (fep->bufdesc_ex) {
592 if (fep->quirks & FEC_QUIRK_HAS_AVB)
593 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
594 if (skb->ip_summed == CHECKSUM_PARTIAL)
595 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
596 ebdp->cbd_bdu = 0;
597 ebdp->cbd_esc = cpu_to_fec32(estatus);
598 }
599
600
601 if (last_tcp)
602 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
603 if (is_last) {
604 status |= BD_ENET_TX_INTR;
605 if (fep->bufdesc_ex)
606 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
607 }
608
609 bdp->cbd_sc = cpu_to_fec16(status);
610
611 return 0;
612}
613
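/* Fill the TX descriptor for a TSO segment header.  Headers live in the
 * pre-mapped tso_hdrs area, so a fresh DMA mapping is only needed when the
 * header has to be bounced for alignment or the swap-frame quirk.
 */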
614static int
615fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
616 struct sk_buff *skb, struct net_device *ndev,
617 struct bufdesc *bdp, int index)
618{
619 struct fec_enet_private *fep = netdev_priv(ndev);
620 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
621 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
622 void *bufaddr;
623 unsigned long dmabuf;
624 unsigned short status;
625 unsigned int estatus = 0;
626
627 status = fec16_to_cpu(bdp->cbd_sc);
628 status &= ~BD_ENET_TX_STATS;
629 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
630
631 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
632 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
633 if (((unsigned long)bufaddr) & fep->tx_align ||
634 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
635 memcpy(txq->tx_bounce[index], skb->data, hdr_len);
636 bufaddr = txq->tx_bounce[index];
637
638 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
639 swap_buffer(bufaddr, hdr_len);
640
641 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
642 hdr_len, DMA_TO_DEVICE);
643 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
644 dev_kfree_skb_any(skb);
645 if (net_ratelimit())
646 netdev_err(ndev, "Tx DMA memory map failed\n");
647 return NETDEV_TX_BUSY;
648 }
649 }
650
651 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
652 bdp->cbd_datlen = cpu_to_fec16(hdr_len);
653
654 if (fep->bufdesc_ex) {
655 if (fep->quirks & FEC_QUIRK_HAS_AVB)
656 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
657 if (skb->ip_summed == CHECKSUM_PARTIAL)
658 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
659 ebdp->cbd_bdu = 0;
660 ebdp->cbd_esc = cpu_to_fec32(estatus);
661 }
662
663 bdp->cbd_sc = cpu_to_fec16(status);
664
665 return 0;
666}
667
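/* Software TSO: slice a GSO skb into gso_size-sized segments with the
 * net/tso.c helpers, emitting one header descriptor plus the required data
 * descriptors per segment.
 */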
668static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
669 struct sk_buff *skb,
670 struct net_device *ndev)
671{
672 struct fec_enet_private *fep = netdev_priv(ndev);
673 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
674 int total_len, data_left;
675 struct bufdesc *bdp = txq->bd.cur;
676 struct tso_t tso;
677 unsigned int index = 0;
678 int ret;
679
680 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
681 dev_kfree_skb_any(skb);
682 if (net_ratelimit())
683 netdev_err(ndev, "NOT enough BD for TSO!\n");
684 return NETDEV_TX_OK;
685 }
686
687
688 if (fec_enet_clear_csum(skb, ndev)) {
689 dev_kfree_skb_any(skb);
690 return NETDEV_TX_OK;
691 }
692
693
694 tso_start(skb, &tso);
695
696 total_len = skb->len - hdr_len;
697 while (total_len > 0) {
698 char *hdr;
699
700 index = fec_enet_get_bd_index(bdp, &txq->bd);
701 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
702 total_len -= data_left;
703
704
705 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
706 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
707 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
708 if (ret)
709 goto err_release;
710
711 while (data_left > 0) {
712 int size;
713
714 size = min_t(int, tso.size, data_left);
715 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
716 index = fec_enet_get_bd_index(bdp, &txq->bd);
717 ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
718 bdp, index,
719 tso.data, size,
720 size == data_left,
721 total_len == 0);
722 if (ret)
723 goto err_release;
724
725 data_left -= size;
726 tso_build_data(skb, &tso, size);
727 }
728
729 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
730 }
731
732
733 txq->tx_skbuff[index] = skb;
734
735 skb_tx_timestamp(skb);
736 txq->bd.cur = bdp;
737
738
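	/* Trigger transmission start.  With FEC_QUIRK_ERR007885 the
	 * descriptor-active (TDAR) write races against the DMA engine
	 * clearing the bit and can hang the TX DMA, so only kick it when one
	 * of several consecutive reads sees it already idle.
	 */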
739 if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
740 !readl(txq->bd.reg_desc_active) ||
741 !readl(txq->bd.reg_desc_active) ||
742 !readl(txq->bd.reg_desc_active) ||
743 !readl(txq->bd.reg_desc_active))
744 writel(0, txq->bd.reg_desc_active);
745
746 return 0;
747
748err_release:
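	/* TODO: release the data descriptors already consumed for this skb */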
749
750 return ret;
751}
752
753static netdev_tx_t
754fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
755{
756 struct fec_enet_private *fep = netdev_priv(ndev);
757 int entries_free;
758 unsigned short queue;
759 struct fec_enet_priv_tx_q *txq;
760 struct netdev_queue *nq;
761 int ret;
762
763 queue = skb_get_queue_mapping(skb);
764 txq = fep->tx_queue[queue];
765 nq = netdev_get_tx_queue(ndev, queue);
766
767 if (skb_is_gso(skb))
768 ret = fec_enet_txq_submit_tso(txq, skb, ndev);
769 else
770 ret = fec_enet_txq_submit_skb(txq, skb, ndev);
771 if (ret)
772 return ret;
773
774 entries_free = fec_enet_get_free_txdesc_num(txq);
775 if (entries_free <= txq->tx_stop_threshold)
776 netif_tx_stop_queue(nq);
777
778 return NETDEV_TX_OK;
779}
780
781
782
783static void fec_enet_bd_init(struct net_device *dev)
784{
785 struct fec_enet_private *fep = netdev_priv(dev);
786 struct fec_enet_priv_tx_q *txq;
787 struct fec_enet_priv_rx_q *rxq;
788 struct bufdesc *bdp;
789 unsigned int i;
790 unsigned int q;
791
792 for (q = 0; q < fep->num_rx_queues; q++) {
793
794 rxq = fep->rx_queue[q];
795 bdp = rxq->bd.base;
796
797 for (i = 0; i < rxq->bd.ring_size; i++) {
798
799
800 if (bdp->cbd_bufaddr)
801 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
802 else
803 bdp->cbd_sc = cpu_to_fec16(0);
804 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
805 }
806
807
808 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
809 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
810
811 rxq->bd.cur = rxq->bd.base;
812 }
813
814 for (q = 0; q < fep->num_tx_queues; q++) {
815
816 txq = fep->tx_queue[q];
817 bdp = txq->bd.base;
818 txq->bd.cur = bdp;
819
820 for (i = 0; i < txq->bd.ring_size; i++) {
821
822 bdp->cbd_sc = cpu_to_fec16(0);
823 if (bdp->cbd_bufaddr &&
824 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
825 dma_unmap_single(&fep->pdev->dev,
826 fec32_to_cpu(bdp->cbd_bufaddr),
827 fec16_to_cpu(bdp->cbd_datlen),
828 DMA_TO_DEVICE);
829 if (txq->tx_skbuff[i]) {
830 dev_kfree_skb_any(txq->tx_skbuff[i]);
831 txq->tx_skbuff[i] = NULL;
832 }
833 bdp->cbd_bufaddr = cpu_to_fec32(0);
834 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
835 }
836
837
838 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
839 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
840 txq->dirty_tx = bdp;
841 }
842}
843
844static void fec_enet_active_rxring(struct net_device *ndev)
845{
846 struct fec_enet_private *fep = netdev_priv(ndev);
847 int i;
848
849 for (i = 0; i < fep->num_rx_queues; i++)
850 writel(0, fep->rx_queue[i]->bd.reg_desc_active);
851}
852
853static void fec_enet_enable_ring(struct net_device *ndev)
854{
855 struct fec_enet_private *fep = netdev_priv(ndev);
856 struct fec_enet_priv_tx_q *txq;
857 struct fec_enet_priv_rx_q *rxq;
858 int i;
859
860 for (i = 0; i < fep->num_rx_queues; i++) {
861 rxq = fep->rx_queue[i];
862 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
863 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
864
865
866 if (i)
867 writel(RCMR_MATCHEN | RCMR_CMP(i),
868 fep->hwp + FEC_RCMR(i));
869 }
870
871 for (i = 0; i < fep->num_tx_queues; i++) {
872 txq = fep->tx_queue[i];
873 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
874
875
876 if (i)
877 writel(DMA_CLASS_EN | IDLE_SLOPE(i),
878 fep->hwp + FEC_DMA_CFG(i));
879 }
880}
881
882static void fec_enet_reset_skb(struct net_device *ndev)
883{
884 struct fec_enet_private *fep = netdev_priv(ndev);
885 struct fec_enet_priv_tx_q *txq;
886 int i, j;
887
888 for (i = 0; i < fep->num_tx_queues; i++) {
889 txq = fep->tx_queue[i];
890
891 for (j = 0; j < txq->bd.ring_size; j++) {
892 if (txq->tx_skbuff[j]) {
893 dev_kfree_skb_any(txq->tx_skbuff[j]);
894 txq->tx_skbuff[j] = NULL;
895 }
896 }
897 }
898}
899
900
901
902
903
904
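/*
 * This function is called to start or restart the FEC during a link change,
 * transmit timeout, or reconfiguration: it resets the hardware, reprograms
 * the MAC address, descriptor rings, duplex, pause frames, checksum offload
 * and interrupt coalescing, then re-enables the MAC and receive rings.
 */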
905static void
906fec_restart(struct net_device *ndev)
907{
908 struct fec_enet_private *fep = netdev_priv(ndev);
909 u32 val;
910 u32 temp_mac[2];
911 u32 rcntl = OPT_FRAME_SIZE | 0x04;
912 u32 ecntl = 0x2;
913
914
915
916
917
918 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
919 writel(0, fep->hwp + FEC_ECNTRL);
920 } else {
921 writel(1, fep->hwp + FEC_ECNTRL);
922 udelay(10);
923 }
924
925
926
927
928
929 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
930 writel((__force u32)cpu_to_be32(temp_mac[0]),
931 fep->hwp + FEC_ADDR_LOW);
932 writel((__force u32)cpu_to_be32(temp_mac[1]),
933 fep->hwp + FEC_ADDR_HIGH);
934
935
936 writel(0xffffffff, fep->hwp + FEC_IEVENT);
937
938 fec_enet_bd_init(ndev);
939
940 fec_enet_enable_ring(ndev);
941
942
943 fec_enet_reset_skb(ndev);
944
945
946 if (fep->full_duplex == DUPLEX_FULL) {
947
948 writel(0x04, fep->hwp + FEC_X_CNTRL);
949 } else {
950
951 rcntl |= 0x02;
952 writel(0x0, fep->hwp + FEC_X_CNTRL);
953 }
954
955
956 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
957
958#if !defined(CONFIG_M5272)
959 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
960 val = readl(fep->hwp + FEC_RACC);
961
962 val |= FEC_RACC_SHIFT16;
963 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
964
965 val |= FEC_RACC_OPTIONS;
966 else
967 val &= ~FEC_RACC_OPTIONS;
968 writel(val, fep->hwp + FEC_RACC);
969 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
970 }
971#endif
972
973
974
975
976
977 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
978
979 rcntl |= 0x40000000 | 0x00000020;
980
981
982 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
983 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
984 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
985 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
986 rcntl |= (1 << 6);
987 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
988 rcntl |= (1 << 8);
989 else
990 rcntl &= ~(1 << 8);
991
992
993 if (ndev->phydev) {
994 if (ndev->phydev->speed == SPEED_1000)
995 ecntl |= (1 << 5);
996 else if (ndev->phydev->speed == SPEED_100)
997 rcntl &= ~(1 << 9);
998 else
999 rcntl |= (1 << 9);
1000 }
1001 } else {
1002#ifdef FEC_MIIGSK_ENR
1003 if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1004 u32 cfgr;
1005
1006 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1007 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1008 udelay(1);
1009
1010
1011
1012
1013
1014
1015 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1016 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1017 if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1018 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1019 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1020
1021
1022 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1023 }
1024#endif
1025 }
1026
1027#if !defined(CONFIG_M5272)
1028
1029 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1030 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1031 ndev->phydev && ndev->phydev->pause)) {
1032 rcntl |= FEC_ENET_FCE;
1033
1034
1035 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1036 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1037 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1038 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1039
1040
1041 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1042 } else {
1043 rcntl &= ~FEC_ENET_FCE;
1044 }
1045#endif
1046
1047 writel(rcntl, fep->hwp + FEC_R_CNTRL);
1048
1049
1050 set_multicast_list(ndev);
1051#ifndef CONFIG_M5272
1052 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1053 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1054#endif
1055
1056 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1057
1058 ecntl |= (1 << 8);
1059
1060 writel(1 << 8, fep->hwp + FEC_X_WMRK);
1061 }
1062
1063 if (fep->bufdesc_ex)
1064 ecntl |= (1 << 4);
1065
1066#ifndef CONFIG_M5272
1067
1068 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1069#endif
1070
1071
1072 writel(ecntl, fep->hwp + FEC_ECNTRL);
1073 fec_enet_active_rxring(ndev);
1074
1075 if (fep->bufdesc_ex)
1076 fec_ptp_start_cyclecounter(ndev);
1077
1078
1079 if (fep->link)
1080 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1081 else
1082 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1083
1084
1085 fec_enet_itr_coal_init(ndev);
1086
1087}
1088
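/*
 * Stop the controller.  A graceful transmit stop is requested first; the
 * MAC is then fully disabled unless Wake-on-LAN sleep is active, in which
 * case it is left in magic-packet detection mode.
 */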
1089static void
1090fec_stop(struct net_device *ndev)
1091{
1092 struct fec_enet_private *fep = netdev_priv(ndev);
1093 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1094 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1095 u32 val;
1096
1097
1098 if (fep->link) {
1099 writel(1, fep->hwp + FEC_X_CNTRL);
1100 udelay(10);
1101 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1102 netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1103 }
1104
1105
1106
1107
1108
1109 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1110 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
1111 writel(0, fep->hwp + FEC_ECNTRL);
1112 } else {
1113 writel(1, fep->hwp + FEC_ECNTRL);
1114 udelay(10);
1115 }
1116 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1117 } else {
1118 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1119 val = readl(fep->hwp + FEC_ECNTRL);
1120 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1121 writel(val, fep->hwp + FEC_ECNTRL);
1122
1123 if (pdata && pdata->sleep_mode_enable)
1124 pdata->sleep_mode_enable(true);
1125 }
1126 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1127
1128
1129 if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1130 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1131 writel(2, fep->hwp + FEC_ECNTRL);
1132 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1133 }
1134}
1135
1136
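/* Transmit watchdog: record the error, dump the TX ring for debugging and
 * defer the restart to a workqueue since it needs the rtnl lock.
 */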
1137static void
1138fec_timeout(struct net_device *ndev, unsigned int txqueue)
1139{
1140 struct fec_enet_private *fep = netdev_priv(ndev);
1141
1142 fec_dump(ndev);
1143
1144 ndev->stats.tx_errors++;
1145
1146 schedule_work(&fep->tx_timeout_work);
1147}
1148
1149static void fec_enet_timeout_work(struct work_struct *work)
1150{
1151 struct fec_enet_private *fep =
1152 container_of(work, struct fec_enet_private, tx_timeout_work);
1153 struct net_device *ndev = fep->netdev;
1154
1155 rtnl_lock();
1156 if (netif_device_present(ndev) || netif_running(ndev)) {
1157 napi_disable(&fep->napi);
1158 netif_tx_lock_bh(ndev);
1159 fec_restart(ndev);
1160 netif_wake_queue(ndev);
1161 netif_tx_unlock_bh(ndev);
1162 napi_enable(&fep->napi);
1163 }
1164 rtnl_unlock();
1165}
1166
1167static void
1168fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1169 struct skb_shared_hwtstamps *hwtstamps)
1170{
1171 unsigned long flags;
1172 u64 ns;
1173
1174 spin_lock_irqsave(&fep->tmreg_lock, flags);
1175 ns = timecounter_cyc2time(&fep->tc, ts);
1176 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1177
1178 memset(hwtstamps, 0, sizeof(*hwtstamps));
1179 hwtstamps->hwtstamp = ns_to_ktime(ns);
1180}
1181
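/* Reclaim completed descriptors for one TX queue: unmap the buffers, update
 * error statistics and hardware TX timestamps, free the skbs and wake the
 * queue once enough descriptors are free again.
 */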
1182static void
1183fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1184{
1185 struct fec_enet_private *fep;
1186 struct bufdesc *bdp;
1187 unsigned short status;
1188 struct sk_buff *skb;
1189 struct fec_enet_priv_tx_q *txq;
1190 struct netdev_queue *nq;
1191 int index = 0;
1192 int entries_free;
1193
1194 fep = netdev_priv(ndev);
1195
	queue_id = FEC_ENET_GET_QUEUE(queue_id);
1197
1198 txq = fep->tx_queue[queue_id];
1199
1200 nq = netdev_get_tx_queue(ndev, queue_id);
1201 bdp = txq->dirty_tx;
1202
1203
1204 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1205
1206 while (bdp != READ_ONCE(txq->bd.cur)) {
1207
1208 rmb();
1209 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1210 if (status & BD_ENET_TX_READY)
1211 break;
1212
1213 index = fec_enet_get_bd_index(bdp, &txq->bd);
1214
1215 skb = txq->tx_skbuff[index];
1216 txq->tx_skbuff[index] = NULL;
1217 if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1218 dma_unmap_single(&fep->pdev->dev,
1219 fec32_to_cpu(bdp->cbd_bufaddr),
1220 fec16_to_cpu(bdp->cbd_datlen),
1221 DMA_TO_DEVICE);
1222 bdp->cbd_bufaddr = cpu_to_fec32(0);
1223 if (!skb)
1224 goto skb_done;
1225
1226
1227 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1228 BD_ENET_TX_RL | BD_ENET_TX_UN |
1229 BD_ENET_TX_CSL)) {
1230 ndev->stats.tx_errors++;
1231 if (status & BD_ENET_TX_HB)
1232 ndev->stats.tx_heartbeat_errors++;
1233 if (status & BD_ENET_TX_LC)
1234 ndev->stats.tx_window_errors++;
1235 if (status & BD_ENET_TX_RL)
1236 ndev->stats.tx_aborted_errors++;
1237 if (status & BD_ENET_TX_UN)
1238 ndev->stats.tx_fifo_errors++;
1239 if (status & BD_ENET_TX_CSL)
1240 ndev->stats.tx_carrier_errors++;
1241 } else {
1242 ndev->stats.tx_packets++;
1243 ndev->stats.tx_bytes += skb->len;
1244 }
1245
1246 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
1247 fep->bufdesc_ex) {
1248 struct skb_shared_hwtstamps shhwtstamps;
1249 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1250
1251 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1252 skb_tstamp_tx(skb, &shhwtstamps);
1253 }
1254
1255
1256
1257
1258 if (status & BD_ENET_TX_DEF)
1259 ndev->stats.collisions++;
1260
1261
1262 dev_kfree_skb_any(skb);
1263skb_done:
1264
1265
1266
1267 wmb();
1268 txq->dirty_tx = bdp;
1269
1270
1271 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1272
1273
1274
1275 if (netif_queue_stopped(ndev)) {
1276 entries_free = fec_enet_get_free_txdesc_num(txq);
1277 if (entries_free >= txq->tx_wake_threshold)
1278 netif_tx_wake_queue(nq);
1279 }
1280 }
1281
1282
1283 if (bdp != txq->bd.cur &&
1284 readl(txq->bd.reg_desc_active) == 0)
1285 writel(0, txq->bd.reg_desc_active);
1286}
1287
1288static void
1289fec_enet_tx(struct net_device *ndev)
1290{
1291 struct fec_enet_private *fep = netdev_priv(ndev);
1292 u16 queue_id;
1293
1294 for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
1295 clear_bit(queue_id, &fep->work_tx);
1296 fec_enet_tx_queue(ndev, queue_id);
1297 }
1298 return;
1299}
1300
1301static int
1302fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
1303{
1304 struct fec_enet_private *fep = netdev_priv(ndev);
1305 int off;
1306
1307 off = ((unsigned long)skb->data) & fep->rx_align;
1308 if (off)
1309 skb_reserve(skb, fep->rx_align + 1 - off);
1310
	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data,
						       FEC_ENET_RX_FRSIZE - fep->rx_align,
						       DMA_FROM_DEVICE));
1312 if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1313 if (net_ratelimit())
1314 netdev_err(ndev, "Rx DMA memory map failed\n");
1315 return -ENOMEM;
1316 }
1317
1318 return 0;
1319}
1320
1321static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1322 struct bufdesc *bdp, u32 length, bool swap)
1323{
1324 struct fec_enet_private *fep = netdev_priv(ndev);
1325 struct sk_buff *new_skb;
1326
1327 if (length > fep->rx_copybreak)
1328 return false;
1329
1330 new_skb = netdev_alloc_skb(ndev, length);
1331 if (!new_skb)
1332 return false;
1333
1334 dma_sync_single_for_cpu(&fep->pdev->dev,
1335 fec32_to_cpu(bdp->cbd_bufaddr),
1336 FEC_ENET_RX_FRSIZE - fep->rx_align,
1337 DMA_FROM_DEVICE);
1338 if (!swap)
1339 memcpy(new_skb->data, (*skb)->data, length);
1340 else
1341 swap_buffer2(new_skb->data, (*skb)->data, length);
1342 *skb = new_skb;
1343
1344 return true;
1345}
1346
1347
1348
1349
1350
1351
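/* NAPI receive processing for one queue.  rxq->bd.cur points at the next
 * incoming buffer; each received frame is either copied into a small skb
 * (copybreak) or passed up directly with a fresh buffer mapped in its
 * place, after which the descriptor is handed back to the hardware empty.
 */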
1352static int
1353fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1354{
1355 struct fec_enet_private *fep = netdev_priv(ndev);
1356 struct fec_enet_priv_rx_q *rxq;
1357 struct bufdesc *bdp;
1358 unsigned short status;
1359 struct sk_buff *skb_new = NULL;
1360 struct sk_buff *skb;
1361 ushort pkt_len;
1362 __u8 *data;
1363 int pkt_received = 0;
1364 struct bufdesc_ex *ebdp = NULL;
1365 bool vlan_packet_rcvd = false;
1366 u16 vlan_tag;
1367 int index = 0;
1368 bool is_copybreak;
1369 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1370
1371#ifdef CONFIG_M532x
1372 flush_cache_all();
1373#endif
	queue_id = FEC_ENET_GET_QUEUE(queue_id);
1375 rxq = fep->rx_queue[queue_id];
1376
1377
1378
1379
1380 bdp = rxq->bd.cur;
1381
1382 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1383
1384 if (pkt_received >= budget)
1385 break;
1386 pkt_received++;
1387
1388 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
1389
1390
1391 status ^= BD_ENET_RX_LAST;
1392 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1393 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
1394 BD_ENET_RX_CL)) {
1395 ndev->stats.rx_errors++;
1396 if (status & BD_ENET_RX_OV) {
1397
1398 ndev->stats.rx_fifo_errors++;
1399 goto rx_processing_done;
1400 }
1401 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
1402 | BD_ENET_RX_LAST)) {
1403
1404 ndev->stats.rx_length_errors++;
1405 if (status & BD_ENET_RX_LAST)
1406 netdev_err(ndev, "rcv is not +last\n");
1407 }
1408 if (status & BD_ENET_RX_CR)
1409 ndev->stats.rx_crc_errors++;
1410
1411 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
1412 ndev->stats.rx_frame_errors++;
1413 goto rx_processing_done;
1414 }
1415
1416
1417 ndev->stats.rx_packets++;
1418 pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1419 ndev->stats.rx_bytes += pkt_len;
1420
1421 index = fec_enet_get_bd_index(bdp, &rxq->bd);
1422 skb = rxq->rx_skbuff[index];
1423
1424
1425
1426
1427
1428 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
1429 need_swap);
1430 if (!is_copybreak) {
1431 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
1432 if (unlikely(!skb_new)) {
1433 ndev->stats.rx_dropped++;
1434 goto rx_processing_done;
1435 }
1436 dma_unmap_single(&fep->pdev->dev,
1437 fec32_to_cpu(bdp->cbd_bufaddr),
1438 FEC_ENET_RX_FRSIZE - fep->rx_align,
1439 DMA_FROM_DEVICE);
1440 }
1441
1442 prefetch(skb->data - NET_IP_ALIGN);
1443 skb_put(skb, pkt_len - 4);
1444 data = skb->data;
1445
1446 if (!is_copybreak && need_swap)
1447 swap_buffer(data, pkt_len);
1448
1449#if !defined(CONFIG_M5272)
1450 if (fep->quirks & FEC_QUIRK_HAS_RACC)
1451 data = skb_pull_inline(skb, 2);
1452#endif
1453
1454
1455 ebdp = NULL;
1456 if (fep->bufdesc_ex)
1457 ebdp = (struct bufdesc_ex *)bdp;
1458
1459
1460 vlan_packet_rcvd = false;
1461 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1462 fep->bufdesc_ex &&
1463 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1464
1465 struct vlan_hdr *vlan_header =
1466 (struct vlan_hdr *) (data + ETH_HLEN);
1467 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1468
1469 vlan_packet_rcvd = true;
1470
1471 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1472 skb_pull(skb, VLAN_HLEN);
1473 }
1474
1475 skb->protocol = eth_type_trans(skb, ndev);
1476
1477
1478 if (fep->hwts_rx_en && fep->bufdesc_ex)
1479 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1480 skb_hwtstamps(skb));
1481
1482 if (fep->bufdesc_ex &&
1483 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1484 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1485
1486 skb->ip_summed = CHECKSUM_UNNECESSARY;
1487 } else {
1488 skb_checksum_none_assert(skb);
1489 }
1490 }
1491
1492
1493 if (vlan_packet_rcvd)
1494 __vlan_hwaccel_put_tag(skb,
1495 htons(ETH_P_8021Q),
1496 vlan_tag);
1497
1498 napi_gro_receive(&fep->napi, skb);
1499
1500 if (is_copybreak) {
1501 dma_sync_single_for_device(&fep->pdev->dev,
1502 fec32_to_cpu(bdp->cbd_bufaddr),
1503 FEC_ENET_RX_FRSIZE - fep->rx_align,
1504 DMA_FROM_DEVICE);
1505 } else {
1506 rxq->rx_skbuff[index] = skb_new;
1507 fec_enet_new_rxbdp(ndev, bdp, skb_new);
1508 }
1509
1510rx_processing_done:
1511
1512 status &= ~BD_ENET_RX_STATS;
1513
1514
1515 status |= BD_ENET_RX_EMPTY;
1516
1517 if (fep->bufdesc_ex) {
1518 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1519
1520 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1521 ebdp->cbd_prot = 0;
1522 ebdp->cbd_bdu = 0;
1523 }
1524
1525
1526
1527 wmb();
1528 bdp->cbd_sc = cpu_to_fec16(status);
1529
1530
1531 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1532
1533
1534
1535
1536
1537 writel(0, rxq->bd.reg_desc_active);
1538 }
1539 rxq->bd.cur = bdp;
1540 return pkt_received;
1541}
1542
1543static int
1544fec_enet_rx(struct net_device *ndev, int budget)
1545{
1546 int pkt_received = 0;
1547 u16 queue_id;
1548 struct fec_enet_private *fep = netdev_priv(ndev);
1549
1550 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
1551 int ret;
1552
1553 ret = fec_enet_rx_queue(ndev,
1554 budget - pkt_received, queue_id);
1555
1556 if (ret < budget - pkt_received)
1557 clear_bit(queue_id, &fep->work_rx);
1558
1559 pkt_received += ret;
1560 }
1561 return pkt_received;
1562}
1563
1564static bool
1565fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
1566{
1567 if (int_events == 0)
1568 return false;
1569
1570 if (int_events & FEC_ENET_RXF_0)
1571 fep->work_rx |= (1 << 2);
1572 if (int_events & FEC_ENET_RXF_1)
1573 fep->work_rx |= (1 << 0);
1574 if (int_events & FEC_ENET_RXF_2)
1575 fep->work_rx |= (1 << 1);
1576
1577 if (int_events & FEC_ENET_TXF_0)
1578 fep->work_tx |= (1 << 2);
1579 if (int_events & FEC_ENET_TXF_1)
1580 fep->work_tx |= (1 << 0);
1581 if (int_events & FEC_ENET_TXF_2)
1582 fep->work_tx |= (1 << 1);
1583
1584 return true;
1585}
1586
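/* Interrupt handler: acknowledge the pending events, note which RX/TX
 * queues need service, mask packet interrupts while NAPI runs, and
 * complete any outstanding MDIO transaction.
 */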
1587static irqreturn_t
1588fec_enet_interrupt(int irq, void *dev_id)
1589{
1590 struct net_device *ndev = dev_id;
1591 struct fec_enet_private *fep = netdev_priv(ndev);
1592 uint int_events;
1593 irqreturn_t ret = IRQ_NONE;
1594
1595 int_events = readl(fep->hwp + FEC_IEVENT);
1596 writel(int_events, fep->hwp + FEC_IEVENT);
1597 fec_enet_collect_events(fep, int_events);
1598
1599 if ((fep->work_tx || fep->work_rx) && fep->link) {
1600 ret = IRQ_HANDLED;
1601
1602 if (napi_schedule_prep(&fep->napi)) {
1603
1604 writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
1605 __napi_schedule(&fep->napi);
1606 }
1607 }
1608
1609 if (int_events & FEC_ENET_MII) {
1610 ret = IRQ_HANDLED;
1611 complete(&fep->mdio_done);
1612 }
1613 return ret;
1614}
1615
1616static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1617{
1618 struct net_device *ndev = napi->dev;
1619 struct fec_enet_private *fep = netdev_priv(ndev);
1620 int pkts;
1621
1622 pkts = fec_enet_rx(ndev, budget);
1623
1624 fec_enet_tx(ndev);
1625
1626 if (pkts < budget) {
1627 napi_complete_done(napi, pkts);
1628 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1629 }
1630 return pkts;
1631}
1632
1633
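/* Work out the MAC address, trying in order: the "macaddr" module
 * parameter, the device tree, platform data (or FLASH on CONFIG_M5272
 * boards), the address already programmed into the controller, and finally
 * a random address.  When the module parameter is used, the last byte is
 * offset by the device id so a second FEC gets a distinct address.
 */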
1634static void fec_get_mac(struct net_device *ndev)
1635{
1636 struct fec_enet_private *fep = netdev_priv(ndev);
1637 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1638 unsigned char *iap, tmpaddr[ETH_ALEN];
1639 int ret;
1640
1641
1642
1643
1644
1645
1646
1647 iap = macaddr;
1648
1649
1650
1651
1652 if (!is_valid_ether_addr(iap)) {
1653 struct device_node *np = fep->pdev->dev.of_node;
1654 if (np) {
1655 ret = of_get_mac_address(np, tmpaddr);
1656 if (!ret)
1657 iap = tmpaddr;
1658 }
1659 }
1660
1661
1662
1663
1664 if (!is_valid_ether_addr(iap)) {
1665#ifdef CONFIG_M5272
1666 if (FEC_FLASHMAC)
1667 iap = (unsigned char *)FEC_FLASHMAC;
1668#else
1669 if (pdata)
1670 iap = (unsigned char *)&pdata->mac;
1671#endif
1672 }
1673
1674
1675
1676
1677 if (!is_valid_ether_addr(iap)) {
1678 *((__be32 *) &tmpaddr[0]) =
1679 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1680 *((__be16 *) &tmpaddr[4]) =
1681 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1682 iap = &tmpaddr[0];
1683 }
1684
1685
1686
1687
1688 if (!is_valid_ether_addr(iap)) {
1689
1690 netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
1691 eth_hw_addr_random(ndev);
1692 netdev_info(ndev, "Using random MAC address: %pM\n",
1693 ndev->dev_addr);
1694 return;
1695 }
1696
1697 memcpy(ndev->dev_addr, iap, ETH_ALEN);
1698
1699
1700 if (iap == macaddr)
1701 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
1702}
1703
1704
1705
1706
1707
1708
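/* phylib adjust_link callback: track link, duplex and speed changes and
 * restart or stop the controller to match the new PHY state.
 */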
1709static void fec_enet_adjust_link(struct net_device *ndev)
1710{
1711 struct fec_enet_private *fep = netdev_priv(ndev);
1712 struct phy_device *phy_dev = ndev->phydev;
1713 int status_change = 0;
1714
1715
1716 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
1717 phy_dev->state = PHY_RESUMING;
1718 return;
1719 }
1720
1721
1722
1723
1724
1725
1726 if (!netif_running(ndev) || !netif_device_present(ndev)) {
1727 fep->link = 0;
1728 } else if (phy_dev->link) {
1729 if (!fep->link) {
1730 fep->link = phy_dev->link;
1731 status_change = 1;
1732 }
1733
1734 if (fep->full_duplex != phy_dev->duplex) {
1735 fep->full_duplex = phy_dev->duplex;
1736 status_change = 1;
1737 }
1738
1739 if (phy_dev->speed != fep->speed) {
1740 fep->speed = phy_dev->speed;
1741 status_change = 1;
1742 }
1743
1744
1745 if (status_change) {
1746 napi_disable(&fep->napi);
1747 netif_tx_lock_bh(ndev);
1748 fec_restart(ndev);
1749 netif_wake_queue(ndev);
1750 netif_tx_unlock_bh(ndev);
1751 napi_enable(&fep->napi);
1752 }
1753 } else {
1754 if (fep->link) {
1755 napi_disable(&fep->napi);
1756 netif_tx_lock_bh(ndev);
1757 fec_stop(ndev);
1758 netif_tx_unlock_bh(ndev);
1759 napi_enable(&fep->napi);
1760 fep->link = phy_dev->link;
1761 status_change = 1;
1762 }
1763 }
1764
1765 if (status_change)
1766 phy_print_status(phy_dev);
1767}
1768
1769static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1770{
1771 struct fec_enet_private *fep = bus->priv;
1772 struct device *dev = &fep->pdev->dev;
1773 unsigned long time_left;
1774 int ret = 0;
1775
1776 ret = pm_runtime_get_sync(dev);
1777 if (ret < 0)
1778 return ret;
1779
1780 fep->mii_timeout = 0;
1781 reinit_completion(&fep->mdio_done);
1782
1783
1784 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
1785 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1786 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
1787
1788
1789 time_left = wait_for_completion_timeout(&fep->mdio_done,
1790 usecs_to_jiffies(FEC_MII_TIMEOUT));
1791 if (time_left == 0) {
1792 fep->mii_timeout = 1;
1793 netdev_err(fep->netdev, "MDIO read timeout\n");
1794 ret = -ETIMEDOUT;
1795 goto out;
1796 }
1797
1798 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1799
1800out:
1801 pm_runtime_mark_last_busy(dev);
1802 pm_runtime_put_autosuspend(dev);
1803
1804 return ret;
1805}
1806
1807static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1808 u16 value)
1809{
1810 struct fec_enet_private *fep = bus->priv;
1811 struct device *dev = &fep->pdev->dev;
1812 unsigned long time_left;
1813 int ret;
1814
1815 ret = pm_runtime_get_sync(dev);
1816 if (ret < 0)
1817 return ret;
1818 else
1819 ret = 0;
1820
1821 fep->mii_timeout = 0;
1822 reinit_completion(&fep->mdio_done);
1823
1824
1825 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1826 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1827 FEC_MMFR_TA | FEC_MMFR_DATA(value),
1828 fep->hwp + FEC_MII_DATA);
1829
1830
1831 time_left = wait_for_completion_timeout(&fep->mdio_done,
1832 usecs_to_jiffies(FEC_MII_TIMEOUT));
1833 if (time_left == 0) {
1834 fep->mii_timeout = 1;
1835 netdev_err(fep->netdev, "MDIO write timeout\n");
1836 ret = -ETIMEDOUT;
1837 }
1838
1839 pm_runtime_mark_last_busy(dev);
1840 pm_runtime_put_autosuspend(dev);
1841
1842 return ret;
1843}
1844
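/* Enable or disable the optional clocks (AHB bus, enet_out, PTP and RMII
 * reference).  The PTP clock is toggled under ptp_clk_mutex so the PTP
 * callbacks never observe it in a half-enabled state.
 */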
1845static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1846{
1847 struct fec_enet_private *fep = netdev_priv(ndev);
1848 int ret;
1849
1850 if (enable) {
1851 ret = clk_prepare_enable(fep->clk_ahb);
1852 if (ret)
1853 return ret;
1854
1855 ret = clk_prepare_enable(fep->clk_enet_out);
1856 if (ret)
1857 goto failed_clk_enet_out;
1858
1859 if (fep->clk_ptp) {
1860 mutex_lock(&fep->ptp_clk_mutex);
1861 ret = clk_prepare_enable(fep->clk_ptp);
1862 if (ret) {
1863 mutex_unlock(&fep->ptp_clk_mutex);
1864 goto failed_clk_ptp;
1865 } else {
1866 fep->ptp_clk_on = true;
1867 }
1868 mutex_unlock(&fep->ptp_clk_mutex);
1869 }
1870
1871 ret = clk_prepare_enable(fep->clk_ref);
1872 if (ret)
1873 goto failed_clk_ref;
1874
1875 phy_reset_after_clk_enable(ndev->phydev);
1876 } else {
1877 clk_disable_unprepare(fep->clk_ahb);
1878 clk_disable_unprepare(fep->clk_enet_out);
1879 if (fep->clk_ptp) {
1880 mutex_lock(&fep->ptp_clk_mutex);
1881 clk_disable_unprepare(fep->clk_ptp);
1882 fep->ptp_clk_on = false;
1883 mutex_unlock(&fep->ptp_clk_mutex);
1884 }
1885 clk_disable_unprepare(fep->clk_ref);
1886 }
1887
1888 return 0;
1889
failed_clk_ref:
	/* clk_ref itself failed to enable, so unwind the PTP clock here */
	if (fep->clk_ptp) {
		mutex_lock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(fep->clk_ptp);
		fep->ptp_clk_on = false;
		mutex_unlock(&fep->ptp_clk_mutex);
	}
1893failed_clk_ptp:
1894 if (fep->clk_enet_out)
1895 clk_disable_unprepare(fep->clk_enet_out);
1896failed_clk_enet_out:
1897 clk_disable_unprepare(fep->clk_ahb);
1898
1899 return ret;
1900}
1901
1902static int fec_enet_mii_probe(struct net_device *ndev)
1903{
1904 struct fec_enet_private *fep = netdev_priv(ndev);
1905 struct phy_device *phy_dev = NULL;
1906 char mdio_bus_id[MII_BUS_ID_SIZE];
1907 char phy_name[MII_BUS_ID_SIZE + 3];
1908 int phy_id;
1909 int dev_id = fep->dev_id;
1910
1911 if (fep->phy_node) {
1912 phy_dev = of_phy_connect(ndev, fep->phy_node,
1913 &fec_enet_adjust_link, 0,
1914 fep->phy_interface);
1915 if (!phy_dev) {
1916 netdev_err(ndev, "Unable to connect to phy\n");
1917 return -ENODEV;
1918 }
1919 } else {
1920
1921 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1922 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
1923 continue;
1924 if (dev_id--)
1925 continue;
1926 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
1927 break;
1928 }
1929
1930 if (phy_id >= PHY_MAX_ADDR) {
1931 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
1932 strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
1933 phy_id = 0;
1934 }
1935
1936 snprintf(phy_name, sizeof(phy_name),
1937 PHY_ID_FMT, mdio_bus_id, phy_id);
1938 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
1939 fep->phy_interface);
1940 }
1941
1942 if (IS_ERR(phy_dev)) {
1943 netdev_err(ndev, "could not attach to PHY\n");
1944 return PTR_ERR(phy_dev);
1945 }
1946
1947
1948 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
1949 phy_set_max_speed(phy_dev, 1000);
1950 phy_remove_link_mode(phy_dev,
1951 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1952#if !defined(CONFIG_M5272)
1953 phy_support_sym_pause(phy_dev);
1954#endif
1955 }
1956 else
1957 phy_set_max_speed(phy_dev, 100);
1958
1959 fep->link = 0;
1960 fep->full_duplex = 0;
1961
1962 phy_attached_info(phy_dev);
1963
1964 return 0;
1965}
1966
1967static int fec_enet_mii_init(struct platform_device *pdev)
1968{
1969 static struct mii_bus *fec0_mii_bus;
1970 struct net_device *ndev = platform_get_drvdata(pdev);
1971 struct fec_enet_private *fep = netdev_priv(ndev);
1972 struct device_node *node;
1973 int err = -ENXIO;
1974 u32 mii_speed, holdtime;
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
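	/* On SoCs with FEC_QUIRK_SINGLE_MDIO (i.MX28) both PHYs are wired to
	 * the first controller's MDIO pins, so only fec0 registers an MDIO
	 * bus and the second instance simply reuses it.
	 */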
1992 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1993
1994 if (mii_cnt && fec0_mii_bus) {
1995 fep->mii_bus = fec0_mii_bus;
1996 mii_cnt++;
1997 return 0;
1998 }
1999 return -ENOENT;
2000 }
2001
2002 fep->mii_timeout = 0;
2003
2004
2005
2006
2007
2008
2009
2010
2011
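	/* Aim for a 2.5 MHz MDC clock: the divider is the module clock in
	 * units of 5 MHz, and ENET-MAC variants (FEC_QUIRK_ENET_MAC) add one
	 * to the programmed value in hardware, hence the extra decrement.
	 */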
2012 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
2013 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2014 mii_speed--;
2015 if (mii_speed > 63) {
2016 dev_err(&pdev->dev,
2017 "fec clock (%lu) too fast to get right mii speed\n",
2018 clk_get_rate(fep->clk_ipg));
2019 err = -EINVAL;
2020 goto err_out;
2021 }
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2036
2037 fep->phy_speed = mii_speed << 1 | holdtime << 8;
2038
2039 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2040
2041 fep->mii_bus = mdiobus_alloc();
2042 if (fep->mii_bus == NULL) {
2043 err = -ENOMEM;
2044 goto err_out;
2045 }
2046
2047 fep->mii_bus->name = "fec_enet_mii_bus";
2048 fep->mii_bus->read = fec_enet_mdio_read;
2049 fep->mii_bus->write = fec_enet_mdio_write;
2050 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2051 pdev->name, fep->dev_id + 1);
2052 fep->mii_bus->priv = fep;
2053 fep->mii_bus->parent = &pdev->dev;
2054
2055 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2056 err = of_mdiobus_register(fep->mii_bus, node);
2057 if (node)
2058 of_node_put(node);
2059 if (err)
2060 goto err_out_free_mdiobus;
2061
2062 mii_cnt++;
2063
2064
2065 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2066 fec0_mii_bus = fep->mii_bus;
2067
2068 return 0;
2069
2070err_out_free_mdiobus:
2071 mdiobus_free(fep->mii_bus);
2072err_out:
2073 return err;
2074}
2075
2076static void fec_enet_mii_remove(struct fec_enet_private *fep)
2077{
2078 if (--mii_cnt == 0) {
2079 mdiobus_unregister(fep->mii_bus);
2080 mdiobus_free(fep->mii_bus);
2081 }
2082}
2083
2084static void fec_enet_get_drvinfo(struct net_device *ndev,
2085 struct ethtool_drvinfo *info)
2086{
2087 struct fec_enet_private *fep = netdev_priv(ndev);
2088
2089 strlcpy(info->driver, fep->pdev->dev.driver->name,
2090 sizeof(info->driver));
2091 strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
2092 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2093}
2094
2095static int fec_enet_get_regs_len(struct net_device *ndev)
2096{
2097 struct fec_enet_private *fep = netdev_priv(ndev);
2098 struct resource *r;
2099 int s = 0;
2100
2101 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2102 if (r)
2103 s = resource_size(r);
2104
2105 return s;
2106}
2107
2108
2109#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2110 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2111 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2112static u32 fec_enet_register_offset[] = {
2113 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2114 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2115 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2116 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2117 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2118 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2119 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2120 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2121 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2122 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2123 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2124 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2125 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2126 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2127 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2128 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2129 RMON_T_P_GTE2048, RMON_T_OCTETS,
2130 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2131 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2132 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2133 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2134 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2135 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2136 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2137 RMON_R_P_GTE2048, RMON_R_OCTETS,
2138 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2139 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2140};
2141#else
2142static u32 fec_enet_register_offset[] = {
2143 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2144 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2145 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2146 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2147 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2148 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2149 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2150 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2151 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2152};
2153#endif
2154
2155static void fec_enet_get_regs(struct net_device *ndev,
2156 struct ethtool_regs *regs, void *regbuf)
2157{
2158 struct fec_enet_private *fep = netdev_priv(ndev);
2159 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2160 u32 *buf = (u32 *)regbuf;
2161 u32 i, off;
2162
2163 memset(buf, 0, regs->len);
2164
2165 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
2166 off = fec_enet_register_offset[i] / 4;
2167 buf[off] = readl(&theregs[off]);
2168 }
2169}
2170
2171static int fec_enet_get_ts_info(struct net_device *ndev,
2172 struct ethtool_ts_info *info)
2173{
2174 struct fec_enet_private *fep = netdev_priv(ndev);
2175
2176 if (fep->bufdesc_ex) {
2177
2178 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2179 SOF_TIMESTAMPING_RX_SOFTWARE |
2180 SOF_TIMESTAMPING_SOFTWARE |
2181 SOF_TIMESTAMPING_TX_HARDWARE |
2182 SOF_TIMESTAMPING_RX_HARDWARE |
2183 SOF_TIMESTAMPING_RAW_HARDWARE;
2184 if (fep->ptp_clock)
2185 info->phc_index = ptp_clock_index(fep->ptp_clock);
2186 else
2187 info->phc_index = -1;
2188
2189 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2190 (1 << HWTSTAMP_TX_ON);
2191
2192 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2193 (1 << HWTSTAMP_FILTER_ALL);
2194 return 0;
2195 } else {
2196 return ethtool_op_get_ts_info(ndev, info);
2197 }
2198}
2199
2200#if !defined(CONFIG_M5272)
2201
2202static void fec_enet_get_pauseparam(struct net_device *ndev,
2203 struct ethtool_pauseparam *pause)
2204{
2205 struct fec_enet_private *fep = netdev_priv(ndev);
2206
2207 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2208 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2209 pause->rx_pause = pause->tx_pause;
2210}
2211
2212static int fec_enet_set_pauseparam(struct net_device *ndev,
2213 struct ethtool_pauseparam *pause)
2214{
2215 struct fec_enet_private *fep = netdev_priv(ndev);
2216
2217 if (!ndev->phydev)
2218 return -ENODEV;
2219
2220 if (pause->tx_pause != pause->rx_pause) {
2221 netdev_info(ndev,
2222 "hardware only support enable/disable both tx and rx");
2223 return -EINVAL;
2224 }
2225
2226 fep->pause_flag = 0;
2227
2228
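	/* tx pause must be the same as rx pause */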
2229 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2230 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2231
2232 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2233 pause->autoneg);
2234
2235 if (pause->autoneg) {
2236 if (netif_running(ndev))
2237 fec_stop(ndev);
2238 phy_start_aneg(ndev->phydev);
2239 }
2240 if (netif_running(ndev)) {
2241 napi_disable(&fep->napi);
2242 netif_tx_lock_bh(ndev);
2243 fec_restart(ndev);
2244 netif_wake_queue(ndev);
2245 netif_tx_unlock_bh(ndev);
2246 napi_enable(&fep->napi);
2247 }
2248
2249 return 0;
2250}
2251
2252static const struct fec_stat {
2253 char name[ETH_GSTRING_LEN];
2254 u16 offset;
2255} fec_stats[] = {
2256
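	/* RMON TX counters */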
2257 { "tx_dropped", RMON_T_DROP },
2258 { "tx_packets", RMON_T_PACKETS },
2259 { "tx_broadcast", RMON_T_BC_PKT },
2260 { "tx_multicast", RMON_T_MC_PKT },
2261 { "tx_crc_errors", RMON_T_CRC_ALIGN },
2262 { "tx_undersize", RMON_T_UNDERSIZE },
2263 { "tx_oversize", RMON_T_OVERSIZE },
2264 { "tx_fragment", RMON_T_FRAG },
2265 { "tx_jabber", RMON_T_JAB },
2266 { "tx_collision", RMON_T_COL },
2267 { "tx_64byte", RMON_T_P64 },
2268 { "tx_65to127byte", RMON_T_P65TO127 },
2269 { "tx_128to255byte", RMON_T_P128TO255 },
2270 { "tx_256to511byte", RMON_T_P256TO511 },
2271 { "tx_512to1023byte", RMON_T_P512TO1023 },
2272 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2273 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2274 { "tx_octets", RMON_T_OCTETS },
2275
2276
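	/* IEEE TX counters */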
2277 { "IEEE_tx_drop", IEEE_T_DROP },
2278 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2279 { "IEEE_tx_1col", IEEE_T_1COL },
2280 { "IEEE_tx_mcol", IEEE_T_MCOL },
2281 { "IEEE_tx_def", IEEE_T_DEF },
2282 { "IEEE_tx_lcol", IEEE_T_LCOL },
2283 { "IEEE_tx_excol", IEEE_T_EXCOL },
2284 { "IEEE_tx_macerr", IEEE_T_MACERR },
2285 { "IEEE_tx_cserr", IEEE_T_CSERR },
2286 { "IEEE_tx_sqe", IEEE_T_SQE },
2287 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2288 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2289
2290
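	/* RMON RX counters */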
2291 { "rx_packets", RMON_R_PACKETS },
2292 { "rx_broadcast", RMON_R_BC_PKT },
2293 { "rx_multicast", RMON_R_MC_PKT },
2294 { "rx_crc_errors", RMON_R_CRC_ALIGN },
2295 { "rx_undersize", RMON_R_UNDERSIZE },
2296 { "rx_oversize", RMON_R_OVERSIZE },
2297 { "rx_fragment", RMON_R_FRAG },
2298 { "rx_jabber", RMON_R_JAB },
2299 { "rx_64byte", RMON_R_P64 },
2300 { "rx_65to127byte", RMON_R_P65TO127 },
2301 { "rx_128to255byte", RMON_R_P128TO255 },
2302 { "rx_256to511byte", RMON_R_P256TO511 },
2303 { "rx_512to1023byte", RMON_R_P512TO1023 },
2304 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2305 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2306 { "rx_octets", RMON_R_OCTETS },
2307
2308
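	/* IEEE RX counters */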
2309 { "IEEE_rx_drop", IEEE_R_DROP },
2310 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2311 { "IEEE_rx_crc", IEEE_R_CRC },
2312 { "IEEE_rx_align", IEEE_R_ALIGN },
2313 { "IEEE_rx_macerr", IEEE_R_MACERR },
2314 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2315 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2316};
2317
2318#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
2319
2320static void fec_enet_update_ethtool_stats(struct net_device *dev)
2321{
2322 struct fec_enet_private *fep = netdev_priv(dev);
2323 int i;
2324
2325 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2326 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2327}
2328
2329static void fec_enet_get_ethtool_stats(struct net_device *dev,
2330 struct ethtool_stats *stats, u64 *data)
2331{
2332 struct fec_enet_private *fep = netdev_priv(dev);
2333
2334 if (netif_running(dev))
2335 fec_enet_update_ethtool_stats(dev);
2336
2337 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2338}
2339
2340static void fec_enet_get_strings(struct net_device *netdev,
2341 u32 stringset, u8 *data)
2342{
2343 int i;
2344 switch (stringset) {
2345 case ETH_SS_STATS:
2346 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2347 memcpy(data + i * ETH_GSTRING_LEN,
2348 fec_stats[i].name, ETH_GSTRING_LEN);
2349 break;
2350 }
2351}
2352
2353static int fec_enet_get_sset_count(struct net_device *dev, int sset)
2354{
2355 switch (sset) {
2356 case ETH_SS_STATS:
2357 return ARRAY_SIZE(fec_stats);
2358 default:
2359 return -EOPNOTSUPP;
2360 }
2361}
2362
2363static void fec_enet_clear_ethtool_stats(struct net_device *dev)
2364{
2365 struct fec_enet_private *fep = netdev_priv(dev);
2366 int i;
2367
2368
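	/* Disable (freeze) the MIB statistics counters while clearing them */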
2369 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
2370
2371 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2372 writel(0, fep->hwp + fec_stats[i].offset);
2373
2374
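	/* Re-enable the MIB statistics counters (clear the disable bit) */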
2375 writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
2376}
2377
2378#else
2379#define FEC_STATS_SIZE 0
2380static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
2381{
2382}
2383
2384static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
2385{
2386}
2387#endif
2388
2389
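/* The interrupt coalescing timer is clocked from the enet system clock
 * (clk_ahb) and counts in units of 64 clock cycles, so the ICTT value is
 * X us / (cycle_ns * 64).
 */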
2393static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
2394{
2395 struct fec_enet_private *fep = netdev_priv(ndev);
2396
2397 return us * (fep->itr_clk_rate / 64000) / 1000;
2398}
2399
2400
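/* Program the current interrupt coalescing thresholds into the hardware */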
2401static void fec_enet_itr_coal_set(struct net_device *ndev)
2402{
2403 struct fec_enet_private *fep = netdev_priv(ndev);
2404 int rx_itr, tx_itr;
2405
2406
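	/* Only program the hardware when all four thresholds are non-zero */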
2407 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
2408 !fep->tx_time_itr || !fep->tx_pkts_itr)
2409 return;
2410
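	/* Select the enet system clock as the coalescing timer clock source */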
2414 rx_itr = FEC_ITR_CLK_SEL;
2415 tx_itr = FEC_ITR_CLK_SEL;
2416
2417
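	/* Set the frame count (ICFT) and timer (ICTT) thresholds */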
2418 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
2419 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
2420 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
2421 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
2422
2423 rx_itr |= FEC_ITR_EN;
2424 tx_itr |= FEC_ITR_EN;
2425
2426 writel(tx_itr, fep->hwp + FEC_TXIC0);
2427 writel(rx_itr, fep->hwp + FEC_RXIC0);
2428 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
2429 writel(tx_itr, fep->hwp + FEC_TXIC1);
2430 writel(rx_itr, fep->hwp + FEC_RXIC1);
2431 writel(tx_itr, fep->hwp + FEC_TXIC2);
2432 writel(rx_itr, fep->hwp + FEC_RXIC2);
2433 }
2434}
2435
2436static int fec_enet_get_coalesce(struct net_device *ndev,
2437 struct ethtool_coalesce *ec,
2438 struct kernel_ethtool_coalesce *kernel_coal,
2439 struct netlink_ext_ack *extack)
2440{
2441 struct fec_enet_private *fep = netdev_priv(ndev);
2442
2443 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2444 return -EOPNOTSUPP;
2445
2446 ec->rx_coalesce_usecs = fep->rx_time_itr;
2447 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
2448
2449 ec->tx_coalesce_usecs = fep->tx_time_itr;
2450 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
2451
2452 return 0;
2453}
2454
2455static int fec_enet_set_coalesce(struct net_device *ndev,
2456 struct ethtool_coalesce *ec,
2457 struct kernel_ethtool_coalesce *kernel_coal,
2458 struct netlink_ext_ack *extack)
2459{
2460 struct fec_enet_private *fep = netdev_priv(ndev);
2461 unsigned int cycle;
2462
2463 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2464 return -EOPNOTSUPP;
2465
2466 if (ec->rx_max_coalesced_frames > 255) {
2467 pr_err("Rx coalesced frames exceed hardware limitation\n");
2468 return -EINVAL;
2469 }
2470
2471 if (ec->tx_max_coalesced_frames > 255) {
2472 pr_err("Tx coalesced frame exceed hardware limitation\n");
2473 return -EINVAL;
2474 }
2475
	/* Validate the requested timeouts against the 16-bit ICTT field */
	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Rx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		pr_err("Tx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}
2487
2488 fep->rx_time_itr = ec->rx_coalesce_usecs;
2489 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
2490
2491 fep->tx_time_itr = ec->tx_coalesce_usecs;
2492 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
2493
2494 fec_enet_itr_coal_set(ndev);
2495
2496 return 0;
2497}
2498
2499static void fec_enet_itr_coal_init(struct net_device *ndev)
2500{
2501 struct ethtool_coalesce ec;
2502
2503 ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2504 ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2505
2506 ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2507 ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2508
2509 fec_enet_set_coalesce(ndev, &ec, NULL, NULL);
2510}
2511
2512static int fec_enet_get_tunable(struct net_device *netdev,
2513 const struct ethtool_tunable *tuna,
2514 void *data)
2515{
2516 struct fec_enet_private *fep = netdev_priv(netdev);
2517 int ret = 0;
2518
2519 switch (tuna->id) {
2520 case ETHTOOL_RX_COPYBREAK:
2521 *(u32 *)data = fep->rx_copybreak;
2522 break;
2523 default:
2524 ret = -EINVAL;
2525 break;
2526 }
2527
2528 return ret;
2529}
2530
2531static int fec_enet_set_tunable(struct net_device *netdev,
2532 const struct ethtool_tunable *tuna,
2533 const void *data)
2534{
2535 struct fec_enet_private *fep = netdev_priv(netdev);
2536 int ret = 0;
2537
2538 switch (tuna->id) {
2539 case ETHTOOL_RX_COPYBREAK:
2540 fep->rx_copybreak = *(u32 *)data;
2541 break;
2542 default:
2543 ret = -EINVAL;
2544 break;
2545 }
2546
2547 return ret;
2548}
2549
2550static void
2551fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2552{
2553 struct fec_enet_private *fep = netdev_priv(ndev);
2554
2555 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
2556 wol->supported = WAKE_MAGIC;
2557 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
2558 } else {
2559 wol->supported = wol->wolopts = 0;
2560 }
2561}
2562
2563static int
2564fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2565{
2566 struct fec_enet_private *fep = netdev_priv(ndev);
2567
2568 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
2569 return -EINVAL;
2570
2571 if (wol->wolopts & ~WAKE_MAGIC)
2572 return -EINVAL;
2573
2574 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2575 if (device_may_wakeup(&ndev->dev)) {
2576 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
2577 if (fep->irq[0] > 0)
2578 enable_irq_wake(fep->irq[0]);
2579 } else {
2580 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
2581 if (fep->irq[0] > 0)
2582 disable_irq_wake(fep->irq[0]);
2583 }
2584
2585 return 0;
2586}
2587
2588static const struct ethtool_ops fec_enet_ethtool_ops = {
2589 .get_drvinfo = fec_enet_get_drvinfo,
2590 .get_regs_len = fec_enet_get_regs_len,
2591 .get_regs = fec_enet_get_regs,
2592 .nway_reset = phy_ethtool_nway_reset,
2593 .get_link = ethtool_op_get_link,
2594 .get_coalesce = fec_enet_get_coalesce,
2595 .set_coalesce = fec_enet_set_coalesce,
2596#ifndef CONFIG_M5272
2597 .get_pauseparam = fec_enet_get_pauseparam,
2598 .set_pauseparam = fec_enet_set_pauseparam,
2599 .get_strings = fec_enet_get_strings,
2600 .get_ethtool_stats = fec_enet_get_ethtool_stats,
2601 .get_sset_count = fec_enet_get_sset_count,
2602#endif
2603 .get_ts_info = fec_enet_get_ts_info,
2604 .get_tunable = fec_enet_get_tunable,
2605 .set_tunable = fec_enet_set_tunable,
2606 .get_wol = fec_enet_get_wol,
2607 .set_wol = fec_enet_set_wol,
2608 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2609 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2610};
2611
2612static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2613{
2614 struct fec_enet_private *fep = netdev_priv(ndev);
2615 struct phy_device *phydev = ndev->phydev;
2616
2617 if (!netif_running(ndev))
2618 return -EINVAL;
2619
2620 if (!phydev)
2621 return -ENODEV;
2622
2623 if (fep->bufdesc_ex) {
2624 if (cmd == SIOCSHWTSTAMP)
2625 return fec_ptp_set(ndev, rq);
2626 if (cmd == SIOCGHWTSTAMP)
2627 return fec_ptp_get(ndev, rq);
2628 }
2629
2630 return phy_mii_ioctl(phydev, rq, cmd);
2631}
2632
2633static void fec_enet_free_buffers(struct net_device *ndev)
2634{
2635 struct fec_enet_private *fep = netdev_priv(ndev);
2636 unsigned int i;
2637 struct sk_buff *skb;
2638 struct bufdesc *bdp;
2639 struct fec_enet_priv_tx_q *txq;
2640 struct fec_enet_priv_rx_q *rxq;
2641 unsigned int q;
2642
2643 for (q = 0; q < fep->num_rx_queues; q++) {
2644 rxq = fep->rx_queue[q];
2645 bdp = rxq->bd.base;
2646 for (i = 0; i < rxq->bd.ring_size; i++) {
2647 skb = rxq->rx_skbuff[i];
2648 rxq->rx_skbuff[i] = NULL;
2649 if (skb) {
2650 dma_unmap_single(&fep->pdev->dev,
2651 fec32_to_cpu(bdp->cbd_bufaddr),
2652 FEC_ENET_RX_FRSIZE - fep->rx_align,
2653 DMA_FROM_DEVICE);
2654 dev_kfree_skb(skb);
2655 }
2656 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2657 }
2658 }
2659
2660 for (q = 0; q < fep->num_tx_queues; q++) {
2661 txq = fep->tx_queue[q];
2662 bdp = txq->bd.base;
2663 for (i = 0; i < txq->bd.ring_size; i++) {
2664 kfree(txq->tx_bounce[i]);
2665 txq->tx_bounce[i] = NULL;
2666 skb = txq->tx_skbuff[i];
2667 txq->tx_skbuff[i] = NULL;
2668 dev_kfree_skb(skb);
2669 }
2670 }
2671}
2672
2673static void fec_enet_free_queue(struct net_device *ndev)
2674{
2675 struct fec_enet_private *fep = netdev_priv(ndev);
2676 int i;
2677 struct fec_enet_priv_tx_q *txq;
2678
2679 for (i = 0; i < fep->num_tx_queues; i++)
2680 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
2681 txq = fep->tx_queue[i];
2682 dma_free_coherent(&fep->pdev->dev,
2683 txq->bd.ring_size * TSO_HEADER_SIZE,
2684 txq->tso_hdrs,
2685 txq->tso_hdrs_dma);
2686 }
2687
2688 for (i = 0; i < fep->num_rx_queues; i++)
2689 kfree(fep->rx_queue[i]);
2690 for (i = 0; i < fep->num_tx_queues; i++)
2691 kfree(fep->tx_queue[i]);
2692}
2693
2694static int fec_enet_alloc_queue(struct net_device *ndev)
2695{
2696 struct fec_enet_private *fep = netdev_priv(ndev);
2697 int i;
2698 int ret = 0;
2699 struct fec_enet_priv_tx_q *txq;
2700
2701 for (i = 0; i < fep->num_tx_queues; i++) {
2702 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
2703 if (!txq) {
2704 ret = -ENOMEM;
2705 goto alloc_failed;
2706 }
2707
2708 fep->tx_queue[i] = txq;
2709 txq->bd.ring_size = TX_RING_SIZE;
2710 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
2711
2712 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2713 txq->tx_wake_threshold =
2714 (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
2715
2716 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
2717 txq->bd.ring_size * TSO_HEADER_SIZE,
2718 &txq->tso_hdrs_dma,
2719 GFP_KERNEL);
2720 if (!txq->tso_hdrs) {
2721 ret = -ENOMEM;
2722 goto alloc_failed;
2723 }
2724 }
2725
2726 for (i = 0; i < fep->num_rx_queues; i++) {
2727 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
2728 GFP_KERNEL);
2729 if (!fep->rx_queue[i]) {
2730 ret = -ENOMEM;
2731 goto alloc_failed;
2732 }
2733
2734 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
2735 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
2736 }
2737 return ret;
2738
2739alloc_failed:
2740 fec_enet_free_queue(ndev);
2741 return ret;
2742}
2743
2744static int
2745fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2746{
2747 struct fec_enet_private *fep = netdev_priv(ndev);
2748 unsigned int i;
2749 struct sk_buff *skb;
2750 struct bufdesc *bdp;
2751 struct fec_enet_priv_rx_q *rxq;
2752
2753 rxq = fep->rx_queue[queue];
2754 bdp = rxq->bd.base;
2755 for (i = 0; i < rxq->bd.ring_size; i++) {
2756 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
2757 if (!skb)
2758 goto err_alloc;
2759
2760 if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
2761 dev_kfree_skb(skb);
2762 goto err_alloc;
2763 }
2764
2765 rxq->rx_skbuff[i] = skb;
2766 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2767
2768 if (fep->bufdesc_ex) {
2769 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2770 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2771 }
2772
2773 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2774 }
2775
2776
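	/* Set the last buffer descriptor to wrap back to the start of the ring */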
2777 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
2778 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2779 return 0;
2780
2781 err_alloc:
2782 fec_enet_free_buffers(ndev);
2783 return -ENOMEM;
2784}
2785
2786static int
2787fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2788{
2789 struct fec_enet_private *fep = netdev_priv(ndev);
2790 unsigned int i;
2791 struct bufdesc *bdp;
2792 struct fec_enet_priv_tx_q *txq;
2793
2794 txq = fep->tx_queue[queue];
2795 bdp = txq->bd.base;
2796 for (i = 0; i < txq->bd.ring_size; i++) {
2797 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2798 if (!txq->tx_bounce[i])
2799 goto err_alloc;
2800
2801 bdp->cbd_sc = cpu_to_fec16(0);
2802 bdp->cbd_bufaddr = cpu_to_fec32(0);
2803
2804 if (fep->bufdesc_ex) {
2805 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2806 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2807 }
2808
2809 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
2810 }
2811
2812
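	/* Set the last buffer descriptor to wrap back to the start of the ring */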
2813 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
2814 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2815
2816 return 0;
2817
2818 err_alloc:
2819 fec_enet_free_buffers(ndev);
2820 return -ENOMEM;
2821}
2822
2823static int fec_enet_alloc_buffers(struct net_device *ndev)
2824{
2825 struct fec_enet_private *fep = netdev_priv(ndev);
2826 unsigned int i;
2827
2828 for (i = 0; i < fep->num_rx_queues; i++)
2829 if (fec_enet_alloc_rxq_buffers(ndev, i))
2830 return -ENOMEM;
2831
2832 for (i = 0; i < fep->num_tx_queues; i++)
2833 if (fec_enet_alloc_txq_buffers(ndev, i))
2834 return -ENOMEM;
2835 return 0;
2836}
2837
2838static int
2839fec_enet_open(struct net_device *ndev)
2840{
2841 struct fec_enet_private *fep = netdev_priv(ndev);
2842 int ret;
2843 bool reset_again;
2844
2845 ret = pm_runtime_get_sync(&fep->pdev->dev);
2846 if (ret < 0)
2847 return ret;
2848
2849 pinctrl_pm_select_default_state(&fep->pdev->dev);
2850 ret = fec_enet_clk_enable(ndev, true);
2851 if (ret)
2852 goto clk_enable;
2853
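	/* On the first open the PHY is not attached yet (that happens in
	 * fec_enet_mii_probe() below), so it cannot be reset through its
	 * driver here.  Remember this so the reset can be repeated once the
	 * PHY has been probed.
	 */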
2860 if (ndev->phydev && ndev->phydev->drv)
2861 reset_again = false;
2862 else
2863 reset_again = true;
2864
2869 ret = fec_enet_alloc_buffers(ndev);
2870 if (ret)
2871 goto err_enet_alloc;
2872
2873
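	/* Init MAC prior to mii bus probe */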
2874 fec_restart(ndev);
2875
2876
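	/* Probe and connect to the PHY when the interface is opened */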
2877 ret = fec_enet_mii_probe(ndev);
2878 if (ret)
2879 goto err_enet_mii_probe;
2880
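	/* If the PHY was not yet bound when the clocks were enabled, reset it
	 * now that it has been probed.
	 */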
2884 if (reset_again)
2885 phy_reset_after_clk_enable(ndev->phydev);
2886
2887 if (fep->quirks & FEC_QUIRK_ERR006687)
2888 imx6q_cpuidle_fec_irqs_used();
2889
2890 napi_enable(&fep->napi);
2891 phy_start(ndev->phydev);
2892 netif_tx_start_all_queues(ndev);
2893
2894 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2895 FEC_WOL_FLAG_ENABLE);
2896
2897 return 0;
2898
2899err_enet_mii_probe:
2900 fec_enet_free_buffers(ndev);
2901err_enet_alloc:
2902 fec_enet_clk_enable(ndev, false);
2903clk_enable:
2904 pm_runtime_mark_last_busy(&fep->pdev->dev);
2905 pm_runtime_put_autosuspend(&fep->pdev->dev);
2906 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2907 return ret;
2908}
2909
2910static int
2911fec_enet_close(struct net_device *ndev)
2912{
2913 struct fec_enet_private *fep = netdev_priv(ndev);
2914
2915 phy_stop(ndev->phydev);
2916
2917 if (netif_device_present(ndev)) {
2918 napi_disable(&fep->napi);
2919 netif_tx_disable(ndev);
2920 fec_stop(ndev);
2921 }
2922
2923 phy_disconnect(ndev->phydev);
2924
2925 if (fep->quirks & FEC_QUIRK_ERR006687)
2926 imx6q_cpuidle_fec_irqs_unused();
2927
2928 fec_enet_update_ethtool_stats(ndev);
2929
2930 fec_enet_clk_enable(ndev, false);
2931 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2932 pm_runtime_mark_last_busy(&fep->pdev->dev);
2933 pm_runtime_put_autosuspend(&fep->pdev->dev);
2934
2935 fec_enet_free_buffers(ndev);
2936
2937 return 0;
2938}
2939
2940
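/* Set or clear the multicast filter for this adaptor.
 * The FEC supports promiscuous mode, an "accept all multicast" mode and a
 * 64-bit group hash table indexed by the upper bits of the CRC-32 of the
 * destination address.
 */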
#define FEC_HASH_BITS	6	/* #bits used as the hash index */
#define CRC32_POLY	0xEDB88320	/* reversed (reflected) CRC-32 polynomial */
2952
2953static void set_multicast_list(struct net_device *ndev)
2954{
2955 struct fec_enet_private *fep = netdev_priv(ndev);
2956 struct netdev_hw_addr *ha;
2957 unsigned int i, bit, data, crc, tmp;
2958 unsigned char hash;
2959 unsigned int hash_high = 0, hash_low = 0;
2960
2961 if (ndev->flags & IFF_PROMISC) {
2962 tmp = readl(fep->hwp + FEC_R_CNTRL);
2963 tmp |= 0x8;
2964 writel(tmp, fep->hwp + FEC_R_CNTRL);
2965 return;
2966 }
2967
2968 tmp = readl(fep->hwp + FEC_R_CNTRL);
2969 tmp &= ~0x8;
2970 writel(tmp, fep->hwp + FEC_R_CNTRL);
2971
2972 if (ndev->flags & IFF_ALLMULTI) {
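		/* Catch all multicast addresses by setting the filter to all 1s */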
2976 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2977 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2978
2979 return;
2980 }
2981
2982
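	/* Add each multicast address to the hash filter */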
2983 netdev_for_each_mc_addr(ha, ndev) {
2984
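		/* Calculate the CRC-32 of the MAC address */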
2985 crc = 0xffffffff;
2986
2987 for (i = 0; i < ndev->addr_len; i++) {
2988 data = ha->addr[i];
2989 for (bit = 0; bit < 8; bit++, data >>= 1) {
2990 crc = (crc >> 1) ^
2991 (((crc ^ data) & 1) ? CRC32_POLY : 0);
2992 }
2993 }
2994
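		/* Only the upper FEC_HASH_BITS (6) bits of the CRC are used;
		 * they select one bit of the 64-bit hash register pair.
		 */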
2998 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2999
3000 if (hash > 31)
3001 hash_high |= 1 << (hash - 32);
3002 else
3003 hash_low |= 1 << hash;
3004 }
3005
3006 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3007 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3008}
3009
3010
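/* Set a MAC address change in hardware. */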
3011static int
3012fec_set_mac_address(struct net_device *ndev, void *p)
3013{
3014 struct fec_enet_private *fep = netdev_priv(ndev);
3015 struct sockaddr *addr = p;
3016
3017 if (addr) {
3018 if (!is_valid_ether_addr(addr->sa_data))
3019 return -EADDRNOTAVAIL;
3020 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3021 }
3022
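	/* Only program the address registers while the interface is running;
	 * when it is down the FEC clocks may be gated and register access is
	 * not possible.
	 */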
3028 if (!netif_running(ndev))
3029 return 0;
3030
3031 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3032 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3033 fep->hwp + FEC_ADDR_LOW);
3034 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3035 fep->hwp + FEC_ADDR_HIGH);
3036 return 0;
3037}
3038
3039#ifdef CONFIG_NET_POLL_CONTROLLER
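/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 */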
3047static void fec_poll_controller(struct net_device *dev)
3048{
3049 int i;
3050 struct fec_enet_private *fep = netdev_priv(dev);
3051
3052 for (i = 0; i < FEC_IRQ_NUM; i++) {
3053 if (fep->irq[i] > 0) {
3054 disable_irq(fep->irq[i]);
3055 fec_enet_interrupt(fep->irq[i], dev);
3056 enable_irq(fep->irq[i]);
3057 }
3058 }
3059}
3060#endif
3061
3062static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3063 netdev_features_t features)
3064{
3065 struct fec_enet_private *fep = netdev_priv(netdev);
3066 netdev_features_t changed = features ^ netdev->features;
3067
3068 netdev->features = features;
3069
3070
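	/* The receive checksum offload setting has changed */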
3071 if (changed & NETIF_F_RXCSUM) {
3072 if (features & NETIF_F_RXCSUM)
3073 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3074 else
3075 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3076 }
3077}
3078
3079static int fec_set_features(struct net_device *netdev,
3080 netdev_features_t features)
3081{
3082 struct fec_enet_private *fep = netdev_priv(netdev);
3083 netdev_features_t changed = features ^ netdev->features;
3084
3085 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3086 napi_disable(&fep->napi);
3087 netif_tx_lock_bh(netdev);
3088 fec_stop(netdev);
3089 fec_enet_set_netdev_features(netdev, features);
3090 fec_restart(netdev);
3091 netif_tx_wake_all_queues(netdev);
3092 netif_tx_unlock_bh(netdev);
3093 napi_enable(&fep->napi);
3094 } else {
3095 fec_enet_set_netdev_features(netdev, features);
3096 }
3097
3098 return 0;
3099}
3100
3101static const struct net_device_ops fec_netdev_ops = {
3102 .ndo_open = fec_enet_open,
3103 .ndo_stop = fec_enet_close,
3104 .ndo_start_xmit = fec_enet_start_xmit,
3105 .ndo_set_rx_mode = set_multicast_list,
3106 .ndo_validate_addr = eth_validate_addr,
3107 .ndo_tx_timeout = fec_timeout,
3108 .ndo_set_mac_address = fec_set_mac_address,
3109 .ndo_do_ioctl = fec_enet_ioctl,
3110#ifdef CONFIG_NET_POLL_CONTROLLER
3111 .ndo_poll_controller = fec_poll_controller,
3112#endif
3113 .ndo_set_features = fec_set_features,
3114};
3115
3116static const unsigned short offset_des_active_rxq[] = {
3117 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
3118};
3119
3120static const unsigned short offset_des_active_txq[] = {
3121 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
3122};
3123
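/* Initialise the FEC: set up the buffer descriptor rings, program the MAC
 * address and set the netdev features supported by this controller.
 */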
3128static int fec_enet_init(struct net_device *ndev)
3129{
3130 struct fec_enet_private *fep = netdev_priv(ndev);
3131 struct bufdesc *cbd_base;
3132 dma_addr_t bd_dma;
	int bd_size;
	int ret;
	unsigned int i;
3135 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
3136 sizeof(struct bufdesc);
3137 unsigned dsize_log2 = __fls(dsize);
3138
3139 WARN_ON(dsize != (1 << dsize_log2));
3140#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3141 fep->rx_align = 0xf;
3142 fep->tx_align = 0xf;
3143#else
3144 fep->rx_align = 0x3;
3145 fep->tx_align = 0x3;
3146#endif
3147
	ret = fec_enet_alloc_queue(ndev);
	if (ret)
		return ret;
3149
3150 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
3151
3152
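	/* Allocate memory for the buffer descriptors */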
3153 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3154 GFP_KERNEL);
3155 if (!cbd_base) {
3156 return -ENOMEM;
3157 }
3158
3159 memset(cbd_base, 0, bd_size);
3160
3161
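	/* Get the Ethernet address */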
3162 fec_get_mac(ndev);
3163
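	/* Make sure the address just obtained is programmed into the hardware */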
3164 fec_set_mac_address(ndev, NULL);
3165
3166
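	/* Set up the receive and transmit descriptor rings */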
3167 for (i = 0; i < fep->num_rx_queues; i++) {
3168 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
3169 unsigned size = dsize * rxq->bd.ring_size;
3170
3171 rxq->bd.qid = i;
3172 rxq->bd.base = cbd_base;
3173 rxq->bd.cur = cbd_base;
3174 rxq->bd.dma = bd_dma;
3175 rxq->bd.dsize = dsize;
3176 rxq->bd.dsize_log2 = dsize_log2;
3177 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
3178 bd_dma += size;
3179 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3180 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3181 }
3182
3183 for (i = 0; i < fep->num_tx_queues; i++) {
3184 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
3185 unsigned size = dsize * txq->bd.ring_size;
3186
3187 txq->bd.qid = i;
3188 txq->bd.base = cbd_base;
3189 txq->bd.cur = cbd_base;
3190 txq->bd.dma = bd_dma;
3191 txq->bd.dsize = dsize;
3192 txq->bd.dsize_log2 = dsize_log2;
3193 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
3194 bd_dma += size;
3195 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3196 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3197 }
3198
3199
3200
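	/* The FEC Ethernet specific entries in the device structure */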
3201 ndev->watchdog_timeo = TX_TIMEOUT;
3202 ndev->netdev_ops = &fec_netdev_ops;
3203 ndev->ethtool_ops = &fec_enet_ethtool_ops;
3204
3205 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
3206 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
3207
3208 if (fep->quirks & FEC_QUIRK_HAS_VLAN)
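		/* enable hw VLAN support */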
3210 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3211
3212 if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
3213 ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
3214
3215
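		/* enable hw accelerated checksum offload, scatter/gather and TSO */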
3216 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
3217 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
3218 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3219 }
3220
3221 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
3222 fep->tx_align = 0;
3223 fep->rx_align = 0x3f;
3224 }
3225
3226 ndev->hw_features = ndev->features;
3227
3228 fec_restart(ndev);
3229
3230 if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
3231 fec_enet_clear_ethtool_stats(ndev);
3232 else
3233 fec_enet_update_ethtool_stats(ndev);
3234
3235 return 0;
3236}
3237
3238#ifdef CONFIG_OF
3239static int fec_reset_phy(struct platform_device *pdev)
3240{
3241 int err, phy_reset;
3242 bool active_high = false;
3243 int msec = 1, phy_post_delay = 0;
3244 struct device_node *np = pdev->dev.of_node;
3245
3246 if (!np)
3247 return 0;
3248
3249 err = of_property_read_u32(np, "phy-reset-duration", &msec);
3250
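	/* A sane reset duration should not be longer than 1s; fall back to 1 ms */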
3251 if (!err && msec > 1000)
3252 msec = 1;
3253
3254 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
3255 if (phy_reset == -EPROBE_DEFER)
3256 return phy_reset;
3257 else if (!gpio_is_valid(phy_reset))
3258 return 0;
3259
3260 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
3261
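	/* Reject a post-reset delay longer than 1s as invalid */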
3262 if (!err && phy_post_delay > 1000)
3263 return -EINVAL;
3264
3265 active_high = of_property_read_bool(np, "phy-reset-active-high");
3266
3267 err = devm_gpio_request_one(&pdev->dev, phy_reset,
3268 active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
3269 "phy-reset");
3270 if (err) {
3271 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
3272 return err;
3273 }
3274
3275 if (msec > 20)
3276 msleep(msec);
3277 else
3278 usleep_range(msec * 1000, msec * 1000 + 1000);
3279
3280 gpio_set_value_cansleep(phy_reset, !active_high);
3281
3282 if (!phy_post_delay)
3283 return 0;
3284
3285 if (phy_post_delay > 20)
3286 msleep(phy_post_delay);
3287 else
3288 usleep_range(phy_post_delay * 1000,
3289 phy_post_delay * 1000 + 1000);
3290
3291 return 0;
3292}
3293#else
3294static int fec_reset_phy(struct platform_device *pdev)
3295{
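	/* In the non-DT case any PHY reset is handled by board/platform code */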
3300 return 0;
3301}
3302#endif
3303
3304static void
3305fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
3306{
3307 struct device_node *np = pdev->dev.of_node;
3308
3309 *num_tx = *num_rx = 1;
3310
3311 if (!np || !of_device_is_available(np))
3312 return;
3313
3314
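	/* Parse the number of tx and rx queues from the device tree */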
3315 of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
3316
3317 of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
3318
3319 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
3320 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
3321 *num_tx);
3322 *num_tx = 1;
3323 return;
3324 }
3325
3326 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
3327 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
3328 *num_rx);
3329 *num_rx = 1;
3330 return;
3331 }
3332
3333}
3334
3335static int fec_enet_get_irq_cnt(struct platform_device *pdev)
3336{
3337 int irq_cnt = platform_irq_count(pdev);
3338
	if (irq_cnt > FEC_IRQ_NUM)
		irq_cnt = FEC_IRQ_NUM;	/* last one is reserved for PPS */
	else if (irq_cnt == 2)
		irq_cnt = 1;	/* second IRQ is the PPS interrupt */
	else if (irq_cnt <= 0)
		irq_cnt = 1;	/* at least one IRQ is needed */
	return irq_cnt;
3345 return irq_cnt;
3346}
3347
3348static int
3349fec_probe(struct platform_device *pdev)
3350{
3351 struct fec_enet_private *fep;
3352 struct fec_platform_data *pdata;
3353 struct net_device *ndev;
3354 int i, irq, ret = 0;
3355 struct resource *r;
3356 const struct of_device_id *of_id;
3357 static int dev_id;
3358 struct device_node *np = pdev->dev.of_node, *phy_node;
3359 int num_tx_qs;
3360 int num_rx_qs;
3361 char irq_name[8];
3362 int irq_cnt;
3363
3364 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
3365
3366
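	/* Init network device */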
3367 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
3368 FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
3369 if (!ndev)
3370 return -ENOMEM;
3371
3372 SET_NETDEV_DEV(ndev, &pdev->dev);
3373
3374
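	/* Setup board info structure */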
3375 fep = netdev_priv(ndev);
3376
3377 of_id = of_match_device(fec_dt_ids, &pdev->dev);
3378 if (of_id)
3379 pdev->id_entry = of_id->data;
3380 fep->quirks = pdev->id_entry->driver_data;
3381
3382 fep->netdev = ndev;
3383 fep->num_rx_queues = num_rx_qs;
3384 fep->num_tx_queues = num_tx_qs;
3385
3386#if !defined(CONFIG_M5272)
3387
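	/* Default: enable pause frame auto-negotiation on gigabit-capable MACs */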
3388 if (fep->quirks & FEC_QUIRK_HAS_GBIT)
3389 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
3390#endif
3391
3392
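	/* Select default pin state */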
3393 pinctrl_pm_select_default_state(&pdev->dev);
3394
3395 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3396 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
3397 if (IS_ERR(fep->hwp)) {
3398 ret = PTR_ERR(fep->hwp);
3399 goto failed_ioremap;
3400 }
3401
3402 fep->pdev = pdev;
3403 fep->dev_id = dev_id++;
3404
3405 platform_set_drvdata(pdev, ndev);
3406
3407 if ((of_machine_is_compatible("fsl,imx6q") ||
3408 of_machine_is_compatible("fsl,imx6dl")) &&
3409 !of_property_read_bool(np, "fsl,err006687-workaround-present"))
3410 fep->quirks |= FEC_QUIRK_ERR006687;
3411
3412 if (of_get_property(np, "fsl,magic-packet", NULL))
3413 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
3414
3415 phy_node = of_parse_phandle(np, "phy-handle", 0);
3416 if (!phy_node && of_phy_is_fixed_link(np)) {
3417 ret = of_phy_register_fixed_link(np);
3418 if (ret < 0) {
3419 dev_err(&pdev->dev,
3420 "broken fixed-link specification\n");
3421 goto failed_phy;
3422 }
3423 phy_node = of_node_get(np);
3424 }
3425 fep->phy_node = phy_node;
3426
3427 ret = of_get_phy_mode(pdev->dev.of_node);
3428 if (ret < 0) {
3429 pdata = dev_get_platdata(&pdev->dev);
3430 if (pdata)
3431 fep->phy_interface = pdata->phy;
3432 else
3433 fep->phy_interface = PHY_INTERFACE_MODE_MII;
3434 } else {
3435 fep->phy_interface = ret;
3436 }
3437
3438 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
3439 if (IS_ERR(fep->clk_ipg)) {
3440 ret = PTR_ERR(fep->clk_ipg);
3441 goto failed_clk;
3442 }
3443
3444 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
3445 if (IS_ERR(fep->clk_ahb)) {
3446 ret = PTR_ERR(fep->clk_ahb);
3447 goto failed_clk;
3448 }
3449
3450 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
3451
3452
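	/* enet_out clock is optional, depends on board */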
3453 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
3454 if (IS_ERR(fep->clk_enet_out))
3455 fep->clk_enet_out = NULL;
3456
3457 fep->ptp_clk_on = false;
3458 mutex_init(&fep->ptp_clk_mutex);
3459
3460
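	/* clk_ref is optional, depends on board */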
3461 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
3462 if (IS_ERR(fep->clk_ref))
3463 fep->clk_ref = NULL;
3464
3465 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
3466 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
3467 if (IS_ERR(fep->clk_ptp)) {
3468 fep->clk_ptp = NULL;
3469 fep->bufdesc_ex = false;
3470 }
3471
3472 ret = fec_enet_clk_enable(ndev, true);
3473 if (ret)
3474 goto failed_clk;
3475
3476 ret = clk_prepare_enable(fep->clk_ipg);
3477 if (ret)
3478 goto failed_clk_ipg;
3479
3480 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
3481 if (!IS_ERR(fep->reg_phy)) {
3482 ret = regulator_enable(fep->reg_phy);
3483 if (ret) {
3484 dev_err(&pdev->dev,
3485 "Failed to enable phy regulator: %d\n", ret);
3486 clk_disable_unprepare(fep->clk_ipg);
3487 goto failed_regulator;
3488 }
3489 } else {
3490 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3491 ret = -EPROBE_DEFER;
3492 goto failed_regulator;
3493 }
3494 fep->reg_phy = NULL;
3495 }
3496
3497 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3498 pm_runtime_use_autosuspend(&pdev->dev);
3499 pm_runtime_get_noresume(&pdev->dev);
3500 pm_runtime_set_active(&pdev->dev);
3501 pm_runtime_enable(&pdev->dev);
3502
3503 ret = fec_reset_phy(pdev);
3504 if (ret)
3505 goto failed_reset;
3506
3507 irq_cnt = fec_enet_get_irq_cnt(pdev);
3508 if (fep->bufdesc_ex)
3509 fec_ptp_init(pdev, irq_cnt);
3510
3511 ret = fec_enet_init(ndev);
3512 if (ret)
3513 goto failed_init;
3514
3515 for (i = 0; i < irq_cnt; i++) {
3516 snprintf(irq_name, sizeof(irq_name), "int%d", i);
3517 irq = platform_get_irq_byname(pdev, irq_name);
3518 if (irq < 0)
3519 irq = platform_get_irq(pdev, i);
3520 if (irq < 0) {
3521 ret = irq;
3522 goto failed_irq;
3523 }
3524 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
3525 0, pdev->name, ndev);
3526 if (ret)
3527 goto failed_irq;
3528
3529 fep->irq[i] = irq;
3530 }
3531
3532 init_completion(&fep->mdio_done);
3533 ret = fec_enet_mii_init(pdev);
3534 if (ret)
3535 goto failed_mii_init;
3536
3537
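	/* Carrier starts down, phylib will bring it up */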
3538 netif_carrier_off(ndev);
3539 fec_enet_clk_enable(ndev, false);
3540 pinctrl_pm_select_sleep_state(&pdev->dev);
3541
3542 ret = register_netdev(ndev);
3543 if (ret)
3544 goto failed_register;
3545
3546 device_init_wakeup(&ndev->dev, fep->wol_flag &
3547 FEC_WOL_HAS_MAGIC_PACKET);
3548
3549 if (fep->bufdesc_ex && fep->ptp_clock)
3550 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
3551
3552 fep->rx_copybreak = COPYBREAK_DEFAULT;
3553 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3554
3555 pm_runtime_mark_last_busy(&pdev->dev);
3556 pm_runtime_put_autosuspend(&pdev->dev);
3557
3558 return 0;
3559
3560failed_register:
3561 fec_enet_mii_remove(fep);
3562failed_mii_init:
3563failed_irq:
3564failed_init:
3565 fec_ptp_stop(pdev);
3566 if (fep->reg_phy)
3567 regulator_disable(fep->reg_phy);
3568failed_reset:
3569 pm_runtime_put(&pdev->dev);
3570 pm_runtime_disable(&pdev->dev);
3571failed_regulator:
3572failed_clk_ipg:
3573 fec_enet_clk_enable(ndev, false);
3574failed_clk:
3575 if (of_phy_is_fixed_link(np))
3576 of_phy_deregister_fixed_link(np);
3577 of_node_put(phy_node);
3578failed_phy:
3579 dev_id--;
3580failed_ioremap:
3581 free_netdev(ndev);
3582
3583 return ret;
3584}
3585
3586static int
3587fec_drv_remove(struct platform_device *pdev)
3588{
3589 struct net_device *ndev = platform_get_drvdata(pdev);
3590 struct fec_enet_private *fep = netdev_priv(ndev);
3591 struct device_node *np = pdev->dev.of_node;
3592
3593 cancel_work_sync(&fep->tx_timeout_work);
3594 fec_ptp_stop(pdev);
3595 unregister_netdev(ndev);
3596 fec_enet_mii_remove(fep);
3597 if (fep->reg_phy)
3598 regulator_disable(fep->reg_phy);
3599 pm_runtime_put(&pdev->dev);
3600 pm_runtime_disable(&pdev->dev);
3601 if (of_phy_is_fixed_link(np))
3602 of_phy_deregister_fixed_link(np);
3603 of_node_put(fep->phy_node);
3604 free_netdev(ndev);
3605
3606 return 0;
3607}
3608
3609static int __maybe_unused fec_suspend(struct device *dev)
3610{
3611 struct net_device *ndev = dev_get_drvdata(dev);
3612 struct fec_enet_private *fep = netdev_priv(ndev);
3613
3614 rtnl_lock();
3615 if (netif_running(ndev)) {
3616 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
3617 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
3618 phy_stop(ndev->phydev);
3619 napi_disable(&fep->napi);
3620 netif_tx_lock_bh(ndev);
3621 netif_device_detach(ndev);
3622 netif_tx_unlock_bh(ndev);
3623 fec_stop(ndev);
3624 fec_enet_clk_enable(ndev, false);
3625 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3626 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3627 }
3628 rtnl_unlock();
3629
3630 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3631 regulator_disable(fep->reg_phy);
3632
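	/* If the SoC supplies the PHY clock or controls the PHY regulator,
	 * the link will go down while suspended, so record it as down here.
	 */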
3636 if (fep->clk_enet_out || fep->reg_phy)
3637 fep->link = 0;
3638
3639 return 0;
3640}
3641
3642static int __maybe_unused fec_resume(struct device *dev)
3643{
3644 struct net_device *ndev = dev_get_drvdata(dev);
3645 struct fec_enet_private *fep = netdev_priv(ndev);
3646 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
3647 int ret;
3648 int val;
3649
3650 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
3651 ret = regulator_enable(fep->reg_phy);
3652 if (ret)
3653 return ret;
3654 }
3655
3656 rtnl_lock();
3657 if (netif_running(ndev)) {
3658 ret = fec_enet_clk_enable(ndev, true);
3659 if (ret) {
3660 rtnl_unlock();
3661 goto failed_clk;
3662 }
3663 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
3664 if (pdata && pdata->sleep_mode_enable)
3665 pdata->sleep_mode_enable(false);
3666 val = readl(fep->hwp + FEC_ECNTRL);
3667 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
3668 writel(val, fep->hwp + FEC_ECNTRL);
3669 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
3670 } else {
3671 pinctrl_pm_select_default_state(&fep->pdev->dev);
3672 }
3673 fec_restart(ndev);
3674 netif_tx_lock_bh(ndev);
3675 netif_device_attach(ndev);
3676 netif_tx_unlock_bh(ndev);
3677 napi_enable(&fep->napi);
3678 phy_start(ndev->phydev);
3679 }
3680 rtnl_unlock();
3681
3682 return 0;
3683
3684failed_clk:
3685 if (fep->reg_phy)
3686 regulator_disable(fep->reg_phy);
3687 return ret;
3688}
3689
3690static int __maybe_unused fec_runtime_suspend(struct device *dev)
3691{
3692 struct net_device *ndev = dev_get_drvdata(dev);
3693 struct fec_enet_private *fep = netdev_priv(ndev);
3694
3695 clk_disable_unprepare(fep->clk_ipg);
3696
3697 return 0;
3698}
3699
3700static int __maybe_unused fec_runtime_resume(struct device *dev)
3701{
3702 struct net_device *ndev = dev_get_drvdata(dev);
3703 struct fec_enet_private *fep = netdev_priv(ndev);
3704
3705 return clk_prepare_enable(fep->clk_ipg);
3706}
3707
3708static const struct dev_pm_ops fec_pm_ops = {
3709 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
3710 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
3711};
3712
3713static struct platform_driver fec_driver = {
3714 .driver = {
3715 .name = DRIVER_NAME,
3716 .pm = &fec_pm_ops,
3717 .of_match_table = fec_dt_ids,
3718 },
3719 .id_table = fec_devtype,
3720 .probe = fec_probe,
3721 .remove = fec_drv_remove,
3722};
3723
3724module_platform_driver(fec_driver);
3725
3726MODULE_ALIAS("platform:"DRIVER_NAME);
3727MODULE_LICENSE("GPL");
3728