/*******************************************************************************
  This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

  Copyright (C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
31#include <linux/clk.h>
32#include <linux/kernel.h>
33#include <linux/interrupt.h>
34#include <linux/ip.h>
35#include <linux/tcp.h>
36#include <linux/skbuff.h>
37#include <linux/ethtool.h>
38#include <linux/if_ether.h>
39#include <linux/crc32.h>
40#include <linux/mii.h>
41#include <linux/if.h>
42#include <linux/if_vlan.h>
43#include <linux/dma-mapping.h>
44#include <linux/slab.h>
45#include <linux/prefetch.h>
46#ifdef CONFIG_STMMAC_DEBUG_FS
47#include <linux/debugfs.h>
48#include <linux/seq_file.h>
49#endif
50#include <linux/net_tstamp.h>
51#include "stmmac_ptp.h"
52#include "stmmac.h"
53
54#undef STMMAC_DEBUG
55
56#ifdef STMMAC_DEBUG
57#define DBG(nlevel, klevel, fmt, args...) \
58 ((void)(netif_msg_##nlevel(priv) && \
59 printk(KERN_##klevel fmt, ## args)))
60#else
61#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
62#endif
63
64#undef STMMAC_RX_DEBUG
65
66#ifdef STMMAC_RX_DEBUG
67#define RX_DBG(fmt, args...) printk(fmt, ## args)
68#else
69#define RX_DBG(fmt, args...) do { } while (0)
70#endif
71
72#undef STMMAC_XMIT_DEBUG
73
74#ifdef STMMAC_XMIT_DEBUG
75#define TX_DBG(fmt, args...) printk(fmt, ## args)
76#else
77#define TX_DBG(fmt, args...) do { } while (0)
78#endif
79
80#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
81#define JUMBO_LEN 9000
82
83
84#define TX_TIMEO 5000
85static int watchdog = TX_TIMEO;
86module_param(watchdog, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
88
89static int debug = -1;
90module_param(debug, int, S_IRUGO | S_IWUSR);
91MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
92
93int phyaddr = -1;
94module_param(phyaddr, int, S_IRUGO);
95MODULE_PARM_DESC(phyaddr, "Physical device address");
96
97#define DMA_TX_SIZE 256
98static int dma_txsize = DMA_TX_SIZE;
99module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
101
102#define DMA_RX_SIZE 256
103static int dma_rxsize = DMA_RX_SIZE;
104module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
106
107static int flow_ctrl = FLOW_OFF;
108module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
109MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
110
111static int pause = PAUSE_TIME;
112module_param(pause, int, S_IRUGO | S_IWUSR);
113MODULE_PARM_DESC(pause, "Flow Control Pause Time");
114
115#define TC_DEFAULT 64
116static int tc = TC_DEFAULT;
117module_param(tc, int, S_IRUGO | S_IWUSR);
118MODULE_PARM_DESC(tc, "DMA threshold control value");
119
120#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
121static int buf_sz = DMA_BUFFER_SIZE;
122module_param(buf_sz, int, S_IRUGO | S_IWUSR);
123MODULE_PARM_DESC(buf_sz, "DMA buffer size");
124
125static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
126 NETIF_MSG_LINK | NETIF_MSG_IFUP |
127 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
128
129#define STMMAC_DEFAULT_LPI_TIMER 1000
130static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
131module_param(eee_timer, int, S_IRUGO | S_IWUSR);
132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
133#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
134
135
136
137
138static unsigned int chain_mode;
139module_param(chain_mode, int, S_IRUGO);
140MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
141
142static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
143
144#ifdef CONFIG_STMMAC_DEBUG_FS
145static int stmmac_init_fs(struct net_device *dev);
146static void stmmac_exit_fs(void);
147#endif
148
149#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it verifies the driver parameters and, if one of them is not
 * valid, it is replaced with its default value.
 */
156static void stmmac_verify_args(void)
157{
158 if (unlikely(watchdog < 0))
159 watchdog = TX_TIMEO;
160 if (unlikely(dma_rxsize < 0))
161 dma_rxsize = DMA_RX_SIZE;
162 if (unlikely(dma_txsize < 0))
163 dma_txsize = DMA_TX_SIZE;
164 if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
165 buf_sz = DMA_BUFFER_SIZE;
166 if (unlikely(flow_ctrl > 1))
167 flow_ctrl = FLOW_AUTO;
	else if (unlikely(flow_ctrl < 0))
169 flow_ctrl = FLOW_OFF;
170 if (unlikely((pause < 0) || (pause > 0xffff)))
171 pause = PAUSE_TIME;
172 if (eee_timer < 0)
173 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
174}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver sets the MDC clock
 *	dynamically according to the actual clock input.
 */
188static void stmmac_clk_csr_set(struct stmmac_priv *priv)
189{
190 u32 clk_rate;
191
192 clk_rate = clk_get_rate(priv->stmmac_clk);

	/* If the platform did not force a CSR clock range (MAC_CSR_H_FRQ_MASK
	 * not set in clk_csr), derive one from the input clock rate so the
	 * MDC clock stays within the valid MDIO frequency range.
	 */
201 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
202 if (clk_rate < CSR_F_35M)
203 priv->clk_csr = STMMAC_CSR_20_35M;
204 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
205 priv->clk_csr = STMMAC_CSR_35_60M;
206 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
207 priv->clk_csr = STMMAC_CSR_60_100M;
208 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
209 priv->clk_csr = STMMAC_CSR_100_150M;
210 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
211 priv->clk_csr = STMMAC_CSR_150_250M;
212 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
213 priv->clk_csr = STMMAC_CSR_250_300M;
214 }
215}
216
217#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
218static void print_pkt(unsigned char *buf, int len)
219{
220 int j;
221 pr_info("len = %d byte, buf addr: 0x%p", len, buf);
222 for (j = 0; j < len; j++) {
223 if ((j % 16) == 0)
224 pr_info("\n %03x:", j);
225 pr_info(" %02x", buf[j]);
226 }
227 pr_info("\n");
228}
229#endif
230
231
#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
233
234static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
235{
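	/* Free TX descriptors: one slot is always kept unused so that
	 * cur_tx == dirty_tx unambiguously means "ring empty".
	 */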
236 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
237}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be tweaked according to the link speed negotiated.
 */
245static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
246{
247 struct phy_device *phydev = priv->phydev;
248
249 if (likely(priv->plat->fix_mac_speed))
250 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
251}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that the tx path is idle and, if so,
 * moves the MAC transmitter into LPI mode (EEE).
 */
258static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
259{
	/* Enter LPI only when there are no pending TX descriptors. */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    !priv->tx_path_in_lpi_mode)
263 priv->hw->mac->set_eee_mode(priv->ioaddr);
264}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables the LPI state.
 * It is called from the xmit path.
 */
272void stmmac_disable_eee_mode(struct stmmac_priv *priv)
273{
274 priv->hw->mac->reset_eee_mode(priv->ioaddr);
275 del_timer_sync(&priv->eee_ctrl_timer);
276 priv->tx_path_in_lpi_mode = false;
277}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer
 * @arg: driver private structure passed as timer data
 * Description:
 * if there is no data transfer and the tx path is not already in LPI state,
 * the MAC transmitter is moved to LPI and the timer is rearmed.
 */
286static void stmmac_eee_ctrl_timer(unsigned long arg)
287{
288 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
289
290 stmmac_enable_eee_mode(priv);
291 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
292}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * If EEE support has been enabled while configuring the driver, if the GMAC
 * actually supports the EEE (from the HW cap register) and the PHY can also
 * manage EEE, then enable the LPI state and start the timer to verify if the
 * tx path can enter the LPI state.
 */
303bool stmmac_eee_init(struct stmmac_priv *priv)
304{
305 bool ret = false;
	/* When a PCS (RGMII/TBI/RTBI) is in use the driver cannot dialog with
	 * the PHY registers, hence EEE is not supported in these modes.
	 */
310 if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
311 (priv->pcs == STMMAC_PCS_RTBI))
312 goto out;
313
314
315 if (priv->dma_cap.eee) {
316
317 if (phy_init_eee(priv->phydev, 1))
318 goto out;
319
320 if (!priv->eee_active) {
321 priv->eee_active = 1;
322 init_timer(&priv->eee_ctrl_timer);
323 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
324 priv->eee_ctrl_timer.data = (unsigned long)priv;
325 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
326 add_timer(&priv->eee_ctrl_timer);
327
328 priv->hw->mac->set_eee_timer(priv->ioaddr,
329 STMMAC_DEFAULT_LIT_LS,
330 priv->tx_lpi_timer);
331 } else
332
333 priv->hw->mac->set_eee_pls(priv->ioaddr,
334 priv->phydev->link);
335
336 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
337
338 ret = true;
339 }
340out:
341 return ret;
342}

/**
 * stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @entry: descriptor index to be used
 * @skb: the socket buffer
 * Description:
 * This function reads the timestamp from the descriptor, performs some sanity
 * checks and passes it to the stack.
 */
352static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
353 unsigned int entry, struct sk_buff *skb)
354{
355 struct skb_shared_hwtstamps shhwtstamp;
356 u64 ns;
357 void *desc = NULL;
358
359 if (!priv->hwts_tx_en)
360 return;
361
362
363 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
364 return;
365
366 if (priv->adv_ts)
367 desc = (priv->dma_etx + entry);
368 else
369 desc = (priv->dma_tx + entry);
370
371
372 if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
373 return;
374
375
376 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
377
378 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
379 shhwtstamp.hwtstamp = ns_to_ktime(ns);
380
381 skb_tstamp_tx(skb, &shhwtstamp);
384}

/**
 * stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @entry: descriptor index to be used
 * @skb: the socket buffer
 * Description:
 * This function reads the received packet's timestamp from the descriptor,
 * performs some sanity checks and passes it to the stack.
 */
394static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
395 unsigned int entry, struct sk_buff *skb)
396{
397 struct skb_shared_hwtstamps *shhwtstamp = NULL;
398 u64 ns;
399 void *desc = NULL;
400
401 if (!priv->hwts_rx_en)
402 return;
403
404 if (priv->adv_ts)
405 desc = (priv->dma_erx + entry);
406 else
407 desc = (priv->dma_rx + entry);
408
409
410 if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
411 return;
412
413
414 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
415 shhwtstamp = skb_hwtstamps(skb);
416 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
417 shhwtstamp->hwtstamp = ns_to_ktime(ns);
418}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping
 * @dev: device pointer
 * @ifr: an IOCTL specific structure that can contain a pointer to a
 * hwtstamp_config structure used to pass information to the driver
 * Description:
 * This function configures the MAC to enable/disable both outgoing (TX)
 * and incoming (RX) packet time stamping based on user input.
 * Return value:
 * 0 on success and an appropriate -ve integer on failure.
 */
431static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
432{
433 struct stmmac_priv *priv = netdev_priv(dev);
434 struct hwtstamp_config config;
435 struct timespec now;
436 u64 temp = 0;
437 u32 ptp_v2 = 0;
438 u32 tstamp_all = 0;
439 u32 ptp_over_ipv4_udp = 0;
440 u32 ptp_over_ipv6_udp = 0;
441 u32 ptp_over_ethernet = 0;
442 u32 snap_type_sel = 0;
443 u32 ts_master_en = 0;
444 u32 ts_event_en = 0;
445 u32 value = 0;
446
447 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
448 netdev_alert(priv->dev, "No support for HW time stamping\n");
449 priv->hwts_tx_en = 0;
450 priv->hwts_rx_en = 0;
451
452 return -EOPNOTSUPP;
453 }
454
455 if (copy_from_user(&config, ifr->ifr_data,
456 sizeof(struct hwtstamp_config)))
457 return -EFAULT;
458
459 pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
460 __func__, config.flags, config.tx_type, config.rx_filter);
461
	/* reserved for future extensions */
463 if (config.flags)
464 return -EINVAL;
465
466 switch (config.tx_type) {
467 case HWTSTAMP_TX_OFF:
468 priv->hwts_tx_en = 0;
469 break;
470 case HWTSTAMP_TX_ON:
471 priv->hwts_tx_en = 1;
472 break;
473 default:
474 return -ERANGE;
475 }
476
477 if (priv->adv_ts) {
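		/* With the advanced (IEEE 1588-2008) timestamping unit the
		 * full set of PTP v1/v2 RX filters below can be programmed.
		 */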
478 switch (config.rx_filter) {
479 case HWTSTAMP_FILTER_NONE:
480
481 config.rx_filter = HWTSTAMP_FILTER_NONE;
482 break;
483
484 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
485
486 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
487
488 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
489
490 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
491 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
492 break;
493
494 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
495
496 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
497
498 ts_event_en = PTP_TCR_TSEVNTENA;
499
500 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
501 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
502 break;
503
504 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
505
506 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
507
508 ts_master_en = PTP_TCR_TSMSTRENA;
509 ts_event_en = PTP_TCR_TSEVNTENA;
510
511 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
512 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
513 break;
514
515 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
516
517 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
518 ptp_v2 = PTP_TCR_TSVER2ENA;
519
520 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
521
522 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
523 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
524 break;
525
526 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
527
528 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
529 ptp_v2 = PTP_TCR_TSVER2ENA;
530
531 ts_event_en = PTP_TCR_TSEVNTENA;
532
533 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
534 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
535 break;
536
537 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
538
539 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
540 ptp_v2 = PTP_TCR_TSVER2ENA;
541
542 ts_master_en = PTP_TCR_TSMSTRENA;
543 ts_event_en = PTP_TCR_TSEVNTENA;
544
545 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
546 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
547 break;
548
549 case HWTSTAMP_FILTER_PTP_V2_EVENT:
550
551 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
552 ptp_v2 = PTP_TCR_TSVER2ENA;
553
554 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
555
556 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
557 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
558 ptp_over_ethernet = PTP_TCR_TSIPENA;
559 break;
560
561 case HWTSTAMP_FILTER_PTP_V2_SYNC:
562
563 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
564 ptp_v2 = PTP_TCR_TSVER2ENA;
565
566 ts_event_en = PTP_TCR_TSEVNTENA;
567
568 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
569 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
570 ptp_over_ethernet = PTP_TCR_TSIPENA;
571 break;
572
573 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
574
575 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
576 ptp_v2 = PTP_TCR_TSVER2ENA;
577
578 ts_master_en = PTP_TCR_TSMSTRENA;
579 ts_event_en = PTP_TCR_TSEVNTENA;
580
581 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
582 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
583 ptp_over_ethernet = PTP_TCR_TSIPENA;
584 break;
585
586 case HWTSTAMP_FILTER_ALL:
587
588 config.rx_filter = HWTSTAMP_FILTER_ALL;
589 tstamp_all = PTP_TCR_TSENALL;
590 break;
591
592 default:
593 return -ERANGE;
594 }
595 } else {
596 switch (config.rx_filter) {
597 case HWTSTAMP_FILTER_NONE:
598 config.rx_filter = HWTSTAMP_FILTER_NONE;
599 break;
600 default:
601
602 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
603 break;
604 }
605 }
606 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
607
608 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
609 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
610 else {
611 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
612 tstamp_all | ptp_v2 | ptp_over_ethernet |
613 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
614 ts_master_en | snap_type_sel);
615
616 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
617
618
619 priv->hw->ptp->config_sub_second_increment(priv->ioaddr);

		/* Calculate the default addend so that the accumulated
		 * nanoseconds counter advances with a 50MHz reference:
		 *   addend = (2^32 * 50MHz) / STMMAC_SYSCLOCK
		 * and 2^x * y == (y << x), hence 2^32 * 50000000 is
		 * (50000000 << 32).
		 */
632 temp = (u64) (50000000ULL << 32);
633 priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
634 priv->hw->ptp->config_addend(priv->ioaddr,
635 priv->default_addend);
636
637
638 getnstimeofday(&now);
639 priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
640 now.tv_nsec);
641 }
642
643 return copy_to_user(ifr->ifr_data, &config,
644 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
645}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this verifies if the HW supports PTPv1 or PTPv2 by looking at
 * the HW cap register. It also registers the ptp driver.
 */
654static int stmmac_init_ptp(struct stmmac_priv *priv)
655{
656 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
657 return -EOPNOTSUPP;
658
	if (priv->dma_cap.time_stamp) {
		priv->adv_ts = 0;
		if (netif_msg_hw(priv))
			pr_debug("IEEE 1588-2002 Time Stamp supported\n");
	}
	if (priv->dma_cap.atime_stamp && priv->extend_desc) {
		priv->adv_ts = 1;
		if (netif_msg_hw(priv))
			pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
	}
670
671 priv->hw->ptp = &stmmac_ptp;
672 priv->hwts_tx_en = 0;
673 priv->hwts_rx_en = 0;
674
675 return stmmac_ptp_register(priv);
676}
677
678static void stmmac_release_ptp(struct stmmac_priv *priv)
679{
680 stmmac_ptp_unregister(priv);
681}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching on different networks (that are EEE capable).
 */
688static void stmmac_adjust_link(struct net_device *dev)
689{
690 struct stmmac_priv *priv = netdev_priv(dev);
691 struct phy_device *phydev = priv->phydev;
692 unsigned long flags;
693 int new_state = 0;
694 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
695
696 if (phydev == NULL)
697 return;
698
699 DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
700 phydev->addr, phydev->link);
701
702 spin_lock_irqsave(&priv->lock, flags);
703
704 if (phydev->link) {
705 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
706
707
708
709 if (phydev->duplex != priv->oldduplex) {
710 new_state = 1;
711 if (!(phydev->duplex))
712 ctrl &= ~priv->hw->link.duplex;
713 else
714 ctrl |= priv->hw->link.duplex;
715 priv->oldduplex = phydev->duplex;
716 }
717
718 if (phydev->pause)
719 priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
720 fc, pause_time);
721
722 if (phydev->speed != priv->speed) {
723 new_state = 1;
724 switch (phydev->speed) {
725 case 1000:
726 if (likely(priv->plat->has_gmac))
727 ctrl &= ~priv->hw->link.port;
728 stmmac_hw_fix_mac_speed(priv);
729 break;
730 case 100:
731 case 10:
732 if (priv->plat->has_gmac) {
733 ctrl |= priv->hw->link.port;
734 if (phydev->speed == SPEED_100) {
735 ctrl |= priv->hw->link.speed;
736 } else {
737 ctrl &= ~(priv->hw->link.speed);
738 }
739 } else {
740 ctrl &= ~priv->hw->link.port;
741 }
742 stmmac_hw_fix_mac_speed(priv);
743 break;
744 default:
745 if (netif_msg_link(priv))
746 pr_warn("%s: Speed (%d) not 10/100\n",
747 dev->name, phydev->speed);
748 break;
749 }
750
751 priv->speed = phydev->speed;
752 }
753
754 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
755
756 if (!priv->oldlink) {
757 new_state = 1;
758 priv->oldlink = 1;
759 }
760 } else if (priv->oldlink) {
761 new_state = 1;
762 priv->oldlink = 0;
763 priv->speed = 0;
764 priv->oldduplex = -1;
765 }
766
767 if (new_state && netif_msg_link(priv))
768 phy_print_status(phydev);

	/* At this stage, it could be needed to setup the EEE or adjust some
	 * MAC related HW registers.
	 */
773 priv->eee_enabled = stmmac_eee_init(priv);
774
775 spin_unlock_irqrestore(&priv->lock, flags);
776
777 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
778}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
787static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
788{
789 int interface = priv->plat->interface;
790
791 if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
796 pr_debug("STMMAC: PCS RGMII support enable\n");
797 priv->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
799 pr_debug("STMMAC: PCS SGMII support enable\n");
800 priv->pcs = STMMAC_PCS_SGMII;
801 }
802 }
803}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
 * Return value:
 * 0 on success
 */
813static int stmmac_init_phy(struct net_device *dev)
814{
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct phy_device *phydev;
817 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
818 char bus_id[MII_BUS_ID_SIZE];
819 int interface = priv->plat->interface;
820 priv->oldlink = 0;
821 priv->speed = 0;
822 priv->oldduplex = -1;
823
824 if (priv->plat->phy_bus_name)
825 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
826 priv->plat->phy_bus_name, priv->plat->bus_id);
827 else
828 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
829 priv->plat->bus_id);
830
831 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
832 priv->plat->phy_addr);
833 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
834
835 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
836
837 if (IS_ERR(phydev)) {
838 pr_err("%s: Could not attach to PHY\n", dev->name);
839 return PTR_ERR(phydev);
840 }

	/* Stop advertising 1000BASE capability if interface is not GMII */
843 if ((interface == PHY_INTERFACE_MODE_MII) ||
844 (interface == PHY_INTERFACE_MODE_RMII))
845 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
846 SUPPORTED_1000baseT_Full);

	/* Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 */
855 if (phydev->phy_id == 0) {
856 phy_disconnect(phydev);
857 return -ENODEV;
858 }
859 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
860 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
861
862 priv->phydev = phydev;
863
864 return 0;
865}

/**
 * stmmac_display_ring - display ring
 * @head: pointer to the descriptor ring
 * @size: size of the ring
 * @extend_desc: set when extended descriptors are used
 * Description: display the control/status and buffer descriptors.
 */
874static void stmmac_display_ring(void *head, int size, int extend_desc)
875{
876 int i;
877 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
878 struct dma_desc *p = (struct dma_desc *)head;
879
880 for (i = 0; i < size; i++) {
881 u64 x;
882 if (extend_desc) {
883 x = *(u64 *) ep;
884 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
885 i, (unsigned int)virt_to_phys(ep),
886 (unsigned int)x, (unsigned int)(x >> 32),
887 ep->basic.des2, ep->basic.des3);
888 ep++;
889 } else {
890 x = *(u64 *) p;
891 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
892 i, (unsigned int)virt_to_phys(p),
893 (unsigned int)x, (unsigned int)(x >> 32),
894 p->des2, p->des3);
895 p++;
896 }
897 pr_info("\n");
898 }
899}
900
901static void stmmac_display_rings(struct stmmac_priv *priv)
902{
903 unsigned int txsize = priv->dma_tx_size;
904 unsigned int rxsize = priv->dma_rx_size;
905
906 if (priv->extend_desc) {
907 pr_info("Extended RX descriptor ring:\n");
908 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
909 pr_info("Extended TX descriptor ring:\n");
910 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
911 } else {
912 pr_info("RX descriptor ring:\n");
913 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
914 pr_info("TX descriptor ring:\n");
915 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
916 }
917}
918
919static int stmmac_set_bfsize(int mtu, int bufsize)
920{
921 int ret = bufsize;
922
923 if (mtu >= BUF_SIZE_4KiB)
924 ret = BUF_SIZE_8KiB;
925 else if (mtu >= BUF_SIZE_2KiB)
926 ret = BUF_SIZE_4KiB;
927 else if (mtu >= DMA_BUFFER_SIZE)
928 ret = BUF_SIZE_2KiB;
929 else
930 ret = DMA_BUFFER_SIZE;
931
932 return ret;
933}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors
 * in case of both basic and extended descriptors are used.
 */
941static void stmmac_clear_descriptors(struct stmmac_priv *priv)
942{
943 int i;
944 unsigned int txsize = priv->dma_tx_size;
945 unsigned int rxsize = priv->dma_rx_size;
946
947
948 for (i = 0; i < rxsize; i++)
949 if (priv->extend_desc)
950 priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
951 priv->use_riwt, priv->mode,
952 (i == rxsize - 1));
953 else
954 priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
955 priv->use_riwt, priv->mode,
956 (i == rxsize - 1));
957 for (i = 0; i < txsize; i++)
958 if (priv->extend_desc)
959 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
960 priv->mode,
961 (i == txsize - 1));
962 else
963 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
964 priv->mode,
965 (i == txsize - 1));
966}
967
968static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
969 int i)
970{
971 struct sk_buff *skb;
972
973 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
974 GFP_KERNEL);
975 if (unlikely(skb == NULL)) {
976 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
977 return 1;
978 }
979 skb_reserve(skb, NET_IP_ALIGN);
980 priv->rx_skbuff[i] = skb;
981 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
982 priv->dma_buf_sz,
983 DMA_FROM_DEVICE);
984
985 p->des2 = priv->rx_skbuff_dma[i];
986
987 if ((priv->mode == STMMAC_RING_MODE) &&
988 (priv->dma_buf_sz == BUF_SIZE_16KiB))
989 priv->hw->ring->init_desc3(p);
990
991 return 0;
992}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors and
 * allocates the socket buffers. It supports the chained and ring modes.
 */
1001static void init_dma_desc_rings(struct net_device *dev)
1002{
1003 int i;
1004 struct stmmac_priv *priv = netdev_priv(dev);
1005 unsigned int txsize = priv->dma_tx_size;
1006 unsigned int rxsize = priv->dma_rx_size;
1007 unsigned int bfsize = 0;

	/* Set the max buffer size according to the DESC mode and the MTU.
	 * Note that RING mode allows 16KiB bsize.
	 */
1012 if (priv->mode == STMMAC_RING_MODE)
1013 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
1014
1015 if (bfsize < BUF_SIZE_16KiB)
1016 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1017
1018 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
1019 txsize, rxsize, bfsize);
1020
1021 if (priv->extend_desc) {
1022 priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
1023 sizeof(struct
1024 dma_extended_desc),
1025 &priv->dma_rx_phy,
1026 GFP_KERNEL);
1027 priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
1028 sizeof(struct
1029 dma_extended_desc),
1030 &priv->dma_tx_phy,
1031 GFP_KERNEL);
1032 if ((!priv->dma_erx) || (!priv->dma_etx))
1033 return;
1034 } else {
1035 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
1036 sizeof(struct dma_desc),
1037 &priv->dma_rx_phy,
1038 GFP_KERNEL);
1039 priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
1040 sizeof(struct dma_desc),
1041 &priv->dma_tx_phy,
1042 GFP_KERNEL);
1043 if ((!priv->dma_rx) || (!priv->dma_tx))
1044 return;
1045 }
1046
1047 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
1048 GFP_KERNEL);
1049 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
1050 GFP_KERNEL);
1051 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
1052 GFP_KERNEL);
1053 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1054 GFP_KERNEL);
1055 if (netif_msg_drv(priv))
1056 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1057 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
1058
1059
1060 DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
1061 for (i = 0; i < rxsize; i++) {
1062 struct dma_desc *p;
1063 if (priv->extend_desc)
1064 p = &((priv->dma_erx + i)->basic);
1065 else
1066 p = priv->dma_rx + i;
1067
1068 if (stmmac_init_rx_buffers(priv, p, i))
1069 break;
1070
1071 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
1072 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
1073 }
1074 priv->cur_rx = 0;
1075 priv->dirty_rx = (unsigned int)(i - rxsize);
1076 priv->dma_buf_sz = bfsize;
1077 buf_sz = bfsize;
1078
1079
1080 if (priv->mode == STMMAC_CHAIN_MODE) {
1081 if (priv->extend_desc) {
1082 priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
1083 rxsize, 1);
1084 priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
1085 txsize, 1);
1086 } else {
1087 priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
1088 rxsize, 0);
1089 priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
1090 txsize, 0);
1091 }
1092 }
1093
1094
1095 for (i = 0; i < txsize; i++) {
1096 struct dma_desc *p;
1097 if (priv->extend_desc)
1098 p = &((priv->dma_etx + i)->basic);
1099 else
1100 p = priv->dma_tx + i;
1101 p->des2 = 0;
1102 priv->tx_skbuff_dma[i] = 0;
1103 priv->tx_skbuff[i] = NULL;
1104 }
1105
1106 priv->dirty_tx = 0;
1107 priv->cur_tx = 0;
1108
1109 stmmac_clear_descriptors(priv);
1110
1111 if (netif_msg_hw(priv))
1112 stmmac_display_rings(priv);
1113}
1114
1115static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1116{
1117 int i;
1118
1119 for (i = 0; i < priv->dma_rx_size; i++) {
1120 if (priv->rx_skbuff[i]) {
1121 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
1122 priv->dma_buf_sz, DMA_FROM_DEVICE);
1123 dev_kfree_skb_any(priv->rx_skbuff[i]);
1124 }
1125 priv->rx_skbuff[i] = NULL;
1126 }
1127}
1128
1129static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1130{
1131 int i;
1132
1133 for (i = 0; i < priv->dma_tx_size; i++) {
1134 if (priv->tx_skbuff[i] != NULL) {
1135 struct dma_desc *p;
1136 if (priv->extend_desc)
1137 p = &((priv->dma_etx + i)->basic);
1138 else
1139 p = priv->dma_tx + i;
1140
1141 if (priv->tx_skbuff_dma[i])
1142 dma_unmap_single(priv->device,
1143 priv->tx_skbuff_dma[i],
1144 priv->hw->desc->get_tx_len(p),
1145 DMA_TO_DEVICE);
1146 dev_kfree_skb_any(priv->tx_skbuff[i]);
1147 priv->tx_skbuff[i] = NULL;
1148 priv->tx_skbuff_dma[i] = 0;
1149 }
1150 }
1151}
1152
1153static void free_dma_desc_resources(struct stmmac_priv *priv)
1154{
1155
1156 dma_free_rx_skbufs(priv);
1157 dma_free_tx_skbufs(priv);
1158
1159
1160 if (!priv->extend_desc) {
1161 dma_free_coherent(priv->device,
1162 priv->dma_tx_size * sizeof(struct dma_desc),
1163 priv->dma_tx, priv->dma_tx_phy);
1164 dma_free_coherent(priv->device,
1165 priv->dma_rx_size * sizeof(struct dma_desc),
1166 priv->dma_rx, priv->dma_rx_phy);
1167 } else {
1168 dma_free_coherent(priv->device, priv->dma_tx_size *
1169 sizeof(struct dma_extended_desc),
1170 priv->dma_etx, priv->dma_tx_phy);
1171 dma_free_coherent(priv->device, priv->dma_rx_size *
1172 sizeof(struct dma_extended_desc),
1173 priv->dma_erx, priv->dma_rx_phy);
1174 }
1175 kfree(priv->rx_skbuff_dma);
1176 kfree(priv->rx_skbuff);
1177 kfree(priv->tx_skbuff_dma);
1178 kfree(priv->tx_skbuff);
1179}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
 * or Store-And-Forward capability.
 */
1187static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1188{
1189 if (likely(priv->plat->force_sf_dma_mode ||
1190 ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
		/* In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE is actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to skip csum insertion in the TDES.
		 */
1198 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
1199 tc = SF_DMA_MODE;
1200 } else
1201 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
1202}

/**
 * stmmac_tx_clean - reclaim resources after transmission completes
 * @priv: driver private structure
 * Description: it reclaims the transmit resources after transmission
 * completes.
 */
1209static void stmmac_tx_clean(struct stmmac_priv *priv)
1210{
1211 unsigned int txsize = priv->dma_tx_size;
1212
1213 spin_lock(&priv->tx_lock);
1214
1215 priv->xstats.tx_clean++;
1216
1217 while (priv->dirty_tx != priv->cur_tx) {
1218 int last;
1219 unsigned int entry = priv->dirty_tx % txsize;
1220 struct sk_buff *skb = priv->tx_skbuff[entry];
1221 struct dma_desc *p;
1222
1223 if (priv->extend_desc)
1224 p = (struct dma_desc *)(priv->dma_etx + entry);
1225 else
1226 p = priv->dma_tx + entry;
1227
1228
1229 if (priv->hw->desc->get_tx_owner(p))
1230 break;
1231
1232
1233 last = priv->hw->desc->get_tx_ls(p);
1234 if (likely(last)) {
1235 int tx_error =
1236 priv->hw->desc->tx_status(&priv->dev->stats,
1237 &priv->xstats, p,
1238 priv->ioaddr);
1239 if (likely(tx_error == 0)) {
1240 priv->dev->stats.tx_packets++;
1241 priv->xstats.tx_pkt_n++;
1242 } else
1243 priv->dev->stats.tx_errors++;
1244
1245 stmmac_get_tx_hwtstamp(priv, entry, skb);
1246 }
1247 TX_DBG("%s: curr %d, dirty %d\n", __func__,
1248 priv->cur_tx, priv->dirty_tx);
1249
1250 if (likely(priv->tx_skbuff_dma[entry])) {
1251 dma_unmap_single(priv->device,
1252 priv->tx_skbuff_dma[entry],
1253 priv->hw->desc->get_tx_len(p),
1254 DMA_TO_DEVICE);
1255 priv->tx_skbuff_dma[entry] = 0;
1256 }
1257 priv->hw->ring->clean_desc3(priv, p);
1258
1259 if (likely(skb != NULL)) {
1260 dev_kfree_skb(skb);
1261 priv->tx_skbuff[entry] = NULL;
1262 }
1263
1264 priv->hw->desc->release_tx_desc(p, priv->mode);
1265
1266 priv->dirty_tx++;
1267 }
1268 if (unlikely(netif_queue_stopped(priv->dev) &&
1269 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
1270 netif_tx_lock(priv->dev);
1271 if (netif_queue_stopped(priv->dev) &&
1272 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
1273 TX_DBG("%s: restart transmit\n", __func__);
1274 netif_wake_queue(priv->dev);
1275 }
1276 netif_tx_unlock(priv->dev);
1277 }
1278
1279 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1280 stmmac_enable_eee_mode(priv);
1281 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1282 }
1283 spin_unlock(&priv->tx_lock);
1284}
1285
1286static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1287{
1288 priv->hw->dma->enable_dma_irq(priv->ioaddr);
1289}
1290
1291static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1292{
1293 priv->hw->dma->disable_dma_irq(priv->ioaddr);
1294}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
1302static void stmmac_tx_err(struct stmmac_priv *priv)
1303{
1304 int i;
1305 int txsize = priv->dma_tx_size;
1306 netif_stop_queue(priv->dev);
1307
1308 priv->hw->dma->stop_tx(priv->ioaddr);
1309 dma_free_tx_skbufs(priv);
1310 for (i = 0; i < txsize; i++)
1311 if (priv->extend_desc)
1312 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1313 priv->mode,
1314 (i == txsize - 1));
1315 else
1316 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1317 priv->mode,
1318 (i == txsize - 1));
1319 priv->dirty_tx = 0;
1320 priv->cur_tx = 0;
1321 priv->hw->dma->start_tx(priv->ioaddr);
1322
1323 priv->dev->stats.tx_errors++;
1324 netif_wake_queue(priv->dev);
1325}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine to understand which type of interrupt
 * happened. If a Normal interrupt with either TX or RX work pending is
 * reported, the NAPI poll is scheduled.
 */
1335static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1336{
1337 int status;
1338
1339 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1340 if (likely((status & handle_rx)) || (status & handle_tx)) {
1341 if (likely(napi_schedule_prep(&priv->napi))) {
1342 stmmac_disable_dma_irq(priv);
1343 __napi_schedule(&priv->napi);
1344 }
1345 }
1346 if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
1348 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
1349 tc += 64;
1350 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
1351 priv->xstats.threshold = tc;
1352 }
1353 } else if (unlikely(status == tx_hard_error))
1354 stmmac_tx_err(priv);
1355}

/**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; the counters are managed in SW.
 */
1362static void stmmac_mmc_setup(struct stmmac_priv *priv)
1363{
1364 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1365 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1366
1367 dwmac_mmc_intr_all_mask(priv->ioaddr);
1368
1369 if (priv->dma_cap.rmon) {
1370 dwmac_mmc_ctrl(priv->ioaddr, mode);
1371 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1372 } else
1373 pr_info(" No MAC Management Counters available\n");
1374}
1375
1376static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1377{
1378 u32 hwid = priv->hw->synopsys_uid;
1379
1380
1381 if (likely(hwid)) {
1382 u32 uid = ((hwid & 0x0000ff00) >> 8);
1383 u32 synid = (hwid & 0x000000ff);
1384
1385 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
1386 uid, synid);
1387
1388 return synid;
1389 }
1390 return 0;
1391}

/**
 * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks whether the extended descriptors
 * are supported by the HW cap register and sets the MAC HW descriptor ops.
 */
1400static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1401{
1402 if (priv->plat->enh_desc) {
1403 pr_info(" Enhanced/Alternate descriptors\n");
1404
1405
1406 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1407 pr_info("\tEnabled extended descriptors\n");
1408 priv->extend_desc = 1;
1409 } else
1410 pr_warn("Extended descriptors not supported\n");
1411
1412 priv->hw->desc = &enh_desc_ops;
1413 } else {
1414 pr_info(" Normal descriptors\n");
1415 priv->hw->desc = &ndesc_ops;
1416 }
1417}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap register
 * @priv: driver private structure
 * Description:
 * New GMAC chip generations have a register to indicate the presence of the
 * optional features/functions.
 * This can also be used to override the value passed through the platform
 * and is necessary for old MAC10/100 and GMAC chips.
 */
1428static int stmmac_get_hw_features(struct stmmac_priv *priv)
1429{
1430 u32 hw_cap = 0;
1431
1432 if (priv->hw->dma->get_hw_feature) {
1433 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
1434
1435 priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
1436 priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
1437 priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
1438 priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
1439 priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
1440 priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
1441 priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
1442 priv->dma_cap.pmt_remote_wake_up =
1443 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
1444 priv->dma_cap.pmt_magic_frame =
1445 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
1446
1447 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
1448
1449 priv->dma_cap.time_stamp =
1450 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
1451
1452 priv->dma_cap.atime_stamp =
1453 (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
1454
1455 priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
1456 priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
1457
1458 priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
1459 priv->dma_cap.rx_coe_type1 =
1460 (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
1461 priv->dma_cap.rx_coe_type2 =
1462 (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
1463 priv->dma_cap.rxfifo_over_2048 =
1464 (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
1465
1466 priv->dma_cap.number_rx_channel =
1467 (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
1468 priv->dma_cap.number_tx_channel =
1469 (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
1470
1471 priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1472 }
1473
1474 return hw_cap;
1475}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies if the MAC address is valid; if not, it reads it from the
 * hardware and, as a last resort, generates a random MAC address.
 */
1484static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1485{
1486 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1487 priv->hw->mac->get_umac_addr((void __iomem *)
1488 priv->dev->base_addr,
1489 priv->dev->dev_addr, 0);
1490 if (!is_valid_ether_addr(priv->dev->dev_addr))
1491 eth_hw_addr_random(priv->dev);
1492 }
1493 pr_warn("%s: device MAC address %pM\n", priv->dev->name,
1494 priv->dev->dev_addr);
1495}

/**
 * stmmac_init_dma_engine - DMA init
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if these are not passed, a default is kept for the MAC or GMAC.
 */
1505static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1506{
1507 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
1508 int mixed_burst = 0;
1509 int atds = 0;
1510
1511 if (priv->plat->dma_cfg) {
1512 pbl = priv->plat->dma_cfg->pbl;
1513 fixed_burst = priv->plat->dma_cfg->fixed_burst;
1514 mixed_burst = priv->plat->dma_cfg->mixed_burst;
1515 burst_len = priv->plat->dma_cfg->burst_len;
1516 }
1517
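	/* Extended descriptors used in ring mode need the alternate (larger)
	 * descriptor size, signalled to the DMA through the atds flag.
	 */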
1518 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1519 atds = 1;
1520
1521 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1522 burst_len, priv->dma_tx_phy,
1523 priv->dma_rx_phy, atds);
1524}

/**
 * stmmac_tx_timer - mitigation sw timer for tx
 * @data: driver private structure passed as timer data
 * Description:
 * This is the timer handler that directly invokes stmmac_tx_clean.
 */
1532static void stmmac_tx_timer(unsigned long data)
1533{
1534 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1535
1536 stmmac_tx_clean(priv);
1537}

/**
 * stmmac_init_tx_coalesce - init tx mitigation options
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
1547static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1548{
1549 priv->tx_coal_frames = STMMAC_TX_FRAMES;
1550 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1551 init_timer(&priv->txtimer);
1552 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1553 priv->txtimer.data = (unsigned long)priv;
1554 priv->txtimer.function = stmmac_tx_timer;
1555 add_timer(&priv->txtimer);
1556}

/**
 * stmmac_open - open entry point of the driver
 * @dev: pointer to the device structure
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
1567static int stmmac_open(struct net_device *dev)
1568{
1569 struct stmmac_priv *priv = netdev_priv(dev);
1570 int ret;
1571
1572 clk_prepare_enable(priv->stmmac_clk);
1573
1574 stmmac_check_ether_addr(priv);
1575
1576 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1577 priv->pcs != STMMAC_PCS_RTBI) {
1578 ret = stmmac_init_phy(dev);
1579 if (ret) {
1580 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1581 __func__, ret);
1582 goto open_error;
1583 }
1584 }
1585
1586
1587 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1588 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1589 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1590 init_dma_desc_rings(dev);
1591
1592
1593 ret = stmmac_init_dma_engine(priv);
1594 if (ret < 0) {
1595 pr_err("%s: DMA initialization failed\n", __func__);
1596 goto open_error;
1597 }
1598
1599
1600 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
1601
1602
1603 if (priv->plat->bus_setup)
1604 priv->plat->bus_setup(priv->ioaddr);
1605
1606
1607 priv->hw->mac->core_init(priv->ioaddr);
1608
1609
1610 ret = request_irq(dev->irq, stmmac_interrupt,
1611 IRQF_SHARED, dev->name, dev);
1612 if (unlikely(ret < 0)) {
1613 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1614 __func__, dev->irq, ret);
1615 goto open_error;
1616 }
1617
1618
1619 if (priv->wol_irq != dev->irq) {
1620 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1621 IRQF_SHARED, dev->name, dev);
1622 if (unlikely(ret < 0)) {
1623 pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1624 __func__, priv->wol_irq, ret);
1625 goto open_error_wolirq;
1626 }
1627 }
1628
1629
1630 if (priv->lpi_irq != -ENXIO) {
1631 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1632 dev->name, dev);
1633 if (unlikely(ret < 0)) {
1634 pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1635 __func__, priv->lpi_irq, ret);
1636 goto open_error_lpiirq;
1637 }
1638 }
1639
1640
1641 stmmac_set_mac(priv->ioaddr, true);
1642
1643
1644 stmmac_dma_operation_mode(priv);
1645
1646
1647 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1648 priv->xstats.threshold = tc;
1649
1650 stmmac_mmc_setup(priv);
1651
1652 ret = stmmac_init_ptp(priv);
1653 if (ret)
1654 pr_warn("%s: failed PTP initialisation\n", __func__);
1655
1656#ifdef CONFIG_STMMAC_DEBUG_FS
1657 ret = stmmac_init_fs(dev);
1658 if (ret < 0)
1659 pr_warn("%s: failed debugFS registration\n", __func__);
1660#endif
1661
1662 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
1663 priv->hw->dma->start_tx(priv->ioaddr);
1664 priv->hw->dma->start_rx(priv->ioaddr);
1665
1666
1667 if (netif_msg_hw(priv)) {
1668 priv->hw->mac->dump_regs(priv->ioaddr);
1669 priv->hw->dma->dump_regs(priv->ioaddr);
1670 }
1671
1672 if (priv->phydev)
1673 phy_start(priv->phydev);
1674
1675 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1676
1677 priv->eee_enabled = stmmac_eee_init(priv);
1678
1679 stmmac_init_tx_coalesce(priv);
1680
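	/* When supported, program the RX watchdog (RIWT) so that receive
	 * interrupts are coalesced instead of raised for every frame.
	 */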
1681 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1682 priv->rx_riwt = MAX_DMA_RIWT;
1683 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1684 }
1685
1686 if (priv->pcs && priv->hw->mac->ctrl_ane)
1687 priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
1688
1689 napi_enable(&priv->napi);
1690 netif_start_queue(dev);
1691
1692 return 0;
1693
1694open_error_lpiirq:
1695 if (priv->wol_irq != dev->irq)
1696 free_irq(priv->wol_irq, dev);
1697
1698open_error_wolirq:
1699 free_irq(dev->irq, dev);
1700
1701open_error:
1702 if (priv->phydev)
1703 phy_disconnect(priv->phydev);
1704
1705 clk_disable_unprepare(priv->stmmac_clk);
1706
1707 return ret;
1708}

/**
 * stmmac_release - close entry point of the driver
 * @dev: device pointer
 * Description:
 * This is the stop entry point of the driver.
 */
1716static int stmmac_release(struct net_device *dev)
1717{
1718 struct stmmac_priv *priv = netdev_priv(dev);
1719
1720 if (priv->eee_enabled)
1721 del_timer_sync(&priv->eee_ctrl_timer);
1722
1723
1724 if (priv->phydev) {
1725 phy_stop(priv->phydev);
1726 phy_disconnect(priv->phydev);
1727 priv->phydev = NULL;
1728 }
1729
1730 netif_stop_queue(dev);
1731
1732 napi_disable(&priv->napi);
1733
1734 del_timer_sync(&priv->txtimer);
1735
1736
1737 free_irq(dev->irq, dev);
1738 if (priv->wol_irq != dev->irq)
1739 free_irq(priv->wol_irq, dev);
1740 if (priv->lpi_irq != -ENXIO)
1741 free_irq(priv->lpi_irq, dev);
1742
1743
1744 priv->hw->dma->stop_tx(priv->ioaddr);
1745 priv->hw->dma->stop_rx(priv->ioaddr);
1746
1747
1748 free_dma_desc_resources(priv);
1749
1750
1751 stmmac_set_mac(priv->ioaddr, false);
1752
1753 netif_carrier_off(dev);
1754
1755#ifdef CONFIG_STMMAC_DEBUG_FS
1756 stmmac_exit_fs();
1757#endif
1758 clk_disable_unprepare(priv->stmmac_clk);
1759
1760 stmmac_release_ptp(priv);
1761
1762 return 0;
1763}

/**
 * stmmac_xmit - Tx entry point of the driver
 * @skb: the socket buffer
 * @dev: device pointer
 * Description: this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and the SG feature.
 */
1773static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1774{
1775 struct stmmac_priv *priv = netdev_priv(dev);
1776 unsigned int txsize = priv->dma_tx_size;
1777 unsigned int entry;
1778 int i, csum_insertion = 0, is_jumbo = 0;
1779 int nfrags = skb_shinfo(skb)->nr_frags;
1780 struct dma_desc *desc, *first;
1781 unsigned int nopaged_len = skb_headlen(skb);
1782
1783 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1784 if (!netif_queue_stopped(dev)) {
1785 netif_stop_queue(dev);
1786
1787 pr_err("%s: Tx Ring full when queue awake\n", __func__);
1788 }
1789 return NETDEV_TX_BUSY;
1790 }
1791
1792 spin_lock(&priv->tx_lock);
1793
1794 if (priv->tx_path_in_lpi_mode)
1795 stmmac_disable_eee_mode(priv);
1796
1797 entry = priv->cur_tx % txsize;
1798
1799#ifdef STMMAC_XMIT_DEBUG
1800 if ((skb->len > ETH_FRAME_LEN) || nfrags)
1801 pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n"
1802 "\tn_frags: %d - ip_summed: %d - %s gso\n"
1803 "\ttx_count_frames %d\n", __func__, entry,
1804 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
1805 !skb_is_gso(skb) ? "isn't" : "is",
1806 priv->tx_count_frames);
1807#endif
1808
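	/* CHECKSUM_PARTIAL means the stack left the L3/L4 checksum to the
	 * hardware, so ask the descriptor for checksum insertion.
	 */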
1809 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1810
1811 if (priv->extend_desc)
1812 desc = (struct dma_desc *)(priv->dma_etx + entry);
1813 else
1814 desc = priv->dma_tx + entry;
1815
1816 first = desc;
1817
1818#ifdef STMMAC_XMIT_DEBUG
1819 if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
1820 pr_debug("\tskb len: %d, nopaged_len: %d,\n"
1821 "\t\tn_frags: %d, ip_summed: %d\n",
1822 skb->len, nopaged_len, nfrags, skb->ip_summed);
1823#endif
1824 priv->tx_skbuff[entry] = skb;
1825
1826
1827 if (priv->mode == STMMAC_RING_MODE) {
1828 is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
1829 priv->plat->enh_desc);
1830 if (unlikely(is_jumbo))
1831 entry = priv->hw->ring->jumbo_frm(priv, skb,
1832 csum_insertion);
1833 } else {
1834 is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
1835 priv->plat->enh_desc);
1836 if (unlikely(is_jumbo))
1837 entry = priv->hw->chain->jumbo_frm(priv, skb,
1838 csum_insertion);
1839 }
1840 if (likely(!is_jumbo)) {
1841 desc->des2 = dma_map_single(priv->device, skb->data,
1842 nopaged_len, DMA_TO_DEVICE);
1843 priv->tx_skbuff_dma[entry] = desc->des2;
1844 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1845 csum_insertion, priv->mode);
1846 } else
1847 desc = first;
1848
1849 for (i = 0; i < nfrags; i++) {
1850 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1851 int len = skb_frag_size(frag);
1852
1853 entry = (++priv->cur_tx) % txsize;
1854 if (priv->extend_desc)
1855 desc = (struct dma_desc *)(priv->dma_etx + entry);
1856 else
1857 desc = priv->dma_tx + entry;
1858
1859 TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
1860 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1861 DMA_TO_DEVICE);
1862 priv->tx_skbuff_dma[entry] = desc->des2;
1863 priv->tx_skbuff[entry] = NULL;
1864 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
1865 priv->mode);
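		/* Make sure the descriptor fields are fully written before the
		 * OWN bit is handed over to the DMA below.
		 */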
1866 wmb();
1867 priv->hw->desc->set_tx_owner(desc);
1868 wmb();
1869 }

	/* Finalize the latest segment. */
1872 priv->hw->desc->close_tx_desc(desc);
1873
1874 wmb();

	/* According to the coalesce parameter, the IC bit for the latest
	 * segment may be cleared and the timer re-started to clean the tx
	 * path. This approach takes care of the fragments as well.
	 */
1879 priv->tx_count_frames += nfrags + 1;
1880 if (priv->tx_coal_frames > priv->tx_count_frames) {
1881 priv->hw->desc->clear_tx_ic(desc);
1882 priv->xstats.tx_reset_ic_bit++;
1883 TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
1884 priv->tx_count_frames);
1885 mod_timer(&priv->txtimer,
1886 STMMAC_COAL_TIMER(priv->tx_coal_timer));
1887 } else
1888 priv->tx_count_frames = 0;

	/* Flip the OWN bit on the first descriptor last, to avoid a race with
	 * the DMA engine reading a half-built chain.
	 */
1891 priv->hw->desc->set_tx_owner(first);
1892 wmb();
1893
1894 priv->cur_tx++;
1895
1896#ifdef STMMAC_XMIT_DEBUG
1897 if (netif_msg_pktdata(priv)) {
1898 pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
1899 __func__, (priv->cur_tx % txsize),
1900 (priv->dirty_tx % txsize), entry, first, nfrags);
1901 if (priv->extend_desc)
1902 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
1903 else
1904 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
1905
1906 pr_info(">>> frame to be transmitted: ");
1907 print_pkt(skb->data, skb->len);
1908 }
1909#endif
1910 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
1911 TX_DBG("%s: stop transmitted packets\n", __func__);
1912 netif_stop_queue(dev);
1913 }
1914
1915 dev->stats.tx_bytes += skb->len;
1916
1917 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1918 priv->hwts_tx_en)) {
		/* Declare that the device is doing the timestamping. */
1920 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1921 priv->hw->desc->enable_tx_timestamp(first);
1922 }
1923
1924 if (!priv->hwts_tx_en)
1925 skb_tx_timestamp(skb);
1926
1927 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
1928
1929 spin_unlock(&priv->tx_lock);
1930
1931 return NETDEV_TX_OK;
1932}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * Description: this reallocates the skbs for the reception process
 * that is based on zero-copy.
 */
1940static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1941{
1942 unsigned int rxsize = priv->dma_rx_size;
1943 int bfsize = priv->dma_buf_sz;
1944
1945 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
1946 unsigned int entry = priv->dirty_rx % rxsize;
1947 struct dma_desc *p;
1948
1949 if (priv->extend_desc)
1950 p = (struct dma_desc *)(priv->dma_erx + entry);
1951 else
1952 p = priv->dma_rx + entry;
1953
1954 if (likely(priv->rx_skbuff[entry] == NULL)) {
1955 struct sk_buff *skb;
1956
1957 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1958
1959 if (unlikely(skb == NULL))
1960 break;
1961
1962 priv->rx_skbuff[entry] = skb;
1963 priv->rx_skbuff_dma[entry] =
1964 dma_map_single(priv->device, skb->data, bfsize,
1965 DMA_FROM_DEVICE);
1966
1967 p->des2 = priv->rx_skbuff_dma[entry];
1968
1969 priv->hw->ring->refill_desc3(priv, p);
1970
1971 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1972 }
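		/* Publish the new buffer address before giving the descriptor
		 * back to the DMA.
		 */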
1973 wmb();
1974 priv->hw->desc->set_rx_owner(p);
1975 wmb();
1976 }
1977}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * Description: this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
1986static int stmmac_rx(struct stmmac_priv *priv, int limit)
1987{
1988 unsigned int rxsize = priv->dma_rx_size;
1989 unsigned int entry = priv->cur_rx % rxsize;
1990 unsigned int next_entry;
1991 unsigned int count = 0;
1992 int coe = priv->plat->rx_coe;
1993
1994#ifdef STMMAC_RX_DEBUG
1995 if (netif_msg_hw(priv)) {
1996 pr_debug(">>> stmmac_rx: descriptor ring:\n");
1997 if (priv->extend_desc)
1998 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
1999 else
2000 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
2001 }
2002#endif
2003 while (count < limit) {
2004 int status;
2005 struct dma_desc *p;
2006
2007 if (priv->extend_desc)
2008 p = (struct dma_desc *)(priv->dma_erx + entry);
2009 else
2010 p = priv->dma_rx + entry;
2011
2012 if (priv->hw->desc->get_rx_owner(p))
2013 break;
2014
2015 count++;
2016
2017 next_entry = (++priv->cur_rx) % rxsize;
2018 if (priv->extend_desc)
2019 prefetch(priv->dma_erx + next_entry);
2020 else
2021 prefetch(priv->dma_rx + next_entry);
2022
2023
2024 status = priv->hw->desc->rx_status(&priv->dev->stats,
2025 &priv->xstats, p);
2026 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2027 priv->hw->desc->rx_extended_status(&priv->dev->stats,
2028 &priv->xstats,
2029 priv->dma_erx +
2030 entry);
2031 if (unlikely(status == discard_frame)) {
2032 priv->dev->stats.rx_errors++;
2033 if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 are overwritten by the device
				 * with the timestamp value, so reinitialize
				 * them in stmmac_rx_refill() so the device can
				 * reuse this descriptor.
				 */
2039 priv->rx_skbuff[entry] = NULL;
2040 dma_unmap_single(priv->device,
2041 priv->rx_skbuff_dma[entry],
2042 priv->dma_buf_sz,
2043 DMA_FROM_DEVICE);
2044 }
2045 } else {
2046 struct sk_buff *skb;
2047 int frame_len;
2048
2049 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);

			/* ACS is set; the GMAC core strips PAD/FCS for IEEE
			 * 802.3 Type frames (LLC/LLC-SNAP), so only remove the
			 * FCS from the length of the other frames.
			 */
2054 if (unlikely(status != llc_snap))
2055 frame_len -= ETH_FCS_LEN;
2056#ifdef STMMAC_RX_DEBUG
2057 if (frame_len > ETH_FRAME_LEN)
2058 pr_debug("\tRX frame size %d, COE status: %d\n",
2059 frame_len, status);
2060
2061 if (netif_msg_hw(priv))
2062 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2063 p, entry, p->des2);
2064#endif
2065 skb = priv->rx_skbuff[entry];
2066 if (unlikely(!skb)) {
2067 pr_err("%s: Inconsistent Rx descriptor chain\n",
2068 priv->dev->name);
2069 priv->dev->stats.rx_dropped++;
2070 break;
2071 }
2072 prefetch(skb->data - NET_IP_ALIGN);
2073 priv->rx_skbuff[entry] = NULL;
2074
2075 stmmac_get_rx_hwtstamp(priv, entry, skb);
2076
2077 skb_put(skb, frame_len);
2078 dma_unmap_single(priv->device,
2079 priv->rx_skbuff_dma[entry],
2080 priv->dma_buf_sz, DMA_FROM_DEVICE);
2081#ifdef STMMAC_RX_DEBUG
2082 if (netif_msg_pktdata(priv)) {
2083 pr_info(" frame received (%dbytes)", frame_len);
2084 print_pkt(skb->data, frame_len);
2085 }
2086#endif
2087 skb->protocol = eth_type_trans(skb, priv->dev);
2088
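			/* Only trust the hardware checksum when RX checksum
			 * offload (COE) is actually in use.
			 */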
2089 if (unlikely(!coe))
2090 skb_checksum_none_assert(skb);
2091 else
2092 skb->ip_summed = CHECKSUM_UNNECESSARY;
2093
2094 napi_gro_receive(&priv->napi, skb);
2095
2096 priv->dev->stats.rx_packets++;
2097 priv->dev->stats.rx_bytes += frame_len;
2098 }
2099 entry = next_entry;
2100 }
2101
2102 stmmac_rx_refill(priv);
2103
2104 priv->xstats.rx_pkt_n += count;
2105
2106 return count;
2107}

/**
 * stmmac_poll - stmmac poll method (NAPI)
 * @napi: pointer to the napi structure
 * @budget: maximum number of packets that the current CPU can receive from
 *	    all interfaces
 * Description:
 * It looks at the incoming frames and clears the tx resources.
 */
2117static int stmmac_poll(struct napi_struct *napi, int budget)
2118{
2119 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2120 int work_done = 0;
2121
2122 priv->xstats.napi_poll++;
2123 stmmac_tx_clean(priv);
2124
2125 work_done = stmmac_rx(priv, budget);
2126 if (work_done < budget) {
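		/* All pending work done: exit polling mode and re-enable the
		 * DMA interrupts.
		 */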
2127 napi_complete(napi);
2128 stmmac_enable_dma_irq(priv);
2129 }
2130 return work_done;
2131}

/**
 * stmmac_tx_timeout
 * @dev: pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
2141static void stmmac_tx_timeout(struct net_device *dev)
2142{
2143 struct stmmac_priv *priv = netdev_priv(dev);
2144
2145
2146 stmmac_tx_err(priv);
2147}
2148
2149
2150static int stmmac_config(struct net_device *dev, struct ifmap *map)
2151{
2152 if (dev->flags & IFF_UP)
2153 return -EBUSY;
2154
	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr) {
2157 pr_warn("%s: can't change I/O address\n", dev->name);
2158 return -EOPNOTSUPP;
2159 }
2160
	/* Don't allow changing the IRQ */
	if (map->irq != dev->irq) {
		pr_warn("%s: can't change IRQ number %d\n", dev->name, dev->irq);
2164 return -EOPNOTSUPP;
2165 }
2166
2167 return 0;
2168}

/**
 * stmmac_set_rx_mode - entry point for multicast addressing
 * @dev: pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
2179static void stmmac_set_rx_mode(struct net_device *dev)
2180{
2181 struct stmmac_priv *priv = netdev_priv(dev);
2182
2183 spin_lock(&priv->lock);
2184 priv->hw->mac->set_filter(dev, priv->synopsys_id);
2185 spin_unlock(&priv->lock);
2186}

/**
 * stmmac_change_mtu - entry point to change MTU size for the device
 * @dev: device pointer
 * @new_mtu: the new MTU size for the device
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
2199static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2200{
2201 struct stmmac_priv *priv = netdev_priv(dev);
2202 int max_mtu;
2203
2204 if (netif_running(dev)) {
2205 pr_err("%s: must be stopped to change its MTU\n", dev->name);
2206 return -EBUSY;
2207 }
2208
2209 if (priv->plat->enh_desc)
2210 max_mtu = JUMBO_LEN;
2211 else
2212 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
2213
2214 if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2215 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
2216 return -EINVAL;
2217 }
2218
2219 dev->mtu = new_mtu;
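	/* Re-evaluate the offload features against the new MTU; see
	 * stmmac_fix_features() for the bugged-jumbo handling.
	 */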
2220 netdev_update_features(dev);
2221
2222 return 0;
2223}
2224
2225static netdev_features_t stmmac_fix_features(struct net_device *dev,
2226 netdev_features_t features)
2227{
2228 struct stmmac_priv *priv = netdev_priv(dev);
2229
2230 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2231 features &= ~NETIF_F_RXCSUM;
2232 else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
2233 features &= ~NETIF_F_IPV6_CSUM;
2234 if (!priv->plat->tx_coe)
2235 features &= ~NETIF_F_ALL_CSUM;
2236
2237
2238
2239
2240
2241
2242 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2243 features &= ~NETIF_F_ALL_CSUM;
2244
2245 return features;
2246}

/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number
 * @dev_id: to pass the net device pointer
 * Description: this is the main driver interrupt service routine.
 * It calls the DMA ISR and also the core ISR to manage PMT, MMC and LPI
 * interrupts.
 */
2256static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2257{
2258 struct net_device *dev = (struct net_device *)dev_id;
2259 struct stmmac_priv *priv = netdev_priv(dev);
2260
2261 if (unlikely(!dev)) {
2262 pr_err("%s: invalid dev pointer\n", __func__);
2263 return IRQ_NONE;
2264 }
2265
2266
2267 if (priv->plat->has_gmac) {
2268 int status = priv->hw->mac->host_irq_status((void __iomem *)
2269 dev->base_addr,
2270 &priv->xstats);
2271 if (unlikely(status)) {
2272
2273 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2274 priv->tx_path_in_lpi_mode = true;
2275 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2276 priv->tx_path_in_lpi_mode = false;
2277 }
2278 }
2279
2280
2281 stmmac_dma_interrupt(priv);
2282
2283 return IRQ_HANDLED;
2284}
2285
2286#ifdef CONFIG_NET_POLL_CONTROLLER
2287
2288
2289
2290static void stmmac_poll_controller(struct net_device *dev)
2291{
2292 disable_irq(dev->irq);
2293 stmmac_interrupt(dev->irq, dev);
2294 enable_irq(dev->irq);
2295}
2296#endif

/**
 * stmmac_ioctl - entry point for the ioctl
 * @dev: device pointer
 * @rq: an IOCTL specific structure that can contain a pointer to a
 * proprietary structure used to pass information to the driver
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
2307static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2308{
2309 struct stmmac_priv *priv = netdev_priv(dev);
2310 int ret = -EOPNOTSUPP;
2311
2312 if (!netif_running(dev))
2313 return -EINVAL;
2314
2315 switch (cmd) {
2316 case SIOCGMIIPHY:
2317 case SIOCGMIIREG:
2318 case SIOCSMIIREG:
2319 if (!priv->phydev)
2320 return -EINVAL;
2321 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2322 break;
2323 case SIOCSHWTSTAMP:
2324 ret = stmmac_hwtstamp_ioctl(dev, rq);
2325 break;
2326 default:
2327 break;
2328 }
2329
2330 return ret;
2331}
2332
2333#ifdef CONFIG_STMMAC_DEBUG_FS
2334static struct dentry *stmmac_fs_dir;
2335static struct dentry *stmmac_rings_status;
2336static struct dentry *stmmac_dma_cap;
2337
2338static void sysfs_display_ring(void *head, int size, int extend_desc,
2339 struct seq_file *seq)
2340{
2341 int i;
2342 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2343 struct dma_desc *p = (struct dma_desc *)head;
2344
2345 for (i = 0; i < size; i++) {
2346 u64 x;
2347 if (extend_desc) {
2348 x = *(u64 *) ep;
2349 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2350 i, (unsigned int)virt_to_phys(ep),
2351 (unsigned int)x, (unsigned int)(x >> 32),
2352 ep->basic.des2, ep->basic.des3);
2353 ep++;
2354 } else {
2355 x = *(u64 *) p;
2356 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
2358 (unsigned int)x, (unsigned int)(x >> 32),
2359 p->des2, p->des3);
2360 p++;
2361 }
2362 seq_printf(seq, "\n");
2363 }
2364}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	if (priv->extend_desc) {
		seq_printf(seq, "Extended RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
		seq_printf(seq, "Extended TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
	} else {
		seq_printf(seq, "RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
		seq_printf(seq, "TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
		   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
	seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
		   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	seq_printf(seq, "\tRX FIFO > 2048 bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	/* Create the debugfs main directory */
	stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

	if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
		pr_err("ERROR %s, debugfs create directory failed\n",
		       STMMAC_RESOURCE_NAME);

		return -ENOMEM;
	}

	/* Entry to report the DMA RX/TX rings */
	stmmac_rings_status = debugfs_create_file("descriptors_status",
						  S_IRUGO, stmmac_fs_dir, dev,
						  &stmmac_rings_status_fops);

	if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
		pr_info("ERROR creating stmmac ring debugfs file\n");
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
					     dev, &stmmac_dma_cap_fops);

	if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
		pr_info("ERROR creating stmmac MMC debugfs file\n");
		debugfs_remove(stmmac_rings_status);
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(void)
{
	debugfs_remove(stmmac_rings_status);
	debugfs_remove(stmmac_dma_cap);
	debugfs_remove(stmmac_fs_dir);
}
#endif

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};

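/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function detects which MAC device (GMAC or old
 *  MAC10/100) is present, selects the descriptor/mode callbacks and,
 *  if available, reads the DMA HW capability register to refine the
 *  platform configuration.
 */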
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr);
	} else {
		mac = dwmac100_setup(priv->ioaddr);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* Get and dump the chip ID */
	priv->synopsys_id = stmmac_get_synopsys_id(priv);

	/* Select the enhanced/normal descriptor structures */
	stmmac_selec_desc_mode(priv);

	/* To use the chained or ring mode */
	if (chain_mode) {
		priv->hw->chain = &chain_mode_ops;
		pr_info(" Chain mode enabled\n");
		priv->mode = STMMAC_CHAIN_MODE;
	} else {
		priv->hw->ring = &ring_mode_ops;
		pr_info(" Ring mode enabled\n");
		priv->mode = STMMAC_RING_MODE;
	}

	/* Get the DMA HW capabilities, if supported by the core */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		pr_info(" DMA HW capability register supported\n");
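		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) passed through the platform
		 * data with the values from the HW capability register,
		 * if supported.
		 */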
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;

		priv->plat->tx_coe = priv->dma_cap.tx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		pr_info(" No HW DMA feature register supported\n");
	}

	/* Enable the RX IPC (Checksum Offloading) and check whether the
	 * feature has really been enabled in the core configuration.
	 */
	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
	if (!ret) {
		pr_warn(" RX IPC Checksum Offload not configured.\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
	}

	if (priv->plat->rx_coe)
		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
			priv->plat->rx_coe);
	if (priv->plat->tx_coe)
		pr_info(" TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		pr_info(" Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	return 0;
}
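/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function: it allocates the net
 * device, initialises the MAC/DMA HW and registers the interface.
 */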
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
				     struct plat_stmmacenet_data *plat_dat,
				     void __iomem *addr)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	ether_setup(ndev);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;

	stmmac_verify_args();

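	/* Override with kernel parameters if supplied here */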
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	/* Init the MAC and get its capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

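	/* RX watchdog interrupt mitigation is available on newer cores
	 * (>= 3.50). On buggy HW it can be disabled by passing the
	 * riwt_off field from the platform.
	 */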
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_clk)) {
		pr_warn("%s: warning: cannot get CSR clock\n", __func__);
		goto error_clk_get;
	}

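	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection is not done automatically from the CSR
	 * clock but taken from the platform data instead.
	 */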
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI) {
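		/* MDIO bus registration */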
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			pr_debug("%s: MDIO bus (id: %d) registration failed\n",
				 __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	return priv;

error_mdio_register:
	clk_put(priv->stmmac_clk);
error_clk_get:
	unregister_netdev(ndev);
error_netdev_register:
	netif_napi_del(&priv->napi);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}

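/**
 * stmmac_dvr_remove
 * @ndev: net device pointer
 * Description: this function stops the TX/RX DMA, disables the MAC,
 * unregisters the MDIO bus and the net device and frees its memory.
 */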
int stmmac_dvr_remove(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	pr_info("%s: removing driver\n", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
int stmmac_suspend(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (priv->phydev)
		phy_stop(priv->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	stmmac_clear_descriptors(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
	} else {
		stmmac_set_mac(priv->ioaddr, false);
		/* WoL is not needed: gate the CSR clock off */
		clk_disable_unprepare(priv->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

int stmmac_resume(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* The Power Down bit in the PM register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Anyway, it is better to manually clear this bit because it can
	 * generate problems.
	 */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, 0);
	else
		/* enable the clk previously disabled in suspend */
		clk_prepare_enable(priv->stmmac_clk);

	netif_device_attach(ndev);

	/* Enable the MAC and DMA */
	stmmac_set_mac(priv->ioaddr, true);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->phydev)
		phy_start(priv->phydev);

	return 0;
}

int stmmac_freeze(struct net_device *ndev)
{
	if (!ndev || !netif_running(ndev))
		return 0;

	return stmmac_release(ndev);
}

int stmmac_restore(struct net_device *ndev)
{
	if (!ndev || !netif_running(ndev))
		return 0;

	return stmmac_open(ndev);
}
#endif
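/**
 * stmmac_init - driver registration
 * Description: registers the platform and PCI bus glue drivers; if
 * the PCI registration fails, the platform one is unregistered again.
 */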
static int __init stmmac_init(void)
{
	int ret;

	ret = stmmac_register_platform();
	if (ret)
		goto err;
	ret = stmmac_register_pci();
	if (ret)
		goto err_pci;
	return 0;
err_pci:
	stmmac_unregister_platform();
err:
	pr_err("stmmac: driver registration failed\n");
	return ret;
}

static void __exit stmmac_exit(void)
{
	stmmac_unregister_platform();
	stmmac_unregister_pci();
}

module_init(stmmac_init);
module_exit(stmmac_exit);

#ifndef MODULE
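/**
 * stmmac_cmdline_opt - parse the "stmmaceth=" kernel command line
 * @str: option string
 * Description: parses the driver parameters when the driver is
 * built-in (e.g. stmmaceth=debug:16,watchdog:4000).
 */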
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "dma_txsize:", 11)) {
			if (kstrtoint(opt + 11, 0, &dma_txsize))
				goto err;
		} else if (!strncmp(opt, "dma_rxsize:", 11)) {
			if (kstrtoint(opt + 11, 0, &dma_rxsize))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");