/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
#include <linux/net_tstamp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "hwif.h"
#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx
 * descriptors, but on some platforms it is possible to use the chain
 * instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define	STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
 */
127static void stmmac_verify_args(void)
128{
129 if (unlikely(watchdog < 0))
130 watchdog = TX_TIMEO;
131 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 buf_sz = DEFAULT_BUFSIZE;
133 if (unlikely(flow_ctrl > 1))
134 flow_ctrl = FLOW_AUTO;
135 else if (likely(flow_ctrl < 0))
136 flow_ctrl = FLOW_OFF;
137 if (unlikely((pause < 0) || (pause > 0xffff)))
138 pause = PAUSE_TIME;
139 if (eee_timer < 0)
140 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141}
/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
147static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148{
149 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 u32 queue;
151
152 for (queue = 0; queue < rx_queues_cnt; queue++) {
153 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154
155 napi_disable(&rx_q->napi);
156 }
157}
/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
163static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164{
165 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 u32 queue;
167
168 for (queue = 0; queue < rx_queues_cnt; queue++) {
169 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170
171 napi_enable(&rx_q->napi);
172 }
173}
/**
 * stmmac_stop_all_queues - Stop all TX queues
 * @priv: driver private structure
 */
179static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180{
181 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 u32 queue;
183
184 for (queue = 0; queue < tx_queues_cnt; queue++)
185 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186}
/**
 * stmmac_start_all_queues - Start all TX queues
 * @priv: driver private structure
 */
192static void stmmac_start_all_queues(struct stmmac_priv *priv)
193{
194 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 u32 queue;
196
197 for (queue = 0; queue < tx_queues_cnt; queue++)
198 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199}
200
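/**
 * stmmac_service_event_schedule - schedule the deferred service task
 * @priv: driver private structure
 * Description: queue the service work unless the interface is going down
 * or the task has already been scheduled.
 */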
201static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202{
203 if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 queue_work(priv->wq, &priv->service_task);
206}
207
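/**
 * stmmac_global_err - handle a fatal HW error
 * @priv: driver private structure
 * Description: turn the carrier off and request a full reset through the
 * service task.
 */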
208static void stmmac_global_err(struct stmmac_priv *priv)
209{
210 netif_carrier_off(priv->dev);
211 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 stmmac_service_event_schedule(priv);
213}
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Viceversa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
227static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228{
229 u32 clk_rate;
230
231 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
	/* A platform-provided clk_csr value that already selects one of the
	 * high-frequency ranges (MAC_CSR_H_FRQ_MASK) is kept as-is; otherwise
	 * the CSR clock range is derived from the actual CSR clock rate
	 * (from 20-35 MHz up to 250-300 MHz).
	 */
240 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 if (clk_rate < CSR_F_35M)
242 priv->clk_csr = STMMAC_CSR_20_35M;
243 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 priv->clk_csr = STMMAC_CSR_35_60M;
245 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 priv->clk_csr = STMMAC_CSR_60_100M;
247 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 priv->clk_csr = STMMAC_CSR_100_150M;
249 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 priv->clk_csr = STMMAC_CSR_150_250M;
251 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 priv->clk_csr = STMMAC_CSR_250_300M;
253 }
254
255 if (priv->plat->has_sun8i) {
256 if (clk_rate > 160000000)
257 priv->clk_csr = 0x03;
258 else if (clk_rate > 80000000)
259 priv->clk_csr = 0x02;
260 else if (clk_rate > 40000000)
261 priv->clk_csr = 0x01;
262 else
263 priv->clk_csr = 0;
264 }
265}
266
267static void print_pkt(unsigned char *buf, int len)
268{
269 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271}
272
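/**
 * stmmac_tx_avail - Get TX queue availability
 * @priv: driver private structure
 * @queue: TX queue index
 */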
273static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274{
275 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 u32 avail;
277
278 if (tx_q->dirty_tx > tx_q->cur_tx)
279 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 else
281 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282
283 return avail;
284}
/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
291static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292{
293 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 u32 dirty;
295
296 if (rx_q->dirty_rx <= rx_q->cur_rx)
297 dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 else
299 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300
301 return dirty;
302}
/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
310static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311{
312 struct net_device *ndev = priv->dev;
313 struct phy_device *phydev = ndev->phydev;
314
315 if (likely(priv->plat->fix_mac_speed))
316 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317}
/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
325static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326{
327 u32 tx_cnt = priv->plat->tx_queues_to_use;
328 u32 queue;
329
330
331 for (queue = 0; queue < tx_cnt; queue++) {
332 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333
334 if (tx_q->dirty_tx != tx_q->cur_tx)
335 return;
336 }
337
338
339 if (!priv->tx_path_in_lpi_mode)
340 stmmac_set_eee_mode(priv, priv->hw,
341 priv->plat->en_tx_lpi_clockgating);
342}
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
350void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351{
352 stmmac_reset_eee_mode(priv, priv->hw);
353 del_timer_sync(&priv->eee_ctrl_timer);
354 priv->tx_path_in_lpi_mode = false;
355}
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
364static void stmmac_eee_ctrl_timer(struct timer_list *t)
365{
366 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367
368 stmmac_enable_eee_mode(priv);
369 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370}
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
380bool stmmac_eee_init(struct stmmac_priv *priv)
381{
382 struct net_device *ndev = priv->dev;
383 int interface = priv->plat->interface;
384 bool ret = false;
385
386 if ((interface != PHY_INTERFACE_MODE_MII) &&
387 (interface != PHY_INTERFACE_MODE_GMII) &&
388 !phy_interface_mode_is_rgmii(interface))
389 goto out;
	/* Using PCS we cannot dial with the phy registers
	 * so we do not support extra features like EEE.
	 */
394 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
395 (priv->hw->pcs == STMMAC_PCS_TBI) ||
396 (priv->hw->pcs == STMMAC_PCS_RTBI))
397 goto out;
398
399
400 if (priv->dma_cap.eee) {
401 int tx_lpi_timer = priv->tx_lpi_timer;
402
403
404 if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
410 mutex_lock(&priv->lock);
411 if (priv->eee_active) {
412 netdev_dbg(priv->dev, "disable EEE\n");
413 del_timer_sync(&priv->eee_ctrl_timer);
414 stmmac_set_eee_timer(priv, priv->hw, 0,
415 tx_lpi_timer);
416 }
417 priv->eee_active = 0;
418 mutex_unlock(&priv->lock);
419 goto out;
420 }
421
422 mutex_lock(&priv->lock);
423 if (!priv->eee_active) {
424 priv->eee_active = 1;
425 timer_setup(&priv->eee_ctrl_timer,
426 stmmac_eee_ctrl_timer, 0);
427 mod_timer(&priv->eee_ctrl_timer,
428 STMMAC_LPI_T(eee_timer));
429
430 stmmac_set_eee_timer(priv, priv->hw,
431 STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
432 }
433
434 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435
436 ret = true;
437 mutex_unlock(&priv->lock);
438
439 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440 }
441out:
442 return ret;
443}
/**
 * stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p: descriptor pointer
 * @skb: the socket buffer
 * Description:
 * This function reads the timestamp from the descriptor and passes it to
 * the stack, performing some sanity checks first.
 */
453static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454 struct dma_desc *p, struct sk_buff *skb)
455{
456 struct skb_shared_hwtstamps shhwtstamp;
457 u64 ns;
458
459 if (!priv->hwts_tx_en)
460 return;
461
462
463 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464 return;
465
466
467 if (stmmac_get_tx_timestamp_status(priv, p)) {
468
469 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470
471 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472 shhwtstamp.hwtstamp = ns_to_ktime(ns);
473
474 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475
476 skb_tstamp_tx(skb, &shhwtstamp);
477 }
478
479 return;
480}
/**
 * stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p: descriptor pointer
 * @np: next descriptor pointer
 * @skb: the socket buffer
 * Description:
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack, performing some sanity checks first.
 */
491static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492 struct dma_desc *np, struct sk_buff *skb)
493{
494 struct skb_shared_hwtstamps *shhwtstamp = NULL;
495 struct dma_desc *desc = p;
496 u64 ns;
497
498 if (!priv->hwts_rx_en)
499 return;
500
501 if (priv->plat->has_gmac4)
502 desc = np;
503
504
505 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
506 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
507 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508 shhwtstamp = skb_hwtstamps(skb);
509 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510 shhwtstamp->hwtstamp = ns_to_ktime(ns);
511 } else {
512 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513 }
514}
/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 * Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 * Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
527static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528{
529 struct stmmac_priv *priv = netdev_priv(dev);
530 struct hwtstamp_config config;
531 struct timespec64 now;
532 u64 temp = 0;
533 u32 ptp_v2 = 0;
534 u32 tstamp_all = 0;
535 u32 ptp_over_ipv4_udp = 0;
536 u32 ptp_over_ipv6_udp = 0;
537 u32 ptp_over_ethernet = 0;
538 u32 snap_type_sel = 0;
539 u32 ts_master_en = 0;
540 u32 ts_event_en = 0;
541 u32 value = 0;
542 u32 sec_inc;
543
544 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545 netdev_alert(priv->dev, "No support for HW time stamping\n");
546 priv->hwts_tx_en = 0;
547 priv->hwts_rx_en = 0;
548
549 return -EOPNOTSUPP;
550 }
551
552 if (copy_from_user(&config, ifr->ifr_data,
553 sizeof(struct hwtstamp_config)))
554 return -EFAULT;
555
556 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557 __func__, config.flags, config.tx_type, config.rx_filter);
558
559
560 if (config.flags)
561 return -EINVAL;
562
563 if (config.tx_type != HWTSTAMP_TX_OFF &&
564 config.tx_type != HWTSTAMP_TX_ON)
565 return -ERANGE;
566
567 if (priv->adv_ts) {
568 switch (config.rx_filter) {
569 case HWTSTAMP_FILTER_NONE:
570
571 config.rx_filter = HWTSTAMP_FILTER_NONE;
572 break;
573
574 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575
576 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577
578 if (priv->plat->has_gmac4)
579 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580 else
581 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582
583 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 break;
586
587 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588
589 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590
591 ts_event_en = PTP_TCR_TSEVNTENA;
592
593 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595 break;
596
597 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598
599 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600
601 ts_master_en = PTP_TCR_TSMSTRENA;
602 ts_event_en = PTP_TCR_TSEVNTENA;
603
604 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 break;
607
608 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609
610 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611 ptp_v2 = PTP_TCR_TSVER2ENA;
612
613 if (priv->plat->has_gmac4)
614 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615 else
616 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617
618 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620 break;
621
622 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623
624 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625 ptp_v2 = PTP_TCR_TSVER2ENA;
626
627 ts_event_en = PTP_TCR_TSEVNTENA;
628
629 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 break;
632
633 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634
635 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636 ptp_v2 = PTP_TCR_TSVER2ENA;
637
638 ts_master_en = PTP_TCR_TSMSTRENA;
639 ts_event_en = PTP_TCR_TSEVNTENA;
640
641 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 break;
644
645 case HWTSTAMP_FILTER_PTP_V2_EVENT:
646
647 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648 ptp_v2 = PTP_TCR_TSVER2ENA;
649
650 if (priv->plat->has_gmac4)
651 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652 else
653 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654
655 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657 ptp_over_ethernet = PTP_TCR_TSIPENA;
658 break;
659
660 case HWTSTAMP_FILTER_PTP_V2_SYNC:
661
662 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663 ptp_v2 = PTP_TCR_TSVER2ENA;
664
665 ts_event_en = PTP_TCR_TSEVNTENA;
666
667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 ptp_over_ethernet = PTP_TCR_TSIPENA;
670 break;
671
672 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
673
674 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675 ptp_v2 = PTP_TCR_TSVER2ENA;
676
677 ts_master_en = PTP_TCR_TSMSTRENA;
678 ts_event_en = PTP_TCR_TSEVNTENA;
679
680 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682 ptp_over_ethernet = PTP_TCR_TSIPENA;
683 break;
684
685 case HWTSTAMP_FILTER_NTP_ALL:
686 case HWTSTAMP_FILTER_ALL:
687
688 config.rx_filter = HWTSTAMP_FILTER_ALL;
689 tstamp_all = PTP_TCR_TSENALL;
690 break;
691
692 default:
693 return -ERANGE;
694 }
695 } else {
696 switch (config.rx_filter) {
697 case HWTSTAMP_FILTER_NONE:
698 config.rx_filter = HWTSTAMP_FILTER_NONE;
699 break;
700 default:
701
702 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703 break;
704 }
705 }
706 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708
709 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
710 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
711 else {
712 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713 tstamp_all | ptp_v2 | ptp_over_ethernet |
714 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715 ts_master_en | snap_type_sel);
716 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
717
718
719 stmmac_config_sub_second_increment(priv,
720 priv->ptpaddr, priv->plat->clk_ptp_rate,
721 priv->plat->has_gmac4, &sec_inc);
722 temp = div_u64(1000000000ULL, sec_inc);
723
724
725 priv->sub_second_inc = sec_inc;
726 priv->systime_flags = value;
		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
733 temp = (u64)(temp << 32);
734 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
735 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
736
737
738 ktime_get_real_ts64(&now);
739
740
741 stmmac_init_systime(priv, priv->ptpaddr,
742 (u32)now.tv_sec, now.tv_nsec);
743 }
744
745 return copy_to_user(ifr->ifr_data, &config,
746 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
747}
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
756static int stmmac_init_ptp(struct stmmac_priv *priv)
757{
758 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
759 return -EOPNOTSUPP;
760
761 priv->adv_ts = 0;
762
763 if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
764 priv->adv_ts = 1;
765
766 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
767 priv->adv_ts = 1;
768
769 if (priv->dma_cap.time_stamp)
770 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
771
772 if (priv->adv_ts)
773 netdev_info(priv->dev,
774 "IEEE 1588-2008 Advanced Timestamp supported\n");
775
776 priv->hwts_tx_en = 0;
777 priv->hwts_rx_en = 0;
778
779 stmmac_ptp_register(priv);
780
781 return 0;
782}
783
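/**
 * stmmac_release_ptp - release the PTP resources
 * @priv: driver private structure
 * Description: disable the PTP reference clock (if any) and unregister the
 * PTP clock driver.
 */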
784static void stmmac_release_ptp(struct stmmac_priv *priv)
785{
786 if (priv->plat->clk_ptp_ref)
787 clk_disable_unprepare(priv->plat->clk_ptp_ref);
788 stmmac_ptp_unregister(priv);
789}
/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex mode negotiated by the PHY
 * Description: It is used for configuring the flow control in all queues
 */
796static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
797{
798 u32 tx_cnt = priv->plat->tx_queues_to_use;
799
800 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
801 priv->pause, tx_cnt);
802}
/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the eee initialization because it could happen when
 * switching on different networks (that are eee capable).
 */
813static void stmmac_adjust_link(struct net_device *dev)
814{
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct phy_device *phydev = dev->phydev;
817 bool new_state = false;
818
819 if (!phydev)
820 return;
821
822 mutex_lock(&priv->lock);
823
824 if (phydev->link) {
825 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
826
827
828
829 if (phydev->duplex != priv->oldduplex) {
830 new_state = true;
831 if (!phydev->duplex)
832 ctrl &= ~priv->hw->link.duplex;
833 else
834 ctrl |= priv->hw->link.duplex;
835 priv->oldduplex = phydev->duplex;
836 }
837
838 if (phydev->pause)
839 stmmac_mac_flow_ctrl(priv, phydev->duplex);
840
841 if (phydev->speed != priv->speed) {
842 new_state = true;
843 ctrl &= ~priv->hw->link.speed_mask;
844 switch (phydev->speed) {
845 case SPEED_1000:
846 ctrl |= priv->hw->link.speed1000;
847 break;
848 case SPEED_100:
849 ctrl |= priv->hw->link.speed100;
850 break;
851 case SPEED_10:
852 ctrl |= priv->hw->link.speed10;
853 break;
854 default:
855 netif_warn(priv, link, priv->dev,
856 "broken speed: %d\n", phydev->speed);
857 phydev->speed = SPEED_UNKNOWN;
858 break;
859 }
860 if (phydev->speed != SPEED_UNKNOWN)
861 stmmac_hw_fix_mac_speed(priv);
862 priv->speed = phydev->speed;
863 }
864
865 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
866
867 if (!priv->oldlink) {
868 new_state = true;
869 priv->oldlink = true;
870 }
871 } else if (priv->oldlink) {
872 new_state = true;
873 priv->oldlink = false;
874 priv->speed = SPEED_UNKNOWN;
875 priv->oldduplex = DUPLEX_UNKNOWN;
876 }
877
878 if (new_state && netif_msg_link(priv))
879 phy_print_status(phydev);
880
881 mutex_unlock(&priv->lock);
882
883 if (phydev->is_pseudo_fixed_link)
884
885
886
887 phydev->irq = PHY_IGNORE_INTERRUPT;
888 else
889
890
891
892 priv->eee_enabled = stmmac_eee_init(priv);
893}
/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the RGMII or SGMII PHY interface.
 */
902static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
903{
904 int interface = priv->plat->interface;
905
906 if (priv->dma_cap.pcs) {
907 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
908 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
909 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
910 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
911 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
912 priv->hw->pcs = STMMAC_PCS_RGMII;
913 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
914 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
915 priv->hw->pcs = STMMAC_PCS_SGMII;
916 }
917 }
918}
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
928static int stmmac_init_phy(struct net_device *dev)
929{
930 struct stmmac_priv *priv = netdev_priv(dev);
931 u32 tx_cnt = priv->plat->tx_queues_to_use;
932 struct phy_device *phydev;
933 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
934 char bus_id[MII_BUS_ID_SIZE];
935 int interface = priv->plat->interface;
936 int max_speed = priv->plat->max_speed;
937 priv->oldlink = false;
938 priv->speed = SPEED_UNKNOWN;
939 priv->oldduplex = DUPLEX_UNKNOWN;
940
941 if (priv->plat->phy_node) {
942 phydev = of_phy_connect(dev, priv->plat->phy_node,
943 &stmmac_adjust_link, 0, interface);
944 } else {
945 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
946 priv->plat->bus_id);
947
948 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
949 priv->plat->phy_addr);
950 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
951 phy_id_fmt);
952
953 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
954 interface);
955 }
956
957 if (IS_ERR_OR_NULL(phydev)) {
958 netdev_err(priv->dev, "Could not attach to PHY\n");
959 if (!phydev)
960 return -ENODEV;
961
962 return PTR_ERR(phydev);
963 }
964
965
966 if ((interface == PHY_INTERFACE_MODE_MII) ||
967 (interface == PHY_INTERFACE_MODE_RMII) ||
968 (max_speed < 1000 && max_speed > 0))
969 phy_set_max_speed(phydev, SPEED_100);
970
	/*
	 * Half-duplex mode not supported with multiqueue
	 * half-duplex can only work with single queue
	 */
975 if (tx_cnt > 1) {
976 phy_remove_link_mode(phydev,
977 ETHTOOL_LINK_MODE_10baseT_Half_BIT);
978 phy_remove_link_mode(phydev,
979 ETHTOOL_LINK_MODE_100baseT_Half_BIT);
980 phy_remove_link_mode(phydev,
981 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
982 }
983
	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
991 if (!priv->plat->phy_node && phydev->phy_id == 0) {
992 phy_disconnect(phydev);
993 return -ENODEV;
994 }
995
996
997
998
999
1000 if (phydev->is_pseudo_fixed_link)
1001 phydev->irq = PHY_POLL;
1002
1003 phy_attached_info(phydev);
1004 return 0;
1005}
1006
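/**
 * stmmac_display_rx_rings - display the RX descriptor rings
 * @priv: driver private structure
 */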
1007static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1008{
1009 u32 rx_cnt = priv->plat->rx_queues_to_use;
1010 void *head_rx;
1011 u32 queue;
1012
1013
1014 for (queue = 0; queue < rx_cnt; queue++) {
1015 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1016
1017 pr_info("\tRX Queue %u rings\n", queue);
1018
1019 if (priv->extend_desc)
1020 head_rx = (void *)rx_q->dma_erx;
1021 else
1022 head_rx = (void *)rx_q->dma_rx;
1023
1024
1025 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1026 }
1027}
1028
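/**
 * stmmac_display_tx_rings - display the TX descriptor rings
 * @priv: driver private structure
 */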
1029static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1030{
1031 u32 tx_cnt = priv->plat->tx_queues_to_use;
1032 void *head_tx;
1033 u32 queue;
1034
1035
1036 for (queue = 0; queue < tx_cnt; queue++) {
1037 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1038
1039 pr_info("\tTX Queue %d rings\n", queue);
1040
1041 if (priv->extend_desc)
1042 head_tx = (void *)tx_q->dma_etx;
1043 else
1044 head_tx = (void *)tx_q->dma_tx;
1045
1046 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1047 }
1048}
1049
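/**
 * stmmac_display_rings - display the RX and TX descriptor rings
 * @priv: driver private structure
 */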
1050static void stmmac_display_rings(struct stmmac_priv *priv)
1051{
1052
1053 stmmac_display_rx_rings(priv);
1054
1055
1056 stmmac_display_tx_rings(priv);
1057}
1058
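/**
 * stmmac_set_bfsize - select the DMA buffer size from the MTU
 * @mtu: current MTU
 * @bufsize: current buffer size
 * Description: returns the DMA buffer size needed for the given MTU:
 * 8KiB, 4KiB, 2KiB or the default (1536 bytes).
 */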
1059static int stmmac_set_bfsize(int mtu, int bufsize)
1060{
1061 int ret = bufsize;
1062
1063 if (mtu >= BUF_SIZE_4KiB)
1064 ret = BUF_SIZE_8KiB;
1065 else if (mtu >= BUF_SIZE_2KiB)
1066 ret = BUF_SIZE_4KiB;
1067 else if (mtu > DEFAULT_BUFSIZE)
1068 ret = BUF_SIZE_2KiB;
1069 else
1070 ret = DEFAULT_BUFSIZE;
1071
1072 return ret;
1073}
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * in case of both basic and extended descriptors are used.
 */
1082static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1083{
1084 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1085 int i;
1086
1087
1088 for (i = 0; i < DMA_RX_SIZE; i++)
1089 if (priv->extend_desc)
1090 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1091 priv->use_riwt, priv->mode,
1092 (i == DMA_RX_SIZE - 1));
1093 else
1094 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1095 priv->use_riwt, priv->mode,
1096 (i == DMA_RX_SIZE - 1));
1097}
/**
 * stmmac_clear_tx_descriptors - clear TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
 */
1106static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1107{
1108 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1109 int i;
1110
1111
1112 for (i = 0; i < DMA_TX_SIZE; i++)
1113 if (priv->extend_desc)
1114 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1115 priv->mode, (i == DMA_TX_SIZE - 1));
1116 else
1117 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1118 priv->mode, (i == DMA_TX_SIZE - 1));
1119}
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * in case of both basic and extended descriptors are used.
 */
1127static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1128{
1129 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1130 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1131 u32 queue;
1132
1133
1134 for (queue = 0; queue < rx_queue_cnt; queue++)
1135 stmmac_clear_rx_descriptors(priv, queue);
1136
1137
1138 for (queue = 0; queue < tx_queue_cnt; queue++)
1139 stmmac_clear_tx_descriptors(priv, queue);
1140}
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
1152static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1153 int i, gfp_t flags, u32 queue)
1154{
1155 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1156 struct sk_buff *skb;
1157
1158 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1159 if (!skb) {
1160 netdev_err(priv->dev,
1161 "%s: Rx init fails; skb is NULL\n", __func__);
1162 return -ENOMEM;
1163 }
1164 rx_q->rx_skbuff[i] = skb;
1165 rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1166 priv->dma_buf_sz,
1167 DMA_FROM_DEVICE);
1168 if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1169 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1170 dev_kfree_skb_any(skb);
1171 return -EINVAL;
1172 }
1173
1174 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1175
1176 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1177 stmmac_init_desc3(priv, p);
1178
1179 return 0;
1180}
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
1188static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1189{
1190 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1191
1192 if (rx_q->rx_skbuff[i]) {
1193 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1194 priv->dma_buf_sz, DMA_FROM_DEVICE);
1195 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1196 }
1197 rx_q->rx_skbuff[i] = NULL;
1198}
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
1206static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1207{
1208 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1209
1210 if (tx_q->tx_skbuff_dma[i].buf) {
1211 if (tx_q->tx_skbuff_dma[i].map_as_page)
1212 dma_unmap_page(priv->device,
1213 tx_q->tx_skbuff_dma[i].buf,
1214 tx_q->tx_skbuff_dma[i].len,
1215 DMA_TO_DEVICE);
1216 else
1217 dma_unmap_single(priv->device,
1218 tx_q->tx_skbuff_dma[i].buf,
1219 tx_q->tx_skbuff_dma[i].len,
1220 DMA_TO_DEVICE);
1221 }
1222
1223 if (tx_q->tx_skbuff[i]) {
1224 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1225 tx_q->tx_skbuff[i] = NULL;
1226 tx_q->tx_skbuff_dma[i].buf = 0;
1227 tx_q->tx_skbuff_dma[i].map_as_page = false;
1228 }
1229}
/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
1239static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1240{
1241 struct stmmac_priv *priv = netdev_priv(dev);
1242 u32 rx_count = priv->plat->rx_queues_to_use;
1243 int ret = -ENOMEM;
1244 int bfsize = 0;
1245 int queue;
1246 int i;
1247
1248 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1249 if (bfsize < 0)
1250 bfsize = 0;
1251
1252 if (bfsize < BUF_SIZE_16KiB)
1253 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1254
1255 priv->dma_buf_sz = bfsize;
1256
1257
1258 netif_dbg(priv, probe, priv->dev,
1259 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1260
1261 for (queue = 0; queue < rx_count; queue++) {
1262 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1263
1264 netif_dbg(priv, probe, priv->dev,
1265 "(%s) dma_rx_phy=0x%08x\n", __func__,
1266 (u32)rx_q->dma_rx_phy);
1267
1268 for (i = 0; i < DMA_RX_SIZE; i++) {
1269 struct dma_desc *p;
1270
1271 if (priv->extend_desc)
1272 p = &((rx_q->dma_erx + i)->basic);
1273 else
1274 p = rx_q->dma_rx + i;
1275
1276 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1277 queue);
1278 if (ret)
1279 goto err_init_rx_buffers;
1280
1281 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1282 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1283 (unsigned int)rx_q->rx_skbuff_dma[i]);
1284 }
1285
1286 rx_q->cur_rx = 0;
1287 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1288
1289 stmmac_clear_rx_descriptors(priv, queue);
1290
1291
1292 if (priv->mode == STMMAC_CHAIN_MODE) {
1293 if (priv->extend_desc)
1294 stmmac_mode_init(priv, rx_q->dma_erx,
1295 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1296 else
1297 stmmac_mode_init(priv, rx_q->dma_rx,
1298 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1299 }
1300 }
1301
1302 buf_sz = bfsize;
1303
1304 return 0;
1305
1306err_init_rx_buffers:
1307 while (queue >= 0) {
1308 while (--i >= 0)
1309 stmmac_free_rx_buffer(priv, queue, i);
1310
1311 if (queue == 0)
1312 break;
1313
1314 i = DMA_RX_SIZE;
1315 queue--;
1316 }
1317
1318 return ret;
1319}
/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and clears the TX buffer bookkeeping. It supports the chained and ring
 * modes.
 */
1328static int init_dma_tx_desc_rings(struct net_device *dev)
1329{
1330 struct stmmac_priv *priv = netdev_priv(dev);
1331 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1332 u32 queue;
1333 int i;
1334
1335 for (queue = 0; queue < tx_queue_cnt; queue++) {
1336 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1337
1338 netif_dbg(priv, probe, priv->dev,
1339 "(%s) dma_tx_phy=0x%08x\n", __func__,
1340 (u32)tx_q->dma_tx_phy);
1341
1342
1343 if (priv->mode == STMMAC_CHAIN_MODE) {
1344 if (priv->extend_desc)
1345 stmmac_mode_init(priv, tx_q->dma_etx,
1346 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1347 else
1348 stmmac_mode_init(priv, tx_q->dma_tx,
1349 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1350 }
1351
1352 for (i = 0; i < DMA_TX_SIZE; i++) {
1353 struct dma_desc *p;
1354 if (priv->extend_desc)
1355 p = &((tx_q->dma_etx + i)->basic);
1356 else
1357 p = tx_q->dma_tx + i;
1358
1359 stmmac_clear_desc(priv, p);
1360
1361 tx_q->tx_skbuff_dma[i].buf = 0;
1362 tx_q->tx_skbuff_dma[i].map_as_page = false;
1363 tx_q->tx_skbuff_dma[i].len = 0;
1364 tx_q->tx_skbuff_dma[i].last_segment = false;
1365 tx_q->tx_skbuff[i] = NULL;
1366 }
1367
1368 tx_q->dirty_tx = 0;
1369 tx_q->cur_tx = 0;
1370 tx_q->mss = 0;
1371
1372 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1373 }
1374
1375 return 0;
1376}
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
1386static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1387{
1388 struct stmmac_priv *priv = netdev_priv(dev);
1389 int ret;
1390
1391 ret = init_dma_rx_desc_rings(dev, flags);
1392 if (ret)
1393 return ret;
1394
1395 ret = init_dma_tx_desc_rings(dev);
1396
1397 stmmac_clear_descriptors(priv);
1398
1399 if (netif_msg_hw(priv))
1400 stmmac_display_rings(priv);
1401
1402 return ret;
1403}
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
1410static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1411{
1412 int i;
1413
1414 for (i = 0; i < DMA_RX_SIZE; i++)
1415 stmmac_free_rx_buffer(priv, queue, i);
1416}
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
1423static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1424{
1425 int i;
1426
1427 for (i = 0; i < DMA_TX_SIZE; i++)
1428 stmmac_free_tx_buffer(priv, queue, i);
1429}
/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
1435static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1436{
1437 u32 rx_count = priv->plat->rx_queues_to_use;
1438 u32 queue;
1439
1440
1441 for (queue = 0; queue < rx_count; queue++) {
1442 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1443
1444
1445 dma_free_rx_skbufs(priv, queue);
1446
1447
1448 if (!priv->extend_desc)
1449 dma_free_coherent(priv->device,
1450 DMA_RX_SIZE * sizeof(struct dma_desc),
1451 rx_q->dma_rx, rx_q->dma_rx_phy);
1452 else
1453 dma_free_coherent(priv->device, DMA_RX_SIZE *
1454 sizeof(struct dma_extended_desc),
1455 rx_q->dma_erx, rx_q->dma_rx_phy);
1456
1457 kfree(rx_q->rx_skbuff_dma);
1458 kfree(rx_q->rx_skbuff);
1459 }
1460}
/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
1466static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1467{
1468 u32 tx_count = priv->plat->tx_queues_to_use;
1469 u32 queue;
1470
1471
1472 for (queue = 0; queue < tx_count; queue++) {
1473 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1474
1475
1476 dma_free_tx_skbufs(priv, queue);
1477
1478
1479 if (!priv->extend_desc)
1480 dma_free_coherent(priv->device,
1481 DMA_TX_SIZE * sizeof(struct dma_desc),
1482 tx_q->dma_tx, tx_q->dma_tx_phy);
1483 else
1484 dma_free_coherent(priv->device, DMA_TX_SIZE *
1485 sizeof(struct dma_extended_desc),
1486 tx_q->dma_etx, tx_q->dma_tx_phy);
1487
1488 kfree(tx_q->tx_skbuff_dma);
1489 kfree(tx_q->tx_skbuff);
1490 }
1491}
/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX paths. It pre-allocates
 * the RX socket buffers in order to allow zero-copy mechanism.
 */
1501static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1502{
1503 u32 rx_count = priv->plat->rx_queues_to_use;
1504 int ret = -ENOMEM;
1505 u32 queue;
1506
1507
1508 for (queue = 0; queue < rx_count; queue++) {
1509 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1510
1511 rx_q->queue_index = queue;
1512 rx_q->priv_data = priv;
1513
1514 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1515 sizeof(dma_addr_t),
1516 GFP_KERNEL);
1517 if (!rx_q->rx_skbuff_dma)
1518 goto err_dma;
1519
1520 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1521 sizeof(struct sk_buff *),
1522 GFP_KERNEL);
1523 if (!rx_q->rx_skbuff)
1524 goto err_dma;
1525
1526 if (priv->extend_desc) {
1527 rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1528 DMA_RX_SIZE *
1529 sizeof(struct
1530 dma_extended_desc),
1531 &rx_q->dma_rx_phy,
1532 GFP_KERNEL);
1533 if (!rx_q->dma_erx)
1534 goto err_dma;
1535
1536 } else {
1537 rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1538 DMA_RX_SIZE *
1539 sizeof(struct
1540 dma_desc),
1541 &rx_q->dma_rx_phy,
1542 GFP_KERNEL);
1543 if (!rx_q->dma_rx)
1544 goto err_dma;
1545 }
1546 }
1547
1548 return 0;
1549
1550err_dma:
1551 free_dma_rx_desc_resources(priv);
1552
1553 return ret;
1554}
/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX paths (descriptor rings
 * and buffer bookkeeping arrays).
 */
1564static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1565{
1566 u32 tx_count = priv->plat->tx_queues_to_use;
1567 int ret = -ENOMEM;
1568 u32 queue;
1569
1570
1571 for (queue = 0; queue < tx_count; queue++) {
1572 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1573
1574 tx_q->queue_index = queue;
1575 tx_q->priv_data = priv;
1576
1577 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1578 sizeof(*tx_q->tx_skbuff_dma),
1579 GFP_KERNEL);
1580 if (!tx_q->tx_skbuff_dma)
1581 goto err_dma;
1582
1583 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1584 sizeof(struct sk_buff *),
1585 GFP_KERNEL);
1586 if (!tx_q->tx_skbuff)
1587 goto err_dma;
1588
1589 if (priv->extend_desc) {
1590 tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1591 DMA_TX_SIZE *
1592 sizeof(struct
1593 dma_extended_desc),
1594 &tx_q->dma_tx_phy,
1595 GFP_KERNEL);
1596 if (!tx_q->dma_etx)
1597 goto err_dma;
1598 } else {
1599 tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1600 DMA_TX_SIZE *
1601 sizeof(struct
1602 dma_desc),
1603 &tx_q->dma_tx_phy,
1604 GFP_KERNEL);
1605 if (!tx_q->dma_tx)
1606 goto err_dma;
1607 }
1608 }
1609
1610 return 0;
1611
1612err_dma:
1613 free_dma_tx_desc_resources(priv);
1614
1615 return ret;
1616}
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
1626static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1627{
1628
1629 int ret = alloc_dma_rx_desc_resources(priv);
1630
1631 if (ret)
1632 return ret;
1633
1634 ret = alloc_dma_tx_desc_resources(priv);
1635
1636 return ret;
1637}
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
1643static void free_dma_desc_resources(struct stmmac_priv *priv)
1644{
1645
1646 free_dma_rx_desc_resources(priv);
1647
1648
1649 free_dma_tx_desc_resources(priv);
1650}
/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
1657static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1658{
1659 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1660 int queue;
1661 u8 mode;
1662
1663 for (queue = 0; queue < rx_queues_count; queue++) {
1664 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1665 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1666 }
1667}
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
1676static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1677{
1678 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1679 stmmac_start_rx(priv, priv->ioaddr, chan);
1680}
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
1689static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1690{
1691 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1692 stmmac_start_tx(priv, priv->ioaddr, chan);
1693}
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
1702static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1703{
1704 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1705 stmmac_stop_rx(priv, priv->ioaddr, chan);
1706}
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
1715static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1716{
1717 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1718 stmmac_stop_tx(priv, priv->ioaddr, chan);
1719}
/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
1727static void stmmac_start_all_dma(struct stmmac_priv *priv)
1728{
1729 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1730 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1731 u32 chan = 0;
1732
1733 for (chan = 0; chan < rx_channels_count; chan++)
1734 stmmac_start_rx_dma(priv, chan);
1735
1736 for (chan = 0; chan < tx_channels_count; chan++)
1737 stmmac_start_tx_dma(priv, chan);
1738}
/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
1746static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1747{
1748 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1749 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1750 u32 chan = 0;
1751
1752 for (chan = 0; chan < rx_channels_count; chan++)
1753 stmmac_stop_rx_dma(priv, chan);
1754
1755 for (chan = 0; chan < tx_channels_count; chan++)
1756 stmmac_stop_tx_dma(priv, chan);
1757}
/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
1765static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1766{
1767 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1768 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1769 int rxfifosz = priv->plat->rx_fifo_size;
1770 int txfifosz = priv->plat->tx_fifo_size;
1771 u32 txmode = 0;
1772 u32 rxmode = 0;
1773 u32 chan = 0;
1774 u8 qmode = 0;
1775
1776 if (rxfifosz == 0)
1777 rxfifosz = priv->dma_cap.rx_fifo_size;
1778 if (txfifosz == 0)
1779 txfifosz = priv->dma_cap.tx_fifo_size;
1780
1781
1782 rxfifosz /= rx_channels_count;
1783 txfifosz /= tx_channels_count;
1784
1785 if (priv->plat->force_thresh_dma_mode) {
1786 txmode = tc;
1787 rxmode = tc;
1788 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
1796 txmode = SF_DMA_MODE;
1797 rxmode = SF_DMA_MODE;
1798 priv->xstats.threshold = SF_DMA_MODE;
1799 } else {
1800 txmode = tc;
1801 rxmode = SF_DMA_MODE;
1802 }
1803
1804
1805 for (chan = 0; chan < rx_channels_count; chan++) {
1806 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1807
1808 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1809 rxfifosz, qmode);
1810 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1811 chan);
1812 }
1813
1814 for (chan = 0; chan < tx_channels_count; chan++) {
1815 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1816
1817 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1818 txfifosz, qmode);
1819 }
1820}
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission
 * completes.
 */
1828static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1829{
1830 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1831 unsigned int bytes_compl = 0, pkts_compl = 0;
1832 unsigned int entry;
1833
1834 netif_tx_lock(priv->dev);
1835
1836 priv->xstats.tx_clean++;
1837
1838 entry = tx_q->dirty_tx;
1839 while (entry != tx_q->cur_tx) {
1840 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1841 struct dma_desc *p;
1842 int status;
1843
1844 if (priv->extend_desc)
1845 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1846 else
1847 p = tx_q->dma_tx + entry;
1848
1849 status = stmmac_tx_status(priv, &priv->dev->stats,
1850 &priv->xstats, p, priv->ioaddr);
1851
1852 if (unlikely(status & tx_dma_own))
1853 break;
1854
1855
1856
1857
1858 dma_rmb();
1859
1860
1861 if (likely(!(status & tx_not_ls))) {
1862
1863 if (unlikely(status & tx_err)) {
1864 priv->dev->stats.tx_errors++;
1865 } else {
1866 priv->dev->stats.tx_packets++;
1867 priv->xstats.tx_pkt_n++;
1868 }
1869 stmmac_get_tx_hwtstamp(priv, p, skb);
1870 }
1871
1872 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1873 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1874 dma_unmap_page(priv->device,
1875 tx_q->tx_skbuff_dma[entry].buf,
1876 tx_q->tx_skbuff_dma[entry].len,
1877 DMA_TO_DEVICE);
1878 else
1879 dma_unmap_single(priv->device,
1880 tx_q->tx_skbuff_dma[entry].buf,
1881 tx_q->tx_skbuff_dma[entry].len,
1882 DMA_TO_DEVICE);
1883 tx_q->tx_skbuff_dma[entry].buf = 0;
1884 tx_q->tx_skbuff_dma[entry].len = 0;
1885 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1886 }
1887
1888 stmmac_clean_desc3(priv, tx_q, p);
1889
1890 tx_q->tx_skbuff_dma[entry].last_segment = false;
1891 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1892
1893 if (likely(skb != NULL)) {
1894 pkts_compl++;
1895 bytes_compl += skb->len;
1896 dev_consume_skb_any(skb);
1897 tx_q->tx_skbuff[entry] = NULL;
1898 }
1899
1900 stmmac_release_tx_desc(priv, p, priv->mode);
1901
1902 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1903 }
1904 tx_q->dirty_tx = entry;
1905
1906 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1907 pkts_compl, bytes_compl);
1908
1909 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1910 queue))) &&
1911 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1912
1913 netif_dbg(priv, tx_done, priv->dev,
1914 "%s: restart transmit\n", __func__);
1915 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1916 }
1917
1918 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1919 stmmac_enable_eee_mode(priv);
1920 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1921 }
1922 netif_tx_unlock(priv->dev);
1923}
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
1932static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1933{
1934 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1935 int i;
1936
1937 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1938
1939 stmmac_stop_tx_dma(priv, chan);
1940 dma_free_tx_skbufs(priv, chan);
1941 for (i = 0; i < DMA_TX_SIZE; i++)
1942 if (priv->extend_desc)
1943 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1944 priv->mode, (i == DMA_TX_SIZE - 1));
1945 else
1946 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1947 priv->mode, (i == DMA_TX_SIZE - 1));
1948 tx_q->dirty_tx = 0;
1949 tx_q->cur_tx = 0;
1950 tx_q->mss = 0;
1951 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1952 stmmac_start_tx_dma(priv, chan);
1953
1954 priv->dev->stats.tx_errors++;
1955 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1956}
/**
 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 * @priv: driver private structure
 * @txmode: TX operating mode
 * @rxmode: RX operating mode
 * @chan: channel index
 * Description: it is used for configuring the DMA operation mode at runtime
 * in order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
1968static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1969 u32 rxmode, u32 chan)
1970{
1971 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1972 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1973 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1974 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1975 int rxfifosz = priv->plat->rx_fifo_size;
1976 int txfifosz = priv->plat->tx_fifo_size;
1977
1978 if (rxfifosz == 0)
1979 rxfifosz = priv->dma_cap.rx_fifo_size;
1980 if (txfifosz == 0)
1981 txfifosz = priv->dma_cap.tx_fifo_size;
1982
1983
1984 rxfifosz /= rx_channels_count;
1985 txfifosz /= tx_channels_count;
1986
1987 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1988 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1989}
1990
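/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: read the safety feature IRQ status and trigger a global
 * reset if an error was reported.
 */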
1991static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1992{
1993 int ret;
1994
1995 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1996 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1997 if (ret && (ret != -EINVAL)) {
1998 stmmac_global_err(priv);
1999 return true;
2000 }
2001
2002 return false;
2003}
/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
2012static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2013{
2014 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2015 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2016 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2017 tx_channel_count : rx_channel_count;
2018 u32 chan;
2019 bool poll_scheduled = false;
2020 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2021
2022
2023 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2024 channels_to_check = ARRAY_SIZE(status);
2025
	/* Each DMA channel can be used for rx and tx simultaneously, yet
	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
	 * per-channel struct, so the DMA status of every channel is collected
	 * first and the RX napi instances are scheduled afterwards.
	 */
2032 for (chan = 0; chan < channels_to_check; chan++)
2033 status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2034 &priv->xstats, chan);
2035
2036 for (chan = 0; chan < rx_channel_count; chan++) {
2037 if (likely(status[chan] & handle_rx)) {
2038 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2039
2040 if (likely(napi_schedule_prep(&rx_q->napi))) {
2041 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2042 __napi_schedule(&rx_q->napi);
2043 poll_scheduled = true;
2044 }
2045 }
2046 }
2047
	/* If we scheduled poll, we already know that tx queues will be
	 * checked. If we didn't schedule poll, see if any DMA channel (used
	 * by tx) has a completed transmission and, if so, schedule the poll
	 * method once.
	 */
2052 if (!poll_scheduled) {
2053 for (chan = 0; chan < tx_channel_count; chan++) {
2054 if (status[chan] & handle_tx) {
2055
2056
2057
2058 struct stmmac_rx_queue *rx_q =
2059 &priv->rx_queue[0];
2060
2061 if (likely(napi_schedule_prep(&rx_q->napi))) {
2062 stmmac_disable_dma_irq(priv,
2063 priv->ioaddr, chan);
2064 __napi_schedule(&rx_q->napi);
2065 }
2066 break;
2067 }
2068 }
2069 }
2070
2071 for (chan = 0; chan < tx_channel_count; chan++) {
2072 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2073
2074 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2075 (tc <= 256)) {
2076 tc += 64;
2077 if (priv->plat->force_thresh_dma_mode)
2078 stmmac_set_dma_operation_mode(priv,
2079 tc,
2080 tc,
2081 chan);
2082 else
2083 stmmac_set_dma_operation_mode(priv,
2084 tc,
2085 SF_DMA_MODE,
2086 chan);
2087 priv->xstats.threshold = tc;
2088 }
2089 } else if (unlikely(status[chan] == tx_hard_error)) {
2090 stmmac_tx_err(priv, chan);
2091 }
2092 }
2093}
/**
 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed
 * in SW.
 */
2100static void stmmac_mmc_setup(struct stmmac_priv *priv)
2101{
2102 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2103 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2104
2105 dwmac_mmc_intr_all_mask(priv->mmcaddr);
2106
2107 if (priv->dma_cap.rmon) {
2108 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2109 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2110 } else
2111 netdev_info(priv->dev, "No MAC Management Counters available\n");
2112}
/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of modules/features.
 *  This is the 'dma_cap' register.
 *  Return: non-zero if the HW capability register could be read.
 */
2123static int stmmac_get_hw_features(struct stmmac_priv *priv)
2124{
2125 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2126}
/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
2135static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2136{
2137 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2138 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2139 if (!is_valid_ether_addr(priv->dev->dev_addr))
2140 eth_hw_addr_random(priv->dev);
2141 netdev_info(priv->dev, "device MAC address %pM\n",
2142 priv->dev->dev_addr);
2143 }
2144}
/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is kept for the MAC or GMAC.
 */
2154static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2155{
2156 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2157 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2158 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2159 struct stmmac_rx_queue *rx_q;
2160 struct stmmac_tx_queue *tx_q;
2161 u32 chan = 0;
2162 int atds = 0;
2163 int ret = 0;
2164
2165 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2166 dev_err(priv->device, "Invalid DMA configuration\n");
2167 return -EINVAL;
2168 }
2169
2170 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2171 atds = 1;
2172
2173 ret = stmmac_reset(priv, priv->ioaddr);
2174 if (ret) {
2175 dev_err(priv->device, "Failed to reset the dma\n");
2176 return ret;
2177 }
2178
2179
2180 for (chan = 0; chan < rx_channels_count; chan++) {
2181 rx_q = &priv->rx_queue[chan];
2182
2183 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2184 rx_q->dma_rx_phy, chan);
2185
2186 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2187 (DMA_RX_SIZE * sizeof(struct dma_desc));
2188 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2189 rx_q->rx_tail_addr, chan);
2190 }
2191
2192
2193 for (chan = 0; chan < tx_channels_count; chan++) {
2194 tx_q = &priv->tx_queue[chan];
2195
2196 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2197 tx_q->dma_tx_phy, chan);
2198
2199 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2200 (DMA_TX_SIZE * sizeof(struct dma_desc));
2201 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2202 tx_q->tx_tail_addr, chan);
2203 }
2204
2205
2206 for (chan = 0; chan < dma_csr_ch; chan++)
2207 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2208
2209
2210 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2211
2212 if (priv->plat->axi)
2213 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2214
2215 return ret;
2216}
/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: timer_list struct containing private info
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
2224static void stmmac_tx_timer(struct timer_list *t)
2225{
2226 struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2227 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2228 u32 queue;
2229
2230
2231 for (queue = 0; queue < tx_queues_count; queue++)
2232 stmmac_tx_clean(priv, queue);
2233}
/**
 * stmmac_init_tx_coalesce - init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
2243static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2244{
2245 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2246 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2247 timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2248 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2249 add_timer(&priv->txtimer);
2250}
2251
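/**
 * stmmac_set_rings_length - program the RX/TX DMA ring lengths in HW
 * @priv: driver private structure
 */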
2252static void stmmac_set_rings_length(struct stmmac_priv *priv)
2253{
2254 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2255 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2256 u32 chan;
2257
2258
2259 for (chan = 0; chan < tx_channels_count; chan++)
2260 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2261 (DMA_TX_SIZE - 1), chan);
2262
2263
2264 for (chan = 0; chan < rx_channels_count; chan++)
2265 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2266 (DMA_RX_SIZE - 1), chan);
2267}
/**
 * stmmac_set_tx_queue_weight - Set TX queue weight
 * @priv: driver private structure
 * Description: It is used for setting TX queues weight
 */
2274static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2275{
2276 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2277 u32 weight;
2278 u32 queue;
2279
2280 for (queue = 0; queue < tx_queues_count; queue++) {
2281 weight = priv->plat->tx_queues_cfg[queue].weight;
2282 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2283 }
2284}
/**
 * stmmac_configure_cbs - Configure CBS in TX queue
 * @priv: driver private structure
 * Description: It is used for configuring CBS in AVB TX queues
 */
2291static void stmmac_configure_cbs(struct stmmac_priv *priv)
2292{
2293 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2294 u32 mode_to_use;
2295 u32 queue;
2296
2297
2298 for (queue = 1; queue < tx_queues_count; queue++) {
2299 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2300 if (mode_to_use == MTL_QUEUE_DCB)
2301 continue;
2302
2303 stmmac_config_cbs(priv, priv->hw,
2304 priv->plat->tx_queues_cfg[queue].send_slope,
2305 priv->plat->tx_queues_cfg[queue].idle_slope,
2306 priv->plat->tx_queues_cfg[queue].high_credit,
2307 priv->plat->tx_queues_cfg[queue].low_credit,
2308 queue);
2309 }
2310}
/**
 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 * @priv: driver private structure
 * Description: It is used for mapping RX queues to RX dma channels
 */
2317static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2318{
2319 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2320 u32 queue;
2321 u32 chan;
2322
2323 for (queue = 0; queue < rx_queues_count; queue++) {
2324 chan = priv->plat->rx_queues_cfg[queue].chan;
2325 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2326 }
2327}
/**
 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the RX Queue Priority
 */
2334static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2335{
2336 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2337 u32 queue;
2338 u32 prio;
2339
2340 for (queue = 0; queue < rx_queues_count; queue++) {
2341 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2342 continue;
2343
2344 prio = priv->plat->rx_queues_cfg[queue].prio;
2345 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2346 }
2347}
/**
 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the TX Queue Priority
 */
2354static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2355{
2356 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2357 u32 queue;
2358 u32 prio;
2359
2360 for (queue = 0; queue < tx_queues_count; queue++) {
2361 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2362 continue;
2363
2364 prio = priv->plat->tx_queues_cfg[queue].prio;
2365 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2366 }
2367}
/**
 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 * @priv: driver private structure
 * Description: It is used for configuring the RX queue routing
 */
2374static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2375{
2376 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2377 u32 queue;
2378 u8 packet;
2379
2380 for (queue = 0; queue < rx_queues_count; queue++) {
2381
2382 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2383 continue;
2384
2385 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2386 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2387 }
2388}
/**
 * stmmac_mtl_configuration - Configure MTL
 * @priv: driver private structure
 * Description: It is used for configuring MTL
 */
2395static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2396{
2397 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2398 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2399
2400 if (tx_queues_count > 1)
2401 stmmac_set_tx_queue_weight(priv);
2402
2403
2404 if (rx_queues_count > 1)
2405 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2406 priv->plat->rx_sched_algorithm);
2407
2408
2409 if (tx_queues_count > 1)
2410 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2411 priv->plat->tx_sched_algorithm);
2412
2413
2414 if (tx_queues_count > 1)
2415 stmmac_configure_cbs(priv);
2416
2417
2418 stmmac_rx_queue_dma_chan_map(priv);
2419
2420
2421 stmmac_mac_enable_rx_queues(priv);
2422
2423
2424 if (rx_queues_count > 1)
2425 stmmac_mac_config_rx_queues_prio(priv);
2426
2427
2428 if (tx_queues_count > 1)
2429 stmmac_mac_config_tx_queues_prio(priv);
2430
2431
2432 if (rx_queues_count > 1)
2433 stmmac_mac_config_rx_queues_routing(priv);
2434}
2435
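/**
 * stmmac_safety_feat_configuration - configure the HW safety features
 * @priv: driver private structure
 * Description: enable the Automotive Safety Package when the HW reports
 * support for it (dma_cap.asp).
 */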
2436static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2437{
2438 if (priv->dma_cap.asp) {
2439 netdev_info(priv->dev, "Enabling Safety Features\n");
2440 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2441 } else {
2442 netdev_info(priv->dev, "No Safety Features support found\n");
2443 }
2444}
/**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
 * @init_ptp: initialize PTP if set
 * Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 * Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
2458static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2459{
2460 struct stmmac_priv *priv = netdev_priv(dev);
2461 u32 rx_cnt = priv->plat->rx_queues_to_use;
2462 u32 tx_cnt = priv->plat->tx_queues_to_use;
2463 u32 chan;
2464 int ret;
2465
2466
2467 ret = stmmac_init_dma_engine(priv);
2468 if (ret < 0) {
2469 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2470 __func__);
2471 return ret;
2472 }
2473
2474
2475 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2476
2477
2478 if (priv->hw->pcs) {
2479 int speed = priv->plat->mac_port_sel_speed;
2480
2481 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2482 (speed == SPEED_1000)) {
2483 priv->hw->ps = speed;
2484 } else {
2485 dev_warn(priv->device, "invalid port speed\n");
2486 priv->hw->ps = 0;
2487 }
2488 }
2489
2490
2491 stmmac_core_init(priv, priv->hw, dev);
2492
2493
2494 stmmac_mtl_configuration(priv);
2495
2496
2497 stmmac_safety_feat_configuration(priv);
2498
2499 ret = stmmac_rx_ipc(priv, priv->hw);
2500 if (!ret) {
2501 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2502 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2503 priv->hw->rx_csum = 0;
2504 }
2505
2506
2507 stmmac_mac_set(priv, priv->ioaddr, true);
2508
2509
2510 stmmac_dma_operation_mode(priv);
2511
2512 stmmac_mmc_setup(priv);
2513
2514 if (init_ptp) {
2515 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2516 if (ret < 0)
2517 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2518
2519 ret = stmmac_init_ptp(priv);
2520 if (ret == -EOPNOTSUPP)
2521 netdev_warn(priv->dev, "PTP not supported by HW\n");
2522 else if (ret)
2523 netdev_warn(priv->dev, "PTP init failed\n");
2524 }
2525
2526#ifdef CONFIG_DEBUG_FS
2527 ret = stmmac_init_fs(dev);
2528 if (ret < 0)
2529 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2530 __func__);
2531#endif
2532
2533 stmmac_start_all_dma(priv);
2534
2535 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2536
2537 if (priv->use_riwt) {
2538 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2539 if (!ret)
2540 priv->rx_riwt = MAX_DMA_RIWT;
2541 }
2542
2543 if (priv->hw->pcs)
2544 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2545
2546
2547 stmmac_set_rings_length(priv);
2548
2549
2550 if (priv->tso) {
2551 for (chan = 0; chan < tx_cnt; chan++)
2552 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2553 }
2554
2555 return 0;
2556}
2557
2558static void stmmac_hw_teardown(struct net_device *dev)
2559{
2560 struct stmmac_priv *priv = netdev_priv(dev);
2561
2562 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2563}
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
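/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */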
2574static int stmmac_open(struct net_device *dev)
2575{
2576 struct stmmac_priv *priv = netdev_priv(dev);
2577 int ret;
2578
2579 stmmac_check_ether_addr(priv);
2580
2581 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2582 priv->hw->pcs != STMMAC_PCS_TBI &&
2583 priv->hw->pcs != STMMAC_PCS_RTBI) {
2584 ret = stmmac_init_phy(dev);
2585 if (ret) {
2586 netdev_err(priv->dev,
2587 "%s: Cannot attach to PHY (error: %d)\n",
2588 __func__, ret);
2589 return ret;
2590 }
2591 }
2592
2593
2594 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2595 priv->xstats.threshold = tc;
2596
2597 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2598 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2599
2600 ret = alloc_dma_desc_resources(priv);
2601 if (ret < 0) {
2602 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2603 __func__);
2604 goto dma_desc_error;
2605 }
2606
2607 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2608 if (ret < 0) {
2609 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2610 __func__);
2611 goto init_error;
2612 }
2613
2614 ret = stmmac_hw_setup(dev, true);
2615 if (ret < 0) {
2616 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2617 goto init_error;
2618 }
2619
2620 stmmac_init_tx_coalesce(priv);
2621
2622 if (dev->phydev)
2623 phy_start(dev->phydev);
2624
2625
2626 ret = request_irq(dev->irq, stmmac_interrupt,
2627 IRQF_SHARED, dev->name, dev);
2628 if (unlikely(ret < 0)) {
2629 netdev_err(priv->dev,
2630 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2631 __func__, dev->irq, ret);
2632 goto irq_error;
2633 }
2634
2635
2636 if (priv->wol_irq != dev->irq) {
2637 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2638 IRQF_SHARED, dev->name, dev);
2639 if (unlikely(ret < 0)) {
2640 netdev_err(priv->dev,
2641 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2642 __func__, priv->wol_irq, ret);
2643 goto wolirq_error;
2644 }
2645 }
2646
2647
2648 if (priv->lpi_irq > 0) {
2649 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2650 dev->name, dev);
2651 if (unlikely(ret < 0)) {
2652 netdev_err(priv->dev,
2653 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2654 __func__, priv->lpi_irq, ret);
2655 goto lpiirq_error;
2656 }
2657 }
2658
2659 stmmac_enable_all_queues(priv);
2660 stmmac_start_all_queues(priv);
2661
2662 return 0;
2663
2664lpiirq_error:
2665 if (priv->wol_irq != dev->irq)
2666 free_irq(priv->wol_irq, dev);
2667wolirq_error:
2668 free_irq(dev->irq, dev);
2669irq_error:
2670 if (dev->phydev)
2671 phy_stop(dev->phydev);
2672
2673 del_timer_sync(&priv->txtimer);
2674 stmmac_hw_teardown(dev);
2675init_error:
2676 free_dma_desc_resources(priv);
2677dma_desc_error:
2678 if (dev->phydev)
2679 phy_disconnect(dev->phydev);
2680
2681 return ret;
2682}
2683
2684
2685
2686
2687
2688
2689
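/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */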
2690static int stmmac_release(struct net_device *dev)
2691{
2692 struct stmmac_priv *priv = netdev_priv(dev);
2693
2694 if (priv->eee_enabled)
2695 del_timer_sync(&priv->eee_ctrl_timer);
2696
2697
2698 if (dev->phydev) {
2699 phy_stop(dev->phydev);
2700 phy_disconnect(dev->phydev);
2701 }
2702
2703 stmmac_stop_all_queues(priv);
2704
2705 stmmac_disable_all_queues(priv);
2706
2707 del_timer_sync(&priv->txtimer);
2708
2709
2710 free_irq(dev->irq, dev);
2711 if (priv->wol_irq != dev->irq)
2712 free_irq(priv->wol_irq, dev);
2713 if (priv->lpi_irq > 0)
2714 free_irq(priv->lpi_irq, dev);
2715
2716
2717 stmmac_stop_all_dma(priv);
2718
2719
2720 free_dma_desc_resources(priv);
2721
2722
2723 stmmac_mac_set(priv, priv->ioaddr, false);
2724
2725 netif_carrier_off(dev);
2726
2727#ifdef CONFIG_DEBUG_FS
2728 stmmac_exit_fs(dev);
2729#endif
2730
2731 stmmac_release_ptp(priv);
2732
2733 return 0;
2734}
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
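/**
 *  stmmac_tso_allocator - fill TSO descriptors for a buffer
 *  @priv: driver private structure
 *  @des: DMA address of the buffer start
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills as many descriptors as needed for the given buffer,
 *  splitting it into chunks of at most TSO_MAX_BUFF_SIZE bytes.
 */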
2747static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2748 int total_len, bool last_segment, u32 queue)
2749{
2750 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2751 struct dma_desc *desc;
2752 u32 buff_size;
2753 int tmp_len;
2754
2755 tmp_len = total_len;
2756
2757 while (tmp_len > 0) {
2758 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2759 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2760 desc = tx_q->dma_tx + tx_q->cur_tx;
2761
2762 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2763 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2764 TSO_MAX_BUFF_SIZE : tmp_len;
2765
2766 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2767 0, 1,
2768 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2769 0, 0);
2770
2771 tmp_len -= TSO_MAX_BUFF_SIZE;
2772 }
2773}
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
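/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: transmit function used when TSO is supported by the HW.
 *  The first descriptor carries the L3/L4 header, the following ones carry
 *  the payload split in TSO_MAX_BUFF_SIZE chunks, and a context descriptor
 *  is inserted whenever the MSS changes.
 */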
2802static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2803{
2804 struct dma_desc *desc, *first, *mss_desc = NULL;
2805 struct stmmac_priv *priv = netdev_priv(dev);
2806 int nfrags = skb_shinfo(skb)->nr_frags;
2807 u32 queue = skb_get_queue_mapping(skb);
2808 unsigned int first_entry, des;
2809 struct stmmac_tx_queue *tx_q;
2810 int tmp_pay_len = 0;
2811 u32 pay_len, mss;
2812 u8 proto_hdr_len;
2813 int i;
2814
2815 tx_q = &priv->tx_queue[queue];
2816
2817
2818 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2819
2820
2821 if (unlikely(stmmac_tx_avail(priv, queue) <
2822 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2823 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2824 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2825 queue));
2826
2827 netdev_err(priv->dev,
2828 "%s: Tx Ring full when queue awake\n",
2829 __func__);
2830 }
2831 return NETDEV_TX_BUSY;
2832 }
2833
2834 pay_len = skb_headlen(skb) - proto_hdr_len;
2835
2836 mss = skb_shinfo(skb)->gso_size;
2837
2838
2839 if (mss != tx_q->mss) {
2840 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2841 stmmac_set_mss(priv, mss_desc, mss);
2842 tx_q->mss = mss;
2843 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2844 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2845 }
2846
2847 if (netif_msg_tx_queued(priv)) {
2848 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2849 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2850 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2851 skb->data_len);
2852 }
2853
2854 first_entry = tx_q->cur_tx;
2855 WARN_ON(tx_q->tx_skbuff[first_entry]);
2856
2857 desc = tx_q->dma_tx + first_entry;
2858 first = desc;
2859
2860
2861 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2862 DMA_TO_DEVICE);
2863 if (dma_mapping_error(priv->device, des))
2864 goto dma_map_err;
2865
2866 tx_q->tx_skbuff_dma[first_entry].buf = des;
2867 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2868
2869 first->des0 = cpu_to_le32(des);
2870
2871
2872 if (pay_len)
2873 first->des1 = cpu_to_le32(des + proto_hdr_len);
2874
2875
2876 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2877
2878 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2879
2880
2881 for (i = 0; i < nfrags; i++) {
2882 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2883
2884 des = skb_frag_dma_map(priv->device, frag, 0,
2885 skb_frag_size(frag),
2886 DMA_TO_DEVICE);
2887 if (dma_mapping_error(priv->device, des))
2888 goto dma_map_err;
2889
2890 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2891 (i == nfrags - 1), queue);
2892
2893 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2894 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2895 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2896 }
2897
2898 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2899
2900
2901 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2902
2903
2904
2905
2906
2907
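	/* All the descriptors needed by this skb have been filled; advance
	 * cur_tx so that it points to a fresh descriptor for the next frame.
	 */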
2908 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2909
2910 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2911 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2912 __func__);
2913 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2914 }
2915
2916 dev->stats.tx_bytes += skb->len;
2917 priv->xstats.tx_tso_frames++;
2918 priv->xstats.tx_tso_nfrags += nfrags;
2919
2920
2921 priv->tx_count_frames += nfrags + 1;
2922 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2923 mod_timer(&priv->txtimer,
2924 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2925 } else {
2926 priv->tx_count_frames = 0;
2927 stmmac_set_tx_ic(priv, desc);
2928 priv->xstats.tx_set_ic_bit++;
2929 }
2930
2931 skb_tx_timestamp(skb);
2932
2933 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2934 priv->hwts_tx_en)) {
2935
2936 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2937 stmmac_enable_tx_timestamp(priv, first);
2938 }
2939
2940
2941 stmmac_prepare_tso_tx_desc(priv, first, 1,
2942 proto_hdr_len,
2943 pay_len,
2944 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2945 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2946
2947
2948 if (mss_desc) {
2949
2950
2951
2952
2953
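		/* Make sure the first data descriptor (including its own bit)
		 * is completely written before the MSS context descriptor is
		 * released to the DMA.
		 */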
2954 dma_wmb();
2955 stmmac_set_tx_owner(priv, mss_desc);
2956 }
2957
2958
2959
2960
2961
2962 wmb();
2963
2964 if (netif_msg_pktdata(priv)) {
2965 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2966 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2967 tx_q->cur_tx, first, nfrags);
2968
2969 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2970
2971 pr_info(">>> frame to be transmitted: ");
2972 print_pkt(skb->data, skb_headlen(skb));
2973 }
2974
2975 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2976
2977 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2978
2979 return NETDEV_TX_OK;
2980
2981dma_map_err:
2982 dev_err(priv->device, "Tx dma map failed\n");
2983 dev_kfree_skb(skb);
2984 priv->dev->stats.tx_dropped++;
2985 return NETDEV_TX_OK;
2986}
2987
2988
2989
2990
2991
2992
2993
2994
2995
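/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */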
2996static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2997{
2998 struct stmmac_priv *priv = netdev_priv(dev);
2999 unsigned int nopaged_len = skb_headlen(skb);
3000 int i, csum_insertion = 0, is_jumbo = 0;
3001 u32 queue = skb_get_queue_mapping(skb);
3002 int nfrags = skb_shinfo(skb)->nr_frags;
3003 int entry;
3004 unsigned int first_entry;
3005 struct dma_desc *desc, *first;
3006 struct stmmac_tx_queue *tx_q;
3007 unsigned int enh_desc;
3008 unsigned int des;
3009
3010 tx_q = &priv->tx_queue[queue];
3011
3012
3013 if (skb_is_gso(skb) && priv->tso) {
3014 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3015 return stmmac_tso_xmit(skb, dev);
3016 }
3017
3018 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3019 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3020 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3021 queue));
3022
3023 netdev_err(priv->dev,
3024 "%s: Tx Ring full when queue awake\n",
3025 __func__);
3026 }
3027 return NETDEV_TX_BUSY;
3028 }
3029
3030 if (priv->tx_path_in_lpi_mode)
3031 stmmac_disable_eee_mode(priv);
3032
3033 entry = tx_q->cur_tx;
3034 first_entry = entry;
3035 WARN_ON(tx_q->tx_skbuff[first_entry]);
3036
3037 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3038
3039 if (likely(priv->extend_desc))
3040 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3041 else
3042 desc = tx_q->dma_tx + entry;
3043
3044 first = desc;
3045
3046 enh_desc = priv->plat->enh_desc;
3047
3048 if (enh_desc)
3049 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3050
3051 if (unlikely(is_jumbo)) {
3052 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3053 if (unlikely(entry < 0) && (entry != -EINVAL))
3054 goto dma_map_err;
3055 }
3056
3057 for (i = 0; i < nfrags; i++) {
3058 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3059 int len = skb_frag_size(frag);
3060 bool last_segment = (i == (nfrags - 1));
3061
3062 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3063 WARN_ON(tx_q->tx_skbuff[entry]);
3064
3065 if (likely(priv->extend_desc))
3066 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3067 else
3068 desc = tx_q->dma_tx + entry;
3069
3070 des = skb_frag_dma_map(priv->device, frag, 0, len,
3071 DMA_TO_DEVICE);
3072 if (dma_mapping_error(priv->device, des))
3073 goto dma_map_err;
3074
3075 tx_q->tx_skbuff_dma[entry].buf = des;
3076
3077 stmmac_set_desc_addr(priv, desc, des);
3078
3079 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3080 tx_q->tx_skbuff_dma[entry].len = len;
3081 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3082
3083
3084 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3085 priv->mode, 1, last_segment, skb->len);
3086 }
3087
3088
3089 tx_q->tx_skbuff[entry] = skb;
3090
3091
3092
3093
3094
3095
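	/* All the descriptors needed by this skb have been filled; advance
	 * cur_tx so that it points to a fresh descriptor for the next frame.
	 */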
3096 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3097 tx_q->cur_tx = entry;
3098
3099 if (netif_msg_pktdata(priv)) {
3100 void *tx_head;
3101
3102 netdev_dbg(priv->dev,
3103 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3104 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3105 entry, first, nfrags);
3106
3107 if (priv->extend_desc)
3108 tx_head = (void *)tx_q->dma_etx;
3109 else
3110 tx_head = (void *)tx_q->dma_tx;
3111
3112 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3113
3114 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3115 print_pkt(skb->data, skb->len);
3116 }
3117
3118 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3119 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3120 __func__);
3121 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3122 }
3123
3124 dev->stats.tx_bytes += skb->len;
3125
3126
3127
3128
3129
3130
3131 priv->tx_count_frames += nfrags + 1;
3132 if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3133 !priv->tx_timer_armed) {
3134 mod_timer(&priv->txtimer,
3135 STMMAC_COAL_TIMER(priv->tx_coal_timer));
3136 priv->tx_timer_armed = true;
3137 } else {
3138 priv->tx_count_frames = 0;
3139 stmmac_set_tx_ic(priv, desc);
3140 priv->xstats.tx_set_ic_bit++;
3141 priv->tx_timer_armed = false;
3142 }
3143
3144 skb_tx_timestamp(skb);
3145
3146
3147
3148
3149
3150 if (likely(!is_jumbo)) {
3151 bool last_segment = (nfrags == 0);
3152
3153 des = dma_map_single(priv->device, skb->data,
3154 nopaged_len, DMA_TO_DEVICE);
3155 if (dma_mapping_error(priv->device, des))
3156 goto dma_map_err;
3157
3158 tx_q->tx_skbuff_dma[first_entry].buf = des;
3159
3160 stmmac_set_desc_addr(priv, first, des);
3161
3162 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3163 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3164
3165 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3166 priv->hwts_tx_en)) {
3167
3168 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3169 stmmac_enable_tx_timestamp(priv, first);
3170 }
3171
3172
3173 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3174 csum_insertion, priv->mode, 1, last_segment,
3175 skb->len);
3176
3177
3178
3179
3180
3181 wmb();
3182 }
3183
3184 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3185
3186 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3187 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3188
3189 return NETDEV_TX_OK;
3190
3191dma_map_err:
3192 netdev_err(priv->dev, "Tx DMA map failed\n");
3193 dev_kfree_skb(skb);
3194 priv->dev->stats.tx_dropped++;
3195 return NETDEV_TX_OK;
3196}
3197
3198static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3199{
3200 struct vlan_ethhdr *veth;
3201 __be16 vlan_proto;
3202 u16 vlanid;
3203
3204 veth = (struct vlan_ethhdr *)skb->data;
3205 vlan_proto = veth->h_vlan_proto;
3206
3207 if ((vlan_proto == htons(ETH_P_8021Q) &&
3208 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3209 (vlan_proto == htons(ETH_P_8021AD) &&
3210 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3211
3212 vlanid = ntohs(veth->h_vlan_TCI);
3213 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3214 skb_pull(skb, VLAN_HLEN);
3215 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3216 }
3217}
3218
3219
3220static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3221{
3222 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3223 return 0;
3224
3225 return 1;
3226}
3227
3228
3229
3230
3231
3232
3233
3234
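/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate and remap the skbs for the descriptors
 * already used by the hardware, then give them back to the DMA.
 */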
3235static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3236{
3237 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3238 int dirty = stmmac_rx_dirty(priv, queue);
3239 unsigned int entry = rx_q->dirty_rx;
3240
3241 int bfsize = priv->dma_buf_sz;
3242
3243 while (dirty-- > 0) {
3244 struct dma_desc *p;
3245
3246 if (priv->extend_desc)
3247 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3248 else
3249 p = rx_q->dma_rx + entry;
3250
3251 if (likely(!rx_q->rx_skbuff[entry])) {
3252 struct sk_buff *skb;
3253
3254 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3255 if (unlikely(!skb)) {
3256
3257 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3258 if (unlikely(net_ratelimit()))
3259 dev_err(priv->device,
3260 "fail to alloc skb entry %d\n",
3261 entry);
3262 break;
3263 }
3264
3265 rx_q->rx_skbuff[entry] = skb;
3266 rx_q->rx_skbuff_dma[entry] =
3267 dma_map_single(priv->device, skb->data, bfsize,
3268 DMA_FROM_DEVICE);
3269 if (dma_mapping_error(priv->device,
3270 rx_q->rx_skbuff_dma[entry])) {
3271 netdev_err(priv->dev, "Rx DMA map failed\n");
3272 dev_kfree_skb(skb);
3273 break;
3274 }
3275
3276 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3277 stmmac_refill_desc3(priv, rx_q, p);
3278
3279 if (rx_q->rx_zeroc_thresh > 0)
3280 rx_q->rx_zeroc_thresh--;
3281
3282 netif_dbg(priv, rx_status, priv->dev,
3283 "refill entry #%d\n", entry);
3284 }
3285 dma_wmb();
3286
3287 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3288
3289 dma_wmb();
3290
3291 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3292 }
3293 rx_q->dirty_rx = entry;
3294}
3295
3296
3297
3298
3299
3300
3301
3302
3303
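/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It processes the frames in the ring, up to @limit, and then refills it.
 */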
3304static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3305{
3306 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3307 unsigned int entry = rx_q->cur_rx;
3308 int coe = priv->hw->rx_csum;
3309 unsigned int next_entry;
3310 unsigned int count = 0;
3311
3312 if (netif_msg_rx_status(priv)) {
3313 void *rx_head;
3314
3315 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3316 if (priv->extend_desc)
3317 rx_head = (void *)rx_q->dma_erx;
3318 else
3319 rx_head = (void *)rx_q->dma_rx;
3320
3321 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3322 }
3323 while (count < limit) {
3324 int status;
3325 struct dma_desc *p;
3326 struct dma_desc *np;
3327
3328 if (priv->extend_desc)
3329 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3330 else
3331 p = rx_q->dma_rx + entry;
3332
3333
3334 status = stmmac_rx_status(priv, &priv->dev->stats,
3335 &priv->xstats, p);
3336
3337 if (unlikely(status & dma_own))
3338 break;
3339
3340 count++;
3341
3342 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3343 next_entry = rx_q->cur_rx;
3344
3345 if (priv->extend_desc)
3346 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3347 else
3348 np = rx_q->dma_rx + next_entry;
3349
3350 prefetch(np);
3351
3352 if (priv->extend_desc)
3353 stmmac_rx_extended_status(priv, &priv->dev->stats,
3354 &priv->xstats, rx_q->dma_erx + entry);
3355 if (unlikely(status == discard_frame)) {
3356 priv->dev->stats.rx_errors++;
3357 if (priv->hwts_rx_en && !priv->extend_desc) {
3358
3359
3360
3361
3362
3363 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3364 rx_q->rx_skbuff[entry] = NULL;
3365 dma_unmap_single(priv->device,
3366 rx_q->rx_skbuff_dma[entry],
3367 priv->dma_buf_sz,
3368 DMA_FROM_DEVICE);
3369 }
3370 } else {
3371 struct sk_buff *skb;
3372 int frame_len;
3373 unsigned int des;
3374
3375 stmmac_get_desc_addr(priv, p, &des);
3376 frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3377
3378
3379
3380
3381
3382 if (frame_len > priv->dma_buf_sz) {
3383 netdev_err(priv->dev,
3384 "len %d larger than size (%d)\n",
3385 frame_len, priv->dma_buf_sz);
3386 priv->dev->stats.rx_length_errors++;
3387 break;
3388 }
3389
3390
3391
3392
3393
3394
3395
3396
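			/* Cores with ACS strip pad/FCS only for IEEE 802.3
			 * (LLC/SNAP) frames; on GMAC4 and newer llc_snap is
			 * never reported, so the 4 FCS bytes are removed
			 * here in software for all other cases.
			 */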
3397 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3398 unlikely(status != llc_snap))
3399 frame_len -= ETH_FCS_LEN;
3400
3401 if (netif_msg_rx_status(priv)) {
3402 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3403 p, entry, des);
3404 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3405 frame_len, status);
3406 }
3407
3408
3409
3410
3411
3412 if (unlikely(!priv->plat->has_gmac4 &&
3413 ((frame_len < priv->rx_copybreak) ||
3414 stmmac_rx_threshold_count(rx_q)))) {
3415 skb = netdev_alloc_skb_ip_align(priv->dev,
3416 frame_len);
3417 if (unlikely(!skb)) {
3418 if (net_ratelimit())
3419 dev_warn(priv->device,
3420 "packet dropped\n");
3421 priv->dev->stats.rx_dropped++;
3422 break;
3423 }
3424
3425 dma_sync_single_for_cpu(priv->device,
3426 rx_q->rx_skbuff_dma
3427 [entry], frame_len,
3428 DMA_FROM_DEVICE);
3429 skb_copy_to_linear_data(skb,
3430 rx_q->
3431 rx_skbuff[entry]->data,
3432 frame_len);
3433
3434 skb_put(skb, frame_len);
3435 dma_sync_single_for_device(priv->device,
3436 rx_q->rx_skbuff_dma
3437 [entry], frame_len,
3438 DMA_FROM_DEVICE);
3439 } else {
3440 skb = rx_q->rx_skbuff[entry];
3441 if (unlikely(!skb)) {
3442 netdev_err(priv->dev,
3443 "%s: Inconsistent Rx chain\n",
3444 priv->dev->name);
3445 priv->dev->stats.rx_dropped++;
3446 break;
3447 }
3448 prefetch(skb->data - NET_IP_ALIGN);
3449 rx_q->rx_skbuff[entry] = NULL;
3450 rx_q->rx_zeroc_thresh++;
3451
3452 skb_put(skb, frame_len);
3453 dma_unmap_single(priv->device,
3454 rx_q->rx_skbuff_dma[entry],
3455 priv->dma_buf_sz,
3456 DMA_FROM_DEVICE);
3457 }
3458
3459 if (netif_msg_pktdata(priv)) {
3460 netdev_dbg(priv->dev, "frame received (%dbytes)",
3461 frame_len);
3462 print_pkt(skb->data, frame_len);
3463 }
3464
3465 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3466
3467 stmmac_rx_vlan(priv->dev, skb);
3468
3469 skb->protocol = eth_type_trans(skb, priv->dev);
3470
3471 if (unlikely(!coe))
3472 skb_checksum_none_assert(skb);
3473 else
3474 skb->ip_summed = CHECKSUM_UNNECESSARY;
3475
3476 napi_gro_receive(&rx_q->napi, skb);
3477
3478 priv->dev->stats.rx_packets++;
3479 priv->dev->stats.rx_bytes += frame_len;
3480 }
3481 entry = next_entry;
3482 }
3483
3484 stmmac_rx_refill(priv, queue);
3485
3486 priv->xstats.rx_pkt_n += count;
3487
3488 return count;
3489}
3490
3491
3492
3493
3494
3495
3496
3497
3498
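/**
 *  stmmac_poll - stmmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive.
 *  Description :
 *  Cleans the TX queues and then processes the RX ring, re-enabling the DMA
 *  interrupt when the budget is not exhausted.
 */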
3499static int stmmac_poll(struct napi_struct *napi, int budget)
3500{
3501 struct stmmac_rx_queue *rx_q =
3502 container_of(napi, struct stmmac_rx_queue, napi);
3503 struct stmmac_priv *priv = rx_q->priv_data;
3504 u32 tx_count = priv->plat->tx_queues_to_use;
3505 u32 chan = rx_q->queue_index;
3506 int work_done = 0;
3507 u32 queue;
3508
3509 priv->xstats.napi_poll++;
3510
3511
3512 for (queue = 0; queue < tx_count; queue++)
3513 stmmac_tx_clean(priv, queue);
3514
3515 work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3516 if (work_done < budget) {
3517 napi_complete_done(napi, work_done);
3518 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3519 }
3520 return work_done;
3521}
3522
3523
3524
3525
3526
3527
3528
3529
3530
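/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *  complete within a reasonable time; it triggers the driver's global error
 *  handling, which schedules a reset of the adapter.
 */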
3531static void stmmac_tx_timeout(struct net_device *dev)
3532{
3533 struct stmmac_priv *priv = netdev_priv(dev);
3534
3535 stmmac_global_err(priv);
3536}
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
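/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */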
3547static void stmmac_set_rx_mode(struct net_device *dev)
3548{
3549 struct stmmac_priv *priv = netdev_priv(dev);
3550
3551 stmmac_set_filter(priv, priv->hw, dev);
3552}
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
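/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the interface must be stopped before changing the MTU; the
 *  new value is stored and the netdev features are re-evaluated.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */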
3565static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3566{
3567 struct stmmac_priv *priv = netdev_priv(dev);
3568
3569 if (netif_running(dev)) {
3570 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3571 return -EBUSY;
3572 }
3573
3574 dev->mtu = new_mtu;
3575
3576 netdev_update_features(dev);
3577
3578 return 0;
3579}
3580
3581static netdev_features_t stmmac_fix_features(struct net_device *dev,
3582 netdev_features_t features)
3583{
3584 struct stmmac_priv *priv = netdev_priv(dev);
3585
3586 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3587 features &= ~NETIF_F_RXCSUM;
3588
3589 if (!priv->plat->tx_coe)
3590 features &= ~NETIF_F_CSUM_MASK;
3591
3592
3593
3594
3595
3596
3597 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3598 features &= ~NETIF_F_CSUM_MASK;
3599
3600
3601 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3602 if (features & NETIF_F_TSO)
3603 priv->tso = true;
3604 else
3605 priv->tso = false;
3606 }
3607
3608 return features;
3609}
3610
3611static int stmmac_set_features(struct net_device *netdev,
3612 netdev_features_t features)
3613{
3614 struct stmmac_priv *priv = netdev_priv(netdev);
3615
3616
3617 if (features & NETIF_F_RXCSUM)
3618 priv->hw->rx_csum = priv->plat->rx_coe;
3619 else
3620 priv->hw->rx_csum = 0;
3621
3622
3623
3624 stmmac_rx_ipc(priv, priv->hw);
3625
3626 return 0;
3627}
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
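/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It handles the MAC core events (LPI state, MTL RX overflow, PCS link
 *  changes) and then the per-channel DMA interrupts.
 */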
3640static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3641{
3642 struct net_device *dev = (struct net_device *)dev_id;
3643 struct stmmac_priv *priv = netdev_priv(dev);
3644 u32 rx_cnt = priv->plat->rx_queues_to_use;
3645 u32 tx_cnt = priv->plat->tx_queues_to_use;
3646 u32 queues_count;
3647 u32 queue;
3648
3649 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3650
3651 if (priv->irq_wake)
3652 pm_wakeup_event(priv->device, 0);
3653
3654 if (unlikely(!dev)) {
3655 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3656 return IRQ_NONE;
3657 }
3658
3659
3660 if (test_bit(STMMAC_DOWN, &priv->state))
3661 return IRQ_HANDLED;
3662
3663 if (stmmac_safety_feat_interrupt(priv))
3664 return IRQ_HANDLED;
3665
3666
3667 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3668 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3669 int mtl_status;
3670
3671 if (unlikely(status)) {
3672
3673 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3674 priv->tx_path_in_lpi_mode = true;
3675 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3676 priv->tx_path_in_lpi_mode = false;
3677 }
3678
3679 for (queue = 0; queue < queues_count; queue++) {
3680 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3681
3682 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3683 queue);
3684 if (mtl_status != -EINVAL)
3685 status |= mtl_status;
3686
3687 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3688 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3689 rx_q->rx_tail_addr,
3690 queue);
3691 }
3692
3693
3694 if (priv->hw->pcs) {
3695 if (priv->xstats.pcs_link)
3696 netif_carrier_on(dev);
3697 else
3698 netif_carrier_off(dev);
3699 }
3700 }
3701
3702
3703 stmmac_dma_interrupt(priv);
3704
3705 return IRQ_HANDLED;
3706}
3707
3708#ifdef CONFIG_NET_POLL_CONTROLLER
3709
3710
3711
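/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */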
3712static void stmmac_poll_controller(struct net_device *dev)
3713{
3714 disable_irq(dev->irq);
3715 stmmac_interrupt(dev->irq, dev);
3716 enable_irq(dev->irq);
3717}
3718#endif
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
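/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */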
3729static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3730{
3731 int ret = -EOPNOTSUPP;
3732
3733 if (!netif_running(dev))
3734 return -EINVAL;
3735
3736 switch (cmd) {
3737 case SIOCGMIIPHY:
3738 case SIOCGMIIREG:
3739 case SIOCSMIIREG:
3740 if (!dev->phydev)
3741 return -EINVAL;
3742 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3743 break;
3744 case SIOCSHWTSTAMP:
3745 ret = stmmac_hwtstamp_ioctl(dev, rq);
3746 break;
3747 default:
3748 break;
3749 }
3750
3751 return ret;
3752}
3753
3754static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3755 void *cb_priv)
3756{
3757 struct stmmac_priv *priv = cb_priv;
3758 int ret = -EOPNOTSUPP;
3759
3760 stmmac_disable_all_queues(priv);
3761
3762 switch (type) {
3763 case TC_SETUP_CLSU32:
3764 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3765 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3766 break;
3767 default:
3768 break;
3769 }
3770
3771 stmmac_enable_all_queues(priv);
3772 return ret;
3773}
3774
3775static LIST_HEAD(stmmac_block_cb_list);
3776
3777static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3778 void *type_data)
3779{
3780 struct stmmac_priv *priv = netdev_priv(ndev);
3781
3782 switch (type) {
3783 case TC_SETUP_BLOCK:
3784 return flow_block_cb_setup_simple(type_data,
3785 &stmmac_block_cb_list,
3786 stmmac_setup_tc_block_cb,
3787 priv, priv, true);
3788 default:
3789 return -EOPNOTSUPP;
3790 }
3791}
3792
3793static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3794{
3795 struct stmmac_priv *priv = netdev_priv(ndev);
3796 int ret = 0;
3797
3798 ret = eth_mac_addr(ndev, addr);
3799 if (ret)
3800 return ret;
3801
3802 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3803
3804 return ret;
3805}
3806
3807#ifdef CONFIG_DEBUG_FS
3808static struct dentry *stmmac_fs_dir;
3809
3810static void sysfs_display_ring(void *head, int size, int extend_desc,
3811 struct seq_file *seq)
3812{
3813 int i;
3814 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3815 struct dma_desc *p = (struct dma_desc *)head;
3816
3817 for (i = 0; i < size; i++) {
3818 if (extend_desc) {
3819 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3820 i, (unsigned int)virt_to_phys(ep),
3821 le32_to_cpu(ep->basic.des0),
3822 le32_to_cpu(ep->basic.des1),
3823 le32_to_cpu(ep->basic.des2),
3824 le32_to_cpu(ep->basic.des3));
3825 ep++;
3826 } else {
3827 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3828 i, (unsigned int)virt_to_phys(p),
3829 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3830 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3831 p++;
3832 }
3833 seq_printf(seq, "\n");
3834 }
3835}
3836
3837static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3838{
3839 struct net_device *dev = seq->private;
3840 struct stmmac_priv *priv = netdev_priv(dev);
3841 u32 rx_count = priv->plat->rx_queues_to_use;
3842 u32 tx_count = priv->plat->tx_queues_to_use;
3843 u32 queue;
3844
3845 for (queue = 0; queue < rx_count; queue++) {
3846 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3847
3848 seq_printf(seq, "RX Queue %d:\n", queue);
3849
3850 if (priv->extend_desc) {
3851 seq_printf(seq, "Extended descriptor ring:\n");
3852 sysfs_display_ring((void *)rx_q->dma_erx,
3853 DMA_RX_SIZE, 1, seq);
3854 } else {
3855 seq_printf(seq, "Descriptor ring:\n");
3856 sysfs_display_ring((void *)rx_q->dma_rx,
3857 DMA_RX_SIZE, 0, seq);
3858 }
3859 }
3860
3861 for (queue = 0; queue < tx_count; queue++) {
3862 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3863
3864 seq_printf(seq, "TX Queue %d:\n", queue);
3865
3866 if (priv->extend_desc) {
3867 seq_printf(seq, "Extended descriptor ring:\n");
3868 sysfs_display_ring((void *)tx_q->dma_etx,
3869 DMA_TX_SIZE, 1, seq);
3870 } else {
3871 seq_printf(seq, "Descriptor ring:\n");
3872 sysfs_display_ring((void *)tx_q->dma_tx,
3873 DMA_TX_SIZE, 0, seq);
3874 }
3875 }
3876
3877 return 0;
3878}
3879
3880static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3881{
3882 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3883}
3884
3885
3886
3887static const struct file_operations stmmac_rings_status_fops = {
3888 .owner = THIS_MODULE,
3889 .open = stmmac_sysfs_ring_open,
3890 .read = seq_read,
3891 .llseek = seq_lseek,
3892 .release = single_release,
3893};
3894
3895static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3896{
3897 struct net_device *dev = seq->private;
3898 struct stmmac_priv *priv = netdev_priv(dev);
3899
3900 if (!priv->hw_cap_support) {
3901 seq_printf(seq, "DMA HW features not supported\n");
3902 return 0;
3903 }
3904
3905 seq_printf(seq, "==============================\n");
3906 seq_printf(seq, "\tDMA HW features\n");
3907 seq_printf(seq, "==============================\n");
3908
3909 seq_printf(seq, "\t10/100 Mbps: %s\n",
3910 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3911 seq_printf(seq, "\t1000 Mbps: %s\n",
3912 (priv->dma_cap.mbps_1000) ? "Y" : "N");
3913 seq_printf(seq, "\tHalf duplex: %s\n",
3914 (priv->dma_cap.half_duplex) ? "Y" : "N");
3915 seq_printf(seq, "\tHash Filter: %s\n",
3916 (priv->dma_cap.hash_filter) ? "Y" : "N");
3917 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3918 (priv->dma_cap.multi_addr) ? "Y" : "N");
3919 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3920 (priv->dma_cap.pcs) ? "Y" : "N");
3921 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3922 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3923 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3924 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3925 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3926 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3927 seq_printf(seq, "\tRMON module: %s\n",
3928 (priv->dma_cap.rmon) ? "Y" : "N");
3929 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3930 (priv->dma_cap.time_stamp) ? "Y" : "N");
3931 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3932 (priv->dma_cap.atime_stamp) ? "Y" : "N");
3933 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3934 (priv->dma_cap.eee) ? "Y" : "N");
3935 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3936 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3937 (priv->dma_cap.tx_coe) ? "Y" : "N");
3938 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3939 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3940 (priv->dma_cap.rx_coe) ? "Y" : "N");
3941 } else {
3942 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3943 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3944 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3945 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3946 }
3947 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3948 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3949 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3950 priv->dma_cap.number_rx_channel);
3951 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3952 priv->dma_cap.number_tx_channel);
3953 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3954 (priv->dma_cap.enh_desc) ? "Y" : "N");
3955
3956 return 0;
3957}
3958
3959static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3960{
3961 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3962}
3963
3964static const struct file_operations stmmac_dma_cap_fops = {
3965 .owner = THIS_MODULE,
3966 .open = stmmac_sysfs_dma_cap_open,
3967 .read = seq_read,
3968 .llseek = seq_lseek,
3969 .release = single_release,
3970};
3971
3972static int stmmac_init_fs(struct net_device *dev)
3973{
3974 struct stmmac_priv *priv = netdev_priv(dev);
3975
3976
3977 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3978
3979 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3980 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3981
3982 return -ENOMEM;
3983 }
3984
3985
3986 priv->dbgfs_rings_status =
3987 debugfs_create_file("descriptors_status", 0444,
3988 priv->dbgfs_dir, dev,
3989 &stmmac_rings_status_fops);
3990
3991 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3992 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3993 debugfs_remove_recursive(priv->dbgfs_dir);
3994
3995 return -ENOMEM;
3996 }
3997
3998
3999 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4000 priv->dbgfs_dir,
4001 dev, &stmmac_dma_cap_fops);
4002
4003 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4004 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4005 debugfs_remove_recursive(priv->dbgfs_dir);
4006
4007 return -ENOMEM;
4008 }
4009
4010 return 0;
4011}
4012
4013static void stmmac_exit_fs(struct net_device *dev)
4014{
4015 struct stmmac_priv *priv = netdev_priv(dev);
4016
4017 debugfs_remove_recursive(priv->dbgfs_dir);
4018}
4019#endif
4020
4021static const struct net_device_ops stmmac_netdev_ops = {
4022 .ndo_open = stmmac_open,
4023 .ndo_start_xmit = stmmac_xmit,
4024 .ndo_stop = stmmac_release,
4025 .ndo_change_mtu = stmmac_change_mtu,
4026 .ndo_fix_features = stmmac_fix_features,
4027 .ndo_set_features = stmmac_set_features,
4028 .ndo_set_rx_mode = stmmac_set_rx_mode,
4029 .ndo_tx_timeout = stmmac_tx_timeout,
4030 .ndo_do_ioctl = stmmac_ioctl,
4031 .ndo_setup_tc = stmmac_setup_tc,
4032#ifdef CONFIG_NET_POLL_CONTROLLER
4033 .ndo_poll_controller = stmmac_poll_controller,
4034#endif
4035 .ndo_set_mac_address = stmmac_set_mac_address,
4036};
4037
4038static void stmmac_reset_subtask(struct stmmac_priv *priv)
4039{
4040 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4041 return;
4042 if (test_bit(STMMAC_DOWN, &priv->state))
4043 return;
4044
4045 netdev_err(priv->dev, "Reset adapter.\n");
4046
4047 rtnl_lock();
4048 netif_trans_update(priv->dev);
4049 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4050 usleep_range(1000, 2000);
4051
4052 set_bit(STMMAC_DOWN, &priv->state);
4053 dev_close(priv->dev);
4054 dev_open(priv->dev, NULL);
4055 clear_bit(STMMAC_DOWN, &priv->state);
4056 clear_bit(STMMAC_RESETING, &priv->state);
4057 rtnl_unlock();
4058}
4059
4060static void stmmac_service_task(struct work_struct *work)
4061{
4062 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4063 service_task);
4064
4065 stmmac_reset_subtask(priv);
4066 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4067}
4068
4069
4070
4071
4072
4073
4074
4075
4076
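/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It selects
 * ring/chain mode and, when available, uses the HW capabilities to set up
 * COE, PMT and enhanced/normal descriptors.
 */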
4077static int stmmac_hw_init(struct stmmac_priv *priv)
4078{
4079 int ret;
4080
4081
4082 if (priv->plat->has_sun8i)
4083 chain_mode = 1;
4084 priv->chain_mode = chain_mode;
4085
4086
4087 ret = stmmac_hwif_init(priv);
4088 if (ret)
4089 return ret;
4090
4091
4092 priv->hw_cap_support = stmmac_get_hw_features(priv);
4093 if (priv->hw_cap_support) {
4094 dev_info(priv->device, "DMA HW capability register supported\n");
4095
4096
4097
4098
4099
4100
4101 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4102 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4103 priv->hw->pmt = priv->plat->pmt;
4104
4105
4106 if (priv->plat->force_thresh_dma_mode)
4107 priv->plat->tx_coe = 0;
4108 else
4109 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4110
4111
4112 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4113
4114 if (priv->dma_cap.rx_coe_type2)
4115 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4116 else if (priv->dma_cap.rx_coe_type1)
4117 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4118
4119 } else {
4120 dev_info(priv->device, "No HW DMA feature register supported\n");
4121 }
4122
4123 if (priv->plat->rx_coe) {
4124 priv->hw->rx_csum = priv->plat->rx_coe;
4125 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4126 if (priv->synopsys_id < DWMAC_CORE_4_00)
4127 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4128 }
4129 if (priv->plat->tx_coe)
4130 dev_info(priv->device, "TX Checksum insertion supported\n");
4131
4132 if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
4134 device_set_wakeup_capable(priv->device, 1);
4135 }
4136
4137 if (priv->dma_cap.tsoen)
4138 dev_info(priv->device, "TSO supported\n");
4139
4140
4141 if (priv->hwif_quirks) {
4142 ret = priv->hwif_quirks(priv);
4143 if (ret)
4144 return ret;
4145 }
4146
4147 return 0;
4148}
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
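/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function; it allocates the net_device
 * and private structure, initializes the HW and registers the MDIO bus
 * (when needed) and the net_device.
 * Return:
 * returns 0 on success, otherwise errno.
 */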
4160int stmmac_dvr_probe(struct device *device,
4161 struct plat_stmmacenet_data *plat_dat,
4162 struct stmmac_resources *res)
4163{
4164 struct net_device *ndev = NULL;
4165 struct stmmac_priv *priv;
4166 int ret = 0;
4167 u32 queue;
4168
4169 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4170 MTL_MAX_TX_QUEUES,
4171 MTL_MAX_RX_QUEUES);
4172 if (!ndev)
4173 return -ENOMEM;
4174
4175 SET_NETDEV_DEV(ndev, device);
4176
4177 priv = netdev_priv(ndev);
4178 priv->device = device;
4179 priv->dev = ndev;
4180
4181 stmmac_set_ethtool_ops(ndev);
4182 priv->pause = pause;
4183 priv->plat = plat_dat;
4184 priv->ioaddr = res->addr;
4185 priv->dev->base_addr = (unsigned long)res->addr;
4186
4187 priv->dev->irq = res->irq;
4188 priv->wol_irq = res->wol_irq;
4189 priv->lpi_irq = res->lpi_irq;
4190
4191 if (res->mac)
4192 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4193
4194 dev_set_drvdata(device, priv->dev);
4195
4196
4197 stmmac_verify_args();
4198
4199
4200 priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq;
	}
4205
4206 INIT_WORK(&priv->service_task, stmmac_service_task);
4207
4208
4209
4210
4211 if ((phyaddr >= 0) && (phyaddr <= 31))
4212 priv->plat->phy_addr = phyaddr;
4213
4214 if (priv->plat->stmmac_rst) {
4215 ret = reset_control_assert(priv->plat->stmmac_rst);
4216 reset_control_deassert(priv->plat->stmmac_rst);
4217
4218
4219
4220 if (ret == -ENOTSUPP)
4221 reset_control_reset(priv->plat->stmmac_rst);
4222 }
4223
4224
4225 ret = stmmac_hw_init(priv);
4226 if (ret)
4227 goto error_hw_init;
4228
4229
4230 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4231 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4232
4233 ndev->netdev_ops = &stmmac_netdev_ops;
4234
4235 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4236 NETIF_F_RXCSUM;
4237
4238 ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;
4242
4243 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4244 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4245 priv->tso = true;
4246 dev_info(priv->device, "TSO feature enabled\n");
4247 }
4248 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4249 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4250#ifdef STMMAC_VLAN_TAG_USED
4251
4252 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4253#endif
4254 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4255
4256
4257 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4258 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4259 ndev->max_mtu = JUMBO_LEN;
4260 else
4261 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4262
4263
4264
4265 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4266 (priv->plat->maxmtu >= ndev->min_mtu))
4267 ndev->max_mtu = priv->plat->maxmtu;
4268 else if (priv->plat->maxmtu < ndev->min_mtu)
4269 dev_warn(priv->device,
4270 "%s: warning: maxmtu having invalid value (%d)\n",
4271 __func__, priv->plat->maxmtu);
4272
4273 if (flow_ctrl)
4274 priv->flow_ctrl = FLOW_AUTO;
4275
4276
4277
4278
4279
4280
4281 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4282 priv->use_riwt = 1;
4283 dev_info(priv->device,
4284 "Enable RX Mitigation via HW Watchdog Timer\n");
4285 }
4286
4287 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4288 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4289
4290 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4291 (8 * priv->plat->rx_queues_to_use));
4292 }
4293
4294 mutex_init(&priv->lock);
4295
4296
4297
4298
4299
4300
4301
4302 if (!priv->plat->clk_csr)
4303 stmmac_clk_csr_set(priv);
4304 else
4305 priv->clk_csr = priv->plat->clk_csr;
4306
4307 stmmac_check_pcs_mode(priv);
4308
4309 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4310 priv->hw->pcs != STMMAC_PCS_TBI &&
4311 priv->hw->pcs != STMMAC_PCS_RTBI) {
4312
4313 ret = stmmac_mdio_register(ndev);
4314 if (ret < 0) {
4315 dev_err(priv->device,
4316 "%s: MDIO bus (id: %d) registration failed",
4317 __func__, priv->plat->bus_id);
4318 goto error_mdio_register;
4319 }
4320 }
4321
4322 ret = register_netdev(ndev);
4323 if (ret) {
4324 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4325 __func__, ret);
4326 goto error_netdev_register;
4327 }
4328
4329 return ret;
4330
4331error_netdev_register:
4332 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4333 priv->hw->pcs != STMMAC_PCS_TBI &&
4334 priv->hw->pcs != STMMAC_PCS_RTBI)
4335 stmmac_mdio_unregister(ndev);
4336error_mdio_register:
4337 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4338 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4339
4340 netif_napi_del(&rx_q->napi);
4341 }
4342error_hw_init:
4343 destroy_workqueue(priv->wq);
4344error_wq:
4345 free_netdev(ndev);
4346
4347 return ret;
4348}
4349EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4350
4351
4352
4353
4354
4355
4356
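/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function stops the TX/RX processes, disables the MAC
 * RX/TX, unregisters the net_device and MDIO bus and releases the driver
 * resources.
 */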
4357int stmmac_dvr_remove(struct device *dev)
4358{
4359 struct net_device *ndev = dev_get_drvdata(dev);
4360 struct stmmac_priv *priv = netdev_priv(ndev);
4361
4362 netdev_info(priv->dev, "%s: removing driver", __func__);
4363
4364 stmmac_stop_all_dma(priv);
4365
4366 stmmac_mac_set(priv, priv->ioaddr, false);
4367 netif_carrier_off(ndev);
4368 unregister_netdev(ndev);
4369 if (priv->plat->stmmac_rst)
4370 reset_control_assert(priv->plat->stmmac_rst);
4371 clk_disable_unprepare(priv->plat->pclk);
4372 clk_disable_unprepare(priv->plat->stmmac_clk);
4373 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4374 priv->hw->pcs != STMMAC_PCS_TBI &&
4375 priv->hw->pcs != STMMAC_PCS_RTBI)
4376 stmmac_mdio_unregister(ndev);
4377 destroy_workqueue(priv->wq);
4378 mutex_destroy(&priv->lock);
4379 free_netdev(ndev);
4380
4381 return 0;
4382}
4383EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4384
4385
4386
4387
4388
4389
4390
4391
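/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: stop the queues and the DMA, then either program the PMT
 * register (for Wake-on-LAN) or disable the MAC and gate the clocks.
 */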
4392int stmmac_suspend(struct device *dev)
4393{
4394 struct net_device *ndev = dev_get_drvdata(dev);
4395 struct stmmac_priv *priv = netdev_priv(ndev);
4396
4397 if (!ndev || !netif_running(ndev))
4398 return 0;
4399
4400 if (ndev->phydev)
4401 phy_stop(ndev->phydev);
4402
4403 mutex_lock(&priv->lock);
4404
4405 netif_device_detach(ndev);
4406 stmmac_stop_all_queues(priv);
4407
4408 stmmac_disable_all_queues(priv);
4409
4410
4411 stmmac_stop_all_dma(priv);
4412
4413
4414 if (device_may_wakeup(priv->device)) {
4415 stmmac_pmt(priv, priv->hw, priv->wolopts);
4416 priv->irq_wake = 1;
4417 } else {
4418 stmmac_mac_set(priv, priv->ioaddr, false);
4419 pinctrl_pm_select_sleep_state(priv->device);
4420
4421 clk_disable(priv->plat->pclk);
4422 clk_disable(priv->plat->stmmac_clk);
4423 }
4424 mutex_unlock(&priv->lock);
4425
4426 priv->oldlink = false;
4427 priv->speed = SPEED_UNKNOWN;
4428 priv->oldduplex = DUPLEX_UNKNOWN;
4429 return 0;
4430}
4431EXPORT_SYMBOL_GPL(stmmac_suspend);
4432
4433
4434
4435
4436
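/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 * Description: reset the RX/TX ring indices (and the TX MSS) before the
 * descriptors are re-initialized on resume.
 */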
4437static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4438{
4439 u32 rx_cnt = priv->plat->rx_queues_to_use;
4440 u32 tx_cnt = priv->plat->tx_queues_to_use;
4441 u32 queue;
4442
4443 for (queue = 0; queue < rx_cnt; queue++) {
4444 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4445
4446 rx_q->cur_rx = 0;
4447 rx_q->dirty_rx = 0;
4448 }
4449
4450 for (queue = 0; queue < tx_cnt; queue++) {
4451 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4452
4453 tx_q->cur_tx = 0;
4454 tx_q->dirty_tx = 0;
4455 tx_q->mss = 0;
4456 }
4457}
4458
4459
4460
4461
4462
4463
4464
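/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */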
4465int stmmac_resume(struct device *dev)
4466{
4467 struct net_device *ndev = dev_get_drvdata(dev);
4468 struct stmmac_priv *priv = netdev_priv(ndev);
4469
4470 if (!netif_running(ndev))
4471 return 0;
4472
4473
4474
4475
4476
4477
4478
4479 if (device_may_wakeup(priv->device)) {
4480 mutex_lock(&priv->lock);
4481 stmmac_pmt(priv, priv->hw, 0);
4482 mutex_unlock(&priv->lock);
4483 priv->irq_wake = 0;
4484 } else {
4485 pinctrl_pm_select_default_state(priv->device);
4486
4487 clk_enable(priv->plat->stmmac_clk);
4488 clk_enable(priv->plat->pclk);
4489
4490 if (priv->mii)
4491 stmmac_mdio_reset(priv->mii);
4492 }
4493
4494 netif_device_attach(ndev);
4495
4496 mutex_lock(&priv->lock);
4497
4498 stmmac_reset_queues_param(priv);
4499
4500 stmmac_clear_descriptors(priv);
4501
4502 stmmac_hw_setup(ndev, false);
4503 stmmac_init_tx_coalesce(priv);
4504 stmmac_set_rx_mode(ndev);
4505
4506 stmmac_enable_all_queues(priv);
4507
4508 stmmac_start_all_queues(priv);
4509
4510 mutex_unlock(&priv->lock);
4511
4512 if (ndev->phydev)
4513 phy_start(ndev->phydev);
4514
4515 return 0;
4516}
4517EXPORT_SYMBOL_GPL(stmmac_resume);
4518
4519#ifndef MODULE
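/* Parse the "stmmaceth=" kernel command line option. The accepted format is
 * a comma-separated list of <name>:<value> pairs matching the module
 * parameters above, e.g. (illustrative values only):
 *	stmmaceth=debug:16,buf_sz:4096,eee_timer:500
 */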
4520static int __init stmmac_cmdline_opt(char *str)
4521{
4522 char *opt;
4523
4524 if (!str || !*str)
4525 return -EINVAL;
4526 while ((opt = strsep(&str, ",")) != NULL) {
4527 if (!strncmp(opt, "debug:", 6)) {
4528 if (kstrtoint(opt + 6, 0, &debug))
4529 goto err;
4530 } else if (!strncmp(opt, "phyaddr:", 8)) {
4531 if (kstrtoint(opt + 8, 0, &phyaddr))
4532 goto err;
4533 } else if (!strncmp(opt, "buf_sz:", 7)) {
4534 if (kstrtoint(opt + 7, 0, &buf_sz))
4535 goto err;
4536 } else if (!strncmp(opt, "tc:", 3)) {
4537 if (kstrtoint(opt + 3, 0, &tc))
4538 goto err;
4539 } else if (!strncmp(opt, "watchdog:", 9)) {
4540 if (kstrtoint(opt + 9, 0, &watchdog))
4541 goto err;
4542 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4543 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4544 goto err;
4545 } else if (!strncmp(opt, "pause:", 6)) {
4546 if (kstrtoint(opt + 6, 0, &pause))
4547 goto err;
4548 } else if (!strncmp(opt, "eee_timer:", 10)) {
4549 if (kstrtoint(opt + 10, 0, &eee_timer))
4550 goto err;
4551 } else if (!strncmp(opt, "chain_mode:", 11)) {
4552 if (kstrtoint(opt + 11, 0, &chain_mode))
4553 goto err;
4554 }
4555 }
4556 return 0;
4557
4558err:
4559 pr_err("%s: ERROR broken module parameter conversion", __func__);
4560 return -EINVAL;
4561}
4562
4563__setup("stmmaceth=", stmmac_cmdline_opt);
4564#endif
4565
4566static int __init stmmac_init(void)
4567{
4568#ifdef CONFIG_DEBUG_FS
4569
4570 if (!stmmac_fs_dir) {
4571 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4572
4573 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4574 pr_err("ERROR %s, debugfs create directory failed\n",
4575 STMMAC_RESOURCE_NAME);
4576
4577 return -ENOMEM;
4578 }
4579 }
4580#endif
4581
4582 return 0;
4583}
4584
4585static void __exit stmmac_exit(void)
4586{
4587#ifdef CONFIG_DEBUG_FS
4588 debugfs_remove_recursive(stmmac_fs_dir);
4589#endif
4590}
4591
4592module_init(stmmac_init)
4593module_exit(stmmac_exit)
4594
4595MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4596MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4597MODULE_LICENSE("GPL");
4598