// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
 * ST Ethernet IPs are built around a Synopsys IP Core.
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

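/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */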
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

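/* Module parameters */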
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

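/* By default the driver will use the ring mode to manage tx and rx
 * descriptors, but allow the user to force the use of the chain instead.
 */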
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif /* CONFIG_DEBUG_FS */

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

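/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and set a default in case of
 * errors.
 */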
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

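/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */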
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

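	/* synchronize_rcu() needed for pending XDP buffers to drain */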
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

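/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */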
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

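/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Viceversa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */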
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

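	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */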
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

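/**
 * stmmac_tx_avail - Get tx queue availability
 * @priv: driver private structure
 * @queue: TX queue index
 */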
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

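/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */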
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

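	/* Clear/set the SW EEE timer flag based on LPI ET enablement */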
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

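/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */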
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY;
	}

	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				    priv->plat->en_tx_lpi_clockgating);
	return 0;
}

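/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */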
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

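/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */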
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

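/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enable the LPI state and start related
 *  timer.
 */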
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

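/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the descriptor & pass it to stack.
 * It also performs some sanity checks.
 */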
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);

		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

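/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also performs some sanity checks.
 */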
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;

	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

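/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */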
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
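			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit4 and bit5 of PTP_TCR.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping.
			 */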
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;

			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;

			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;

			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;

			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;

			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;

			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;

			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

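/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */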
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

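/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */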
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;
	int ret;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
	if (ret < 0) {
		netdev_warn(priv->dev,
			    "failed to enable PTP reference clock: %pe\n",
			    ERR_PTR(ret));
		return ret;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	priv->sub_second_inc = sec_inc;

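	/* Calculate the default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */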
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	ktime_get_real_ts64(&now);

	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);

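/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */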
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;

	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

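/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */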
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	if (priv->hw->xpcs)
		xpcs_validate(priv->hw->xpcs, supported, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

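/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */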
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

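/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */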
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

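	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */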
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	stmmac_display_rx_rings(priv);

	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

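/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * in case of both basic and extended descriptors are used.
 */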
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
}

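/**
 * stmmac_clear_tx_descriptors - clear TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
 */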
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

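/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * in case of both basic and extended descriptors are used.
 */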
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

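/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */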
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.addr64 <= 32)
		gfp |= GFP_DMA32;

	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

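/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */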
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

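/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */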
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}

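/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */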
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

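/**
 * stmmac_alloc_rx_buffers - alloc RX dma buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function is called to allocate the RX ring buffers.
 */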
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
				   gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}

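/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @queue: RX queue index
 */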
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}

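/**
 * stmmac_alloc_rx_buffers_zc - alloc zero-copy RX buffers from the XSK pool
 * @priv: driver private structure
 * @queue: RX queue index
 */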
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}

static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}

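/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */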
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, queue);

	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
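		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */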
		stmmac_alloc_rx_buffers_zc(priv, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;

	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 0);
	}

	return 0;
}

static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, queue);
		else
			dma_free_rx_skbufs(priv, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		if (queue == 0)
			break;

		queue--;
	}

	return ret;
}

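/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */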
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < priv->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));

	return 0;
}

static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, queue);

	return 0;
}

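/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */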
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

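/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */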
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	tx_q->xsk_frames_done = 0;

	for (i = 0; i < priv->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, queue, i);

	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}

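/**
 * stmmac_free_tx_skbufs - free TX skb buffers
 * @priv: private structure
 */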
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		dma_free_tx_skbufs(priv, queue);
}

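/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @queue: RX queue index
 */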
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, queue);
	else
		dma_free_rx_skbufs(priv, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	if (!priv->extend_desc)
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);
	else
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  rx_q->dma_erx, rx_q->dma_rx_phy);

	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}

static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_count; queue++)
		__free_dma_rx_desc_resources(priv, queue);
}

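/**
 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
 * @priv: private structure
 * @queue: TX queue index
 */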
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	size_t size;
	void *addr;

	dma_free_tx_skbufs(priv, queue);

	if (priv->extend_desc) {
		size = sizeof(struct dma_extended_desc);
		addr = tx_q->dma_etx;
	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
		size = sizeof(struct dma_edesc);
		addr = tx_q->dma_entx;
	} else {
		size = sizeof(struct dma_desc);
		addr = tx_q->dma_tx;
	}

	size *= priv->dma_tx_size;

	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

	kfree(tx_q->tx_skbuff_dma);
	kfree(tx_q->tx_skbuff);
}

static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_count; queue++)
		__free_dma_tx_desc_resources(priv, queue);
}

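/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @queue: RX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers (via page_pool) in order to allow zero-copy
 * mechanism.
 */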
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = priv->dma_rx_size;
	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
				 sizeof(*rx_q->buf_pool),
				 GFP_KERNEL);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	if (priv->extend_desc) {
		rx_q->dma_erx = dma_alloc_coherent(priv->device,
						   priv->dma_rx_size *
						   sizeof(struct dma_extended_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_erx)
			return -ENOMEM;

	} else {
		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  priv->dma_rx_size *
						  sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			return -ENOMEM;
	}

	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
			       rx_q->queue_index,
			       napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}

static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int ret;

	for (queue = 0; queue < rx_count; queue++) {
		ret = __alloc_dma_rx_desc_resources(priv, queue);
		if (ret)
			goto err_dma;
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

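/**
 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
 * @priv: private structure
 * @queue: TX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path: the descriptor
 * ring plus the bookkeeping arrays for the socket buffers.
 */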
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	size_t size;
	void *addr;

	tx_q->queue_index = queue;
	tx_q->priv_data = priv;

	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
				      sizeof(*tx_q->tx_skbuff_dma),
				      GFP_KERNEL);
	if (!tx_q->tx_skbuff_dma)
		return -ENOMEM;

	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
				  sizeof(struct sk_buff *),
				  GFP_KERNEL);
	if (!tx_q->tx_skbuff)
		return -ENOMEM;

	if (priv->extend_desc)
		size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		size = sizeof(struct dma_edesc);
	else
		size = sizeof(struct dma_desc);

	size *= priv->dma_tx_size;

	addr = dma_alloc_coherent(priv->device, size,
				  &tx_q->dma_tx_phy, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (priv->extend_desc)
		tx_q->dma_etx = addr;
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_q->dma_entx = addr;
	else
		tx_q->dma_tx = addr;

	return 0;
}

static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;
	int ret;

	for (queue = 0; queue < tx_count; queue++) {
		ret = __alloc_dma_tx_desc_resources(priv, queue);
		if (ret)
			goto err_dma;
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);
	return ret;
}

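/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */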
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

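/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * Description: TX resources are released first, so that any pending
 * XDP_TX buffers are returned before the RX buffer pools are destroyed.
 */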
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	free_dma_tx_desc_resources(priv);

	free_dma_rx_desc_resources(priv);
}

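/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */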
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}

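/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */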
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

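/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */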
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

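/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */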
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

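/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */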
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	u32 chan;

	for (chan = 0; chan < dma_csr_ch; chan++) {
		struct stmmac_channel *ch = &priv->channel[chan];
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}
}

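/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */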
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

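/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */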
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

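/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */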
2326static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2327{
2328 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2329 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2330 int rxfifosz = priv->plat->rx_fifo_size;
2331 int txfifosz = priv->plat->tx_fifo_size;
2332 u32 txmode = 0;
2333 u32 rxmode = 0;
2334 u32 chan = 0;
2335 u8 qmode = 0;
2336
2337 if (rxfifosz == 0)
2338 rxfifosz = priv->dma_cap.rx_fifo_size;
2339 if (txfifosz == 0)
2340 txfifosz = priv->dma_cap.tx_fifo_size;
2341
 /* Adjust for real per queue fifo size */
2343 rxfifosz /= rx_channels_count;
2344 txfifosz /= tx_channels_count;
2345
2346 if (priv->plat->force_thresh_dma_mode) {
2347 txmode = tc;
2348 rxmode = tc;
2349 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
 /*
  * In case of GMAC, SF mode can be enabled
  * to perform the TX COE in HW. This depends on:
  * 1) TX COE if actually supported
  * 2) There is no bugged Jumbo frame support
  *    that needs to not insert csum in the TDES.
  */
2357 txmode = SF_DMA_MODE;
2358 rxmode = SF_DMA_MODE;
2359 priv->xstats.threshold = SF_DMA_MODE;
2360 } else {
2361 txmode = tc;
2362 rxmode = SF_DMA_MODE;
2363 }
2364
 /* Configure all channels */
2366 for (chan = 0; chan < rx_channels_count; chan++) {
2367 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2368 u32 buf_size;
2369
2370 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2371
2372 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2373 rxfifosz, qmode);
2374
2375 if (rx_q->xsk_pool) {
2376 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2377 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2378 buf_size,
2379 chan);
2380 } else {
2381 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2382 priv->dma_buf_sz,
2383 chan);
2384 }
2385 }
2386
2387 for (chan = 0; chan < tx_channels_count; chan++) {
2388 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2389
2390 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2391 txfifosz, qmode);
2392 }
2393}
2394
2395static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2396{
2397 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2398 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2399 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2400 unsigned int entry = tx_q->cur_tx;
2401 struct dma_desc *tx_desc = NULL;
2402 struct xdp_desc xdp_desc;
2403 bool work_done = true;
2404
 /* Avoid transmit queue timeout */
2406 txq_trans_cond_update(nq);
2407
2408 budget = min(budget, stmmac_tx_avail(priv, queue));
2409
2410 while (budget-- > 0) {
2411 dma_addr_t dma_addr;
2412 bool set_ic;
2413
 /* We are sharing with slow path and stop XSK TX desc submission when
  * available TX ring is less than threshold.
  */
2417 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2418 !netif_carrier_ok(priv->dev)) {
2419 work_done = false;
2420 break;
2421 }
2422
2423 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2424 break;
2425
2426 if (likely(priv->extend_desc))
2427 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2428 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2429 tx_desc = &tx_q->dma_entx[entry].basic;
2430 else
2431 tx_desc = tx_q->dma_tx + entry;
2432
2433 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2434 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2435
2436 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2437
 /* To return XDP buffer to XSK pool, we simply call
  * xsk_tx_completed(), so we don't need to fill up
  * 'buf' and 'xdpf'.
  */
2442 tx_q->tx_skbuff_dma[entry].buf = 0;
2443 tx_q->xdpf[entry] = NULL;
2444
2445 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2446 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2447 tx_q->tx_skbuff_dma[entry].last_segment = true;
2448 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2449
2450 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2451
2452 tx_q->tx_count_frames++;
2453
2454 if (!priv->tx_coal_frames[queue])
2455 set_ic = false;
2456 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2457 set_ic = true;
2458 else
2459 set_ic = false;
2460
2461 if (set_ic) {
2462 tx_q->tx_count_frames = 0;
2463 stmmac_set_tx_ic(priv, tx_desc);
2464 priv->xstats.tx_set_ic_bit++;
2465 }
2466
2467 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2468 true, priv->mode, true, true,
2469 xdp_desc.len);
2470
2471 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2472
2473 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2474 entry = tx_q->cur_tx;
2475 }
2476
2477 if (tx_desc) {
2478 stmmac_flush_tx_descriptors(priv, queue);
2479 xsk_tx_release(pool);
2480 }
2481
 /* Return true only if both conditions are met:
  *  a) TX budget is still available
  *  b) work_done = true when XSK TX desc peek is empty (no more
  *     pending XSK TX for transmission)
  */
2487 return !!budget && work_done;
2488}
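
/* Note on the return contract above: true is only returned when the budget
 * was not exhausted AND the XSK Tx ring had nothing left to peek, i.e. the
 * zero-copy path is fully drained. stmmac_tx_clean() uses this to decide
 * how much of the NAPI budget to report as consumed.
 */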
2489
2490static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2491{
2492 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2493 tc += 64;
2494
2495 if (priv->plat->force_thresh_dma_mode)
2496 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2497 else
2498 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2499 chan);
2500
2501 priv->xstats.threshold = tc;
2502 }
2503}
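
/* The threshold bump above is a recovery heuristic: on a transmit error that
 * flags tx_err_bump_tc (typically an underflow), the TX threshold is raised
 * in steps of 64 up to 256 so the DMA buffers more data before starting
 * transmission. It is a no-op once running in store-and-forward mode.
 */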
2504
2505
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this functions packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
2512static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2513{
2514 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2515 unsigned int bytes_compl = 0, pkts_compl = 0;
2516 unsigned int entry, xmits = 0, count = 0;
2517
2518 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2519
2520 priv->xstats.tx_clean++;
2521
2522 tx_q->xsk_frames_done = 0;
2523
2524 entry = tx_q->dirty_tx;
2525
 /* Try to clean all TX complete frames in one shot */
2527 while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2528 struct xdp_frame *xdpf;
2529 struct sk_buff *skb;
2530 struct dma_desc *p;
2531 int status;
2532
2533 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2534 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2535 xdpf = tx_q->xdpf[entry];
2536 skb = NULL;
2537 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2538 xdpf = NULL;
2539 skb = tx_q->tx_skbuff[entry];
2540 } else {
2541 xdpf = NULL;
2542 skb = NULL;
2543 }
2544
2545 if (priv->extend_desc)
2546 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2547 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2548 p = &tx_q->dma_entx[entry].basic;
2549 else
2550 p = tx_q->dma_tx + entry;
2551
2552 status = stmmac_tx_status(priv, &priv->dev->stats,
2553 &priv->xstats, p, priv->ioaddr);
2554
2555 if (unlikely(status & tx_dma_own))
2556 break;
2557
2558 count++;
2559
 /* Make sure descriptor fields are read after reading
  * the own bit.
  */
2563 dma_rmb();
2564
 /* Just consider the last segment and ... */
2566 if (likely(!(status & tx_not_ls))) {
 /* ... verify the status error condition */
2568 if (unlikely(status & tx_err)) {
2569 priv->dev->stats.tx_errors++;
2570 if (unlikely(status & tx_err_bump_tc))
2571 stmmac_bump_dma_threshold(priv, queue);
2572 } else {
2573 priv->dev->stats.tx_packets++;
2574 priv->xstats.tx_pkt_n++;
2575 priv->xstats.txq_stats[queue].tx_pkt_n++;
2576 }
2577 if (skb)
2578 stmmac_get_tx_hwtstamp(priv, p, skb);
2579 }
2580
2581 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2582 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2583 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2584 dma_unmap_page(priv->device,
2585 tx_q->tx_skbuff_dma[entry].buf,
2586 tx_q->tx_skbuff_dma[entry].len,
2587 DMA_TO_DEVICE);
2588 else
2589 dma_unmap_single(priv->device,
2590 tx_q->tx_skbuff_dma[entry].buf,
2591 tx_q->tx_skbuff_dma[entry].len,
2592 DMA_TO_DEVICE);
2593 tx_q->tx_skbuff_dma[entry].buf = 0;
2594 tx_q->tx_skbuff_dma[entry].len = 0;
2595 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2596 }
2597
2598 stmmac_clean_desc3(priv, tx_q, p);
2599
2600 tx_q->tx_skbuff_dma[entry].last_segment = false;
2601 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2602
2603 if (xdpf &&
2604 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2605 xdp_return_frame_rx_napi(xdpf);
2606 tx_q->xdpf[entry] = NULL;
2607 }
2608
2609 if (xdpf &&
2610 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2611 xdp_return_frame(xdpf);
2612 tx_q->xdpf[entry] = NULL;
2613 }
2614
2615 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2616 tx_q->xsk_frames_done++;
2617
2618 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2619 if (likely(skb)) {
2620 pkts_compl++;
2621 bytes_compl += skb->len;
2622 dev_consume_skb_any(skb);
2623 tx_q->tx_skbuff[entry] = NULL;
2624 }
2625 }
2626
2627 stmmac_release_tx_desc(priv, p, priv->mode);
2628
2629 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2630 }
2631 tx_q->dirty_tx = entry;
2632
2633 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2634 pkts_compl, bytes_compl);
2635
2636 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2637 queue))) &&
2638 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2639
2640 netif_dbg(priv, tx_done, priv->dev,
2641 "%s: restart transmit\n", __func__);
2642 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2643 }
2644
2645 if (tx_q->xsk_pool) {
2646 bool work_done;
2647
2648 if (tx_q->xsk_frames_done)
2649 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2650
2651 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2652 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2653
 /* For XSK TX, we try to send as many as possible.
  * If XSK work done (XSK TX desc empty and budget still
  * available), return "budget - 1" to reuse "budget"
  * for dirty_tx clean-up.
  */
2659 work_done = stmmac_xdp_xmit_zc(priv, queue,
2660 STMMAC_XSK_TX_BUDGET_MAX);
2661 if (work_done)
2662 xmits = budget - 1;
2663 else
2664 xmits = budget;
2665 }
2666
2667 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2668 priv->eee_sw_timer_en) {
2669 if (stmmac_enable_eee_mode(priv))
2670 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2671 }
2672
 /* We still have pending packets, let's call for a new scheduling */
2674 if (tx_q->dirty_tx != tx_q->cur_tx)
2675 hrtimer_start(&tx_q->txtimer,
2676 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2677 HRTIMER_MODE_REL);
2678
2679 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2680
 /* Combine decisions from TX clean and XSK TX */
2682 return max(count, xmits);
2683}
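
/* Returning max(count, xmits) lets the caller charge the NAPI budget with
 * whichever path did more work: slow-path descriptor cleaning (count) or
 * XSK zero-copy transmission (xmits).
 */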
2684
2685
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
2692static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2693{
2694 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2695
2696 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2697
2698 stmmac_stop_tx_dma(priv, chan);
2699 dma_free_tx_skbufs(priv, chan);
2700 stmmac_clear_tx_descriptors(priv, chan);
2701 tx_q->dirty_tx = 0;
2702 tx_q->cur_tx = 0;
2703 tx_q->mss = 0;
2704 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2705 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2706 tx_q->dma_tx_phy, chan);
2707 stmmac_start_tx_dma(priv, chan);
2708
2709 priv->dev->stats.tx_errors++;
2710 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2711}
2712
/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
2723static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2724 u32 rxmode, u32 chan)
2725{
2726 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2727 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2728 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2729 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2730 int rxfifosz = priv->plat->rx_fifo_size;
2731 int txfifosz = priv->plat->tx_fifo_size;
2732
2733 if (rxfifosz == 0)
2734 rxfifosz = priv->dma_cap.rx_fifo_size;
2735 if (txfifosz == 0)
2736 txfifosz = priv->dma_cap.tx_fifo_size;
2737
 /* Adjust for real per queue fifo size */
2739 rxfifosz /= rx_channels_count;
2740 txfifosz /= tx_channels_count;
2741
2742 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2743 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2744}
2745
2746static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2747{
2748 int ret;
2749
2750 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2751 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2752 if (ret && (ret != -EINVAL)) {
2753 stmmac_global_err(priv);
2754 return true;
2755 }
2756
2757 return false;
2758}
2759
2760static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2761{
2762 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2763 &priv->xstats, chan, dir);
2764 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2765 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2766 struct stmmac_channel *ch = &priv->channel[chan];
2767 struct napi_struct *rx_napi;
2768 struct napi_struct *tx_napi;
2769 unsigned long flags;
2770
2771 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2772 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2773
2774 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2775 if (napi_schedule_prep(rx_napi)) {
2776 spin_lock_irqsave(&ch->lock, flags);
2777 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2778 spin_unlock_irqrestore(&ch->lock, flags);
2779 __napi_schedule(rx_napi);
2780 }
2781 }
2782
2783 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2784 if (napi_schedule_prep(tx_napi)) {
2785 spin_lock_irqsave(&ch->lock, flags);
2786 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2787 spin_unlock_irqrestore(&ch->lock, flags);
2788 __napi_schedule(tx_napi);
2789 }
2790 }
2791
2792 return status;
2793}
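
/* The scheme above follows the usual masked-IRQ NAPI handoff: the per-channel
 * DMA interrupt is disabled under ch->lock before __napi_schedule(), and the
 * matching poll routine is expected to re-enable it once its work is done.
 */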
2794
/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
2802static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2803{
2804 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2805 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2806 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2807 tx_channel_count : rx_channel_count;
2808 u32 chan;
2809 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2810
 /* Make sure we never check beyond our status buffer. */
2812 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2813 channels_to_check = ARRAY_SIZE(status);
2814
2815 for (chan = 0; chan < channels_to_check; chan++)
2816 status[chan] = stmmac_napi_check(priv, chan,
2817 DMA_DIR_RXTX);
2818
2819 for (chan = 0; chan < tx_channel_count; chan++) {
2820 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
 /* Try to bump up the dma threshold on this failure */
2822 stmmac_bump_dma_threshold(priv, chan);
2823 } else if (unlikely(status[chan] == tx_hard_error)) {
2824 stmmac_tx_err(priv, chan);
2825 }
2826 }
2827}
2828
/**
 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
2834static void stmmac_mmc_setup(struct stmmac_priv *priv)
2835{
2836 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2837 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2838
2839 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2840
2841 if (priv->dma_cap.rmon) {
2842 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2843 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2844 } else
2845 netdev_info(priv->dev, "No MAC Management Counters available\n");
2846}
2847
/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the selected feature.
 *  This function is called at the open step.
 */
2857static int stmmac_get_hw_features(struct stmmac_priv *priv)
2858{
2859 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2860}
2861
/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
2869static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2870{
2871 u8 addr[ETH_ALEN];
2872
2873 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2874 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2875 if (is_valid_ether_addr(addr))
2876 eth_hw_addr_set(priv->dev, addr);
2877 else
2878 eth_hw_addr_random(priv->dev);
2879 dev_info(priv->device, "device MAC address %pM\n",
2880 priv->dev->dev_addr);
2881 }
2882}
2883
/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed a default is kept for the MAC or GMAC.
 */
2892static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2893{
2894 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2895 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2896 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2897 struct stmmac_rx_queue *rx_q;
2898 struct stmmac_tx_queue *tx_q;
2899 u32 chan = 0;
2900 int atds = 0;
2901 int ret = 0;
2902
2903 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2904 dev_err(priv->device, "Invalid DMA configuration\n");
2905 return -EINVAL;
2906 }
2907
2908 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2909 atds = 1;
2910
2911 ret = stmmac_reset(priv, priv->ioaddr);
2912 if (ret) {
2913 dev_err(priv->device, "Failed to reset the dma\n");
2914 return ret;
2915 }
2916
 /* DMA Configuration */
2918 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2919
2920 if (priv->plat->axi)
2921 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2922
 /* DMA CSR Channel configuration */
2924 for (chan = 0; chan < dma_csr_ch; chan++) {
2925 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2926 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2927 }
2928
 /* DMA RX Channel Configuration */
2930 for (chan = 0; chan < rx_channels_count; chan++) {
2931 rx_q = &priv->rx_queue[chan];
2932
2933 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2934 rx_q->dma_rx_phy, chan);
2935
2936 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2937 (rx_q->buf_alloc_num *
2938 sizeof(struct dma_desc));
2939 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2940 rx_q->rx_tail_addr, chan);
2941 }
2942
 /* DMA TX Channel Configuration */
2944 for (chan = 0; chan < tx_channels_count; chan++) {
2945 tx_q = &priv->tx_queue[chan];
2946
2947 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2948 tx_q->dma_tx_phy, chan);
2949
2950 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2951 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2952 tx_q->tx_tail_addr, chan);
2953 }
2954
2955 return ret;
2956}
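
/* Rough order of operations above: SW reset, generic DMA/AXI programming,
 * per-channel CSR init with interrupts masked, then RX/TX descriptor base
 * and tail pointer programming for each channel.
 */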
2957
2958static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2959{
2960 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2961
2962 hrtimer_start(&tx_q->txtimer,
2963 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2964 HRTIMER_MODE_REL);
2965}
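
/* (Re)arming the per-queue hrtimer here is what implements the TX coalesce
 * timeout: if no interrupt-on-completion fires first, the timer expires and
 * kicks the TX NAPI to clean the ring.
 */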
2966
/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
2973static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2974{
2975 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2976 struct stmmac_priv *priv = tx_q->priv_data;
2977 struct stmmac_channel *ch;
2978 struct napi_struct *napi;
2979
2980 ch = &priv->channel[tx_q->queue_index];
2981 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2982
2983 if (likely(napi_schedule_prep(napi))) {
2984 unsigned long flags;
2985
2986 spin_lock_irqsave(&ch->lock, flags);
2987 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2988 spin_unlock_irqrestore(&ch->lock, flags);
2989 __napi_schedule(napi);
2990 }
2991
2992 return HRTIMER_NORESTART;
2993}
2994
/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
3003static void stmmac_init_coalesce(struct stmmac_priv *priv)
3004{
3005 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3006 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3007 u32 chan;
3008
3009 for (chan = 0; chan < tx_channel_count; chan++) {
3010 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3011
3012 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3013 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3014
3015 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3016 tx_q->txtimer.function = stmmac_tx_timer;
3017 }
3018
3019 for (chan = 0; chan < rx_channel_count; chan++)
3020 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3021}
3022
3023static void stmmac_set_rings_length(struct stmmac_priv *priv)
3024{
3025 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3026 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3027 u32 chan;
3028
 /* set TX ring length */
3030 for (chan = 0; chan < tx_channels_count; chan++)
3031 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3032 (priv->dma_tx_size - 1), chan);
3033
 /* set RX ring length */
3035 for (chan = 0; chan < rx_channels_count; chan++)
3036 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3037 (priv->dma_rx_size - 1), chan);
3038}
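
/* The ring-length registers are programmed with size - 1; the hardware
 * appears to expect the highest valid descriptor index rather than the
 * number of descriptors.
 */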
3039
/**
 *  stmmac_set_tx_queue_weight - Set TX queue weight
 *  @priv: driver private structure
 *  Description: It is used for setting TX queues weight
 */
3045static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3046{
3047 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3048 u32 weight;
3049 u32 queue;
3050
3051 for (queue = 0; queue < tx_queues_count; queue++) {
3052 weight = priv->plat->tx_queues_cfg[queue].weight;
3053 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3054 }
3055}
3056
3057
/**
 *  stmmac_configure_cbs - Configure CBS in TX queue
 *  @priv: driver private structure
 *  Description: It is used for configuring CBS in AVB TX queues
 */
3062static void stmmac_configure_cbs(struct stmmac_priv *priv)
3063{
3064 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3065 u32 mode_to_use;
3066 u32 queue;
3067
 /* queue 0 is reserved for legacy traffic */
3069 for (queue = 1; queue < tx_queues_count; queue++) {
3070 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3071 if (mode_to_use == MTL_QUEUE_DCB)
3072 continue;
3073
3074 stmmac_config_cbs(priv, priv->hw,
3075 priv->plat->tx_queues_cfg[queue].send_slope,
3076 priv->plat->tx_queues_cfg[queue].idle_slope,
3077 priv->plat->tx_queues_cfg[queue].high_credit,
3078 priv->plat->tx_queues_cfg[queue].low_credit,
3079 queue);
3080 }
3081}
3082
/**
 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 *  @priv: driver private structure
 *  Description: It is used for mapping RX queues to RX dma channels
 */
3088static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3089{
3090 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3091 u32 queue;
3092 u32 chan;
3093
3094 for (queue = 0; queue < rx_queues_count; queue++) {
3095 chan = priv->plat->rx_queues_cfg[queue].chan;
3096 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3097 }
3098}
3099
/**
 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX Queue Priority
 */
3105static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3106{
3107 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3108 u32 queue;
3109 u32 prio;
3110
3111 for (queue = 0; queue < rx_queues_count; queue++) {
3112 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3113 continue;
3114
3115 prio = priv->plat->rx_queues_cfg[queue].prio;
3116 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3117 }
3118}
3119
/**
 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the TX Queue Priority
 */
3125static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3126{
3127 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3128 u32 queue;
3129 u32 prio;
3130
3131 for (queue = 0; queue < tx_queues_count; queue++) {
3132 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3133 continue;
3134
3135 prio = priv->plat->tx_queues_cfg[queue].prio;
3136 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3137 }
3138}
3139
/**
 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX queue routing
 */
3145static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3146{
3147 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3148 u32 queue;
3149 u8 packet;
3150
3151 for (queue = 0; queue < rx_queues_count; queue++) {
 /* no specific packet type routing specified for the queue */
3153 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3154 continue;
3155
3156 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3157 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3158 }
3159}
3160
3161static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3162{
3163 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3164 priv->rss.enable = false;
3165 return;
3166 }
3167
3168 if (priv->dev->features & NETIF_F_RXHASH)
3169 priv->rss.enable = true;
3170 else
3171 priv->rss.enable = false;
3172
3173 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3174 priv->plat->rx_queues_to_use);
3175}
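
/* RSS is only active when the HW capability (dma_cap.rssen), the platform
 * flag (plat->rss_en) and the NETIF_F_RXHASH feature all agree; otherwise
 * the configuration is programmed disabled.
 */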
3176
3177
/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL
 */
3182static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3183{
3184 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3185 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3186
3187 if (tx_queues_count > 1)
3188 stmmac_set_tx_queue_weight(priv);
3189
 /* Configure MTL RX algorithms */
3191 if (rx_queues_count > 1)
3192 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3193 priv->plat->rx_sched_algorithm);
3194
 /* Configure MTL TX algorithms */
3196 if (tx_queues_count > 1)
3197 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3198 priv->plat->tx_sched_algorithm);
3199
 /* Configure CBS in AVB TX queues */
3201 if (tx_queues_count > 1)
3202 stmmac_configure_cbs(priv);
3203
 /* Map RX MTL to DMA channels */
3205 stmmac_rx_queue_dma_chan_map(priv);
3206
 /* Enable MAC RX Queues */
3208 stmmac_mac_enable_rx_queues(priv);
3209
 /* Set RX priorities */
3211 if (rx_queues_count > 1)
3212 stmmac_mac_config_rx_queues_prio(priv);
3213
 /* Set TX priorities */
3215 if (tx_queues_count > 1)
3216 stmmac_mac_config_tx_queues_prio(priv);
3217
 /* Set RX routing */
3219 if (rx_queues_count > 1)
3220 stmmac_mac_config_rx_queues_routing(priv);
3221
 /* Receive Side Scaling */
3223 if (rx_queues_count > 1)
3224 stmmac_mac_config_rss(priv);
3225}
3226
3227static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3228{
3229 if (priv->dma_cap.asp) {
3230 netdev_info(priv->dev, "Enabling Safety Features\n");
3231 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3232 priv->plat->safety_feat_cfg);
3233 } else {
3234 netdev_info(priv->dev, "No Safety Features support found\n");
3235 }
3236}
3237
3238static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3239{
3240 char *name;
3241
3242 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3243 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3244
3245 name = priv->wq_name;
3246 sprintf(name, "%s-fpe", priv->dev->name);
3247
3248 priv->fpe_wq = create_singlethread_workqueue(name);
3249 if (!priv->fpe_wq) {
3250 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3251
3252 return -ENOMEM;
3253 }
3254 netdev_info(priv->dev, "FPE workqueue start");
3255
3256 return 0;
3257}
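
/* The single-threaded workqueue created above is used to run the FPE
 * (Frame Preemption) verification/handshake state machine outside of
 * interrupt context.
 */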
3258
/**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
 * @ptp_register: register PTP if set
 * Description:
 * this is the main function to setup the HW in a usable state because the
 * dma engine is reset, the core registers are configured (e.g. AXI,
 * Checksum features, timers). The DMA is ready to start receiving and
 * transmitting.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
3272static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3273{
3274 struct stmmac_priv *priv = netdev_priv(dev);
3275 u32 rx_cnt = priv->plat->rx_queues_to_use;
3276 u32 tx_cnt = priv->plat->tx_queues_to_use;
3277 bool sph_en;
3278 u32 chan;
3279 int ret;
3280
 /* DMA initialization and SW reset */
3282 ret = stmmac_init_dma_engine(priv);
3283 if (ret < 0) {
3284 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3285 __func__);
3286 return ret;
3287 }
3288
 /* Copy the MAC addr into the HW */
3290 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3291
 /* PS and related bits will be programmed according to the speed */
3293 if (priv->hw->pcs) {
3294 int speed = priv->plat->mac_port_sel_speed;
3295
3296 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3297 (speed == SPEED_1000)) {
3298 priv->hw->ps = speed;
3299 } else {
3300 dev_warn(priv->device, "invalid port speed\n");
3301 priv->hw->ps = 0;
3302 }
3303 }
3304
 /* Initialize the MAC Core */
3306 stmmac_core_init(priv, priv->hw, dev);
3307
 /* Initialize MTL */
3309 stmmac_mtl_configuration(priv);
3310
 /* Initialize Safety Features */
3312 stmmac_safety_feat_configuration(priv);
3313
3314 ret = stmmac_rx_ipc(priv, priv->hw);
3315 if (!ret) {
3316 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3317 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3318 priv->hw->rx_csum = 0;
3319 }
3320
 /* Enable the MAC Rx/Tx */
3322 stmmac_mac_set(priv, priv->ioaddr, true);
3323
 /* Set the HW DMA mode and the COE */
3325 stmmac_dma_operation_mode(priv);
3326
3327 stmmac_mmc_setup(priv);
3328
3329 ret = stmmac_init_ptp(priv);
3330 if (ret == -EOPNOTSUPP)
3331 netdev_warn(priv->dev, "PTP not supported by HW\n");
3332 else if (ret)
3333 netdev_warn(priv->dev, "PTP init failed\n");
3334 else if (ptp_register)
3335 stmmac_ptp_register(priv);
3336
3337 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3338
 /* Convert the timer from msec to usec */
3340 if (!priv->tx_lpi_timer)
3341 priv->tx_lpi_timer = eee_timer * 1000;
3342
3343 if (priv->use_riwt) {
3344 u32 queue;
3345
3346 for (queue = 0; queue < rx_cnt; queue++) {
3347 if (!priv->rx_riwt[queue])
3348 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3349
3350 stmmac_rx_watchdog(priv, priv->ioaddr,
3351 priv->rx_riwt[queue], queue);
3352 }
3353 }
3354
3355 if (priv->hw->pcs)
3356 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3357
 /* set TX and RX rings length */
3359 stmmac_set_rings_length(priv);
3360
 /* Enable TSO */
3362 if (priv->tso) {
3363 for (chan = 0; chan < tx_cnt; chan++) {
3364 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3365
 /* TSO and TBS cannot co-exist */
3367 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3368 continue;
3369
3370 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3371 }
3372 }
3373
 /* Enable Split Header */
3375 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3376 for (chan = 0; chan < rx_cnt; chan++)
3377 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3378
 /* VLAN Tag Insertion */
3381 if (priv->dma_cap.vlins)
3382 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3383
 /* TBS */
3385 for (chan = 0; chan < tx_cnt; chan++) {
3386 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3387 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3388
3389 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3390 }
3391
 /* Configure real RX and TX queues */
3393 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3394 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3395
 /* Start the ball rolling... */
3397 stmmac_start_all_dma(priv);
3398
3399 if (priv->dma_cap.fpesel) {
3400 stmmac_fpe_start_wq(priv);
3401
3402 if (priv->plat->fpe_cfg->enable)
3403 stmmac_fpe_handshake(priv, true);
3404 }
3405
3406 return 0;
3407}
3408
3409static void stmmac_hw_teardown(struct net_device *dev)
3410{
3411 struct stmmac_priv *priv = netdev_priv(dev);
3412
3413 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3414}
3415
3416static void stmmac_free_irq(struct net_device *dev,
3417 enum request_irq_err irq_err, int irq_idx)
3418{
3419 struct stmmac_priv *priv = netdev_priv(dev);
3420 int j;
3421
3422 switch (irq_err) {
3423 case REQ_IRQ_ERR_ALL:
3424 irq_idx = priv->plat->tx_queues_to_use;
3425 fallthrough;
3426 case REQ_IRQ_ERR_TX:
3427 for (j = irq_idx - 1; j >= 0; j--) {
3428 if (priv->tx_irq[j] > 0) {
3429 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3430 free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3431 }
3432 }
3433 irq_idx = priv->plat->rx_queues_to_use;
3434 fallthrough;
3435 case REQ_IRQ_ERR_RX:
3436 for (j = irq_idx - 1; j >= 0; j--) {
3437 if (priv->rx_irq[j] > 0) {
3438 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3439 free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3440 }
3441 }
3442
3443 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3444 free_irq(priv->sfty_ue_irq, dev);
3445 fallthrough;
3446 case REQ_IRQ_ERR_SFTY_UE:
3447 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3448 free_irq(priv->sfty_ce_irq, dev);
3449 fallthrough;
3450 case REQ_IRQ_ERR_SFTY_CE:
3451 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3452 free_irq(priv->lpi_irq, dev);
3453 fallthrough;
3454 case REQ_IRQ_ERR_LPI:
3455 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3456 free_irq(priv->wol_irq, dev);
3457 fallthrough;
3458 case REQ_IRQ_ERR_WOL:
3459 free_irq(dev->irq, dev);
3460 fallthrough;
3461 case REQ_IRQ_ERR_MAC:
3462 case REQ_IRQ_ERR_NO:
 /* If MAC IRQ request error, no more IRQ to free */
3464 break;
3465 }
3466}
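
/* The fallthrough ladder above means each entry point unwinds every IRQ
 * requested before the failing stage: REQ_IRQ_ERR_ALL frees the complete
 * set, while e.g. REQ_IRQ_ERR_TX frees only the TX vectors already
 * requested (up to irq_idx) plus everything requested before them.
 */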
3467
3468static int stmmac_request_irq_multi_msi(struct net_device *dev)
3469{
3470 struct stmmac_priv *priv = netdev_priv(dev);
3471 enum request_irq_err irq_err;
3472 cpumask_t cpu_mask;
3473 int irq_idx = 0;
3474 char *int_name;
3475 int ret;
3476 int i;
3477
 /* For common interrupt */
3479 int_name = priv->int_name_mac;
3480 sprintf(int_name, "%s:%s", dev->name, "mac");
3481 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3482 0, int_name, dev);
3483 if (unlikely(ret < 0)) {
3484 netdev_err(priv->dev,
3485 "%s: alloc mac MSI %d (error: %d)\n",
3486 __func__, dev->irq, ret);
3487 irq_err = REQ_IRQ_ERR_MAC;
3488 goto irq_error;
3489 }
3490
 /* Request the Wake IRQ in case of another line
  * is used for WoL
  */
3494 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3495 int_name = priv->int_name_wol;
3496 sprintf(int_name, "%s:%s", dev->name, "wol");
3497 ret = request_irq(priv->wol_irq,
3498 stmmac_mac_interrupt,
3499 0, int_name, dev);
3500 if (unlikely(ret < 0)) {
3501 netdev_err(priv->dev,
3502 "%s: alloc wol MSI %d (error: %d)\n",
3503 __func__, priv->wol_irq, ret);
3504 irq_err = REQ_IRQ_ERR_WOL;
3505 goto irq_error;
3506 }
3507 }
3508
 /* Request the LPI IRQ in case of another line
  * is used for LPI
  */
3512 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3513 int_name = priv->int_name_lpi;
3514 sprintf(int_name, "%s:%s", dev->name, "lpi");
3515 ret = request_irq(priv->lpi_irq,
3516 stmmac_mac_interrupt,
3517 0, int_name, dev);
3518 if (unlikely(ret < 0)) {
3519 netdev_err(priv->dev,
3520 "%s: alloc lpi MSI %d (error: %d)\n",
3521 __func__, priv->lpi_irq, ret);
3522 irq_err = REQ_IRQ_ERR_LPI;
3523 goto irq_error;
3524 }
3525 }
3526
 /* Request the Safety Feature Correctible Error line in
  * case another line is used
  */
3530 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3531 int_name = priv->int_name_sfty_ce;
3532 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3533 ret = request_irq(priv->sfty_ce_irq,
3534 stmmac_safety_interrupt,
3535 0, int_name, dev);
3536 if (unlikely(ret < 0)) {
3537 netdev_err(priv->dev,
3538 "%s: alloc sfty ce MSI %d (error: %d)\n",
3539 __func__, priv->sfty_ce_irq, ret);
3540 irq_err = REQ_IRQ_ERR_SFTY_CE;
3541 goto irq_error;
3542 }
3543 }
3544
 /* Request the Safety Feature Uncorrectible Error line in
  * case another line is used
  */
3548 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3549 int_name = priv->int_name_sfty_ue;
3550 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3551 ret = request_irq(priv->sfty_ue_irq,
3552 stmmac_safety_interrupt,
3553 0, int_name, dev);
3554 if (unlikely(ret < 0)) {
3555 netdev_err(priv->dev,
3556 "%s: alloc sfty ue MSI %d (error: %d)\n",
3557 __func__, priv->sfty_ue_irq, ret);
3558 irq_err = REQ_IRQ_ERR_SFTY_UE;
3559 goto irq_error;
3560 }
3561 }
3562
 /* Request Rx MSI irq */
3564 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3565 if (i >= MTL_MAX_RX_QUEUES)
3566 break;
3567 if (priv->rx_irq[i] == 0)
3568 continue;
3569
3570 int_name = priv->int_name_rx_irq[i];
3571 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3572 ret = request_irq(priv->rx_irq[i],
3573 stmmac_msi_intr_rx,
3574 0, int_name, &priv->rx_queue[i]);
3575 if (unlikely(ret < 0)) {
3576 netdev_err(priv->dev,
3577 "%s: alloc rx-%d MSI %d (error: %d)\n",
3578 __func__, i, priv->rx_irq[i], ret);
3579 irq_err = REQ_IRQ_ERR_RX;
3580 irq_idx = i;
3581 goto irq_error;
3582 }
3583 cpumask_clear(&cpu_mask);
3584 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3585 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3586 }
3587
 /* Request Tx MSI irq */
3589 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3590 if (i >= MTL_MAX_TX_QUEUES)
3591 break;
3592 if (priv->tx_irq[i] == 0)
3593 continue;
3594
3595 int_name = priv->int_name_tx_irq[i];
3596 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3597 ret = request_irq(priv->tx_irq[i],
3598 stmmac_msi_intr_tx,
3599 0, int_name, &priv->tx_queue[i]);
3600 if (unlikely(ret < 0)) {
3601 netdev_err(priv->dev,
3602 "%s: alloc tx-%d MSI %d (error: %d)\n",
3603 __func__, i, priv->tx_irq[i], ret);
3604 irq_err = REQ_IRQ_ERR_TX;
3605 irq_idx = i;
3606 goto irq_error;
3607 }
3608 cpumask_clear(&cpu_mask);
3609 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3610 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3611 }
3612
3613 return 0;
3614
3615irq_error:
3616 stmmac_free_irq(dev, irq_err, irq_idx);
3617 return ret;
3618}
3619
3620static int stmmac_request_irq_single(struct net_device *dev)
3621{
3622 struct stmmac_priv *priv = netdev_priv(dev);
3623 enum request_irq_err irq_err;
3624 int ret;
3625
3626 ret = request_irq(dev->irq, stmmac_interrupt,
3627 IRQF_SHARED, dev->name, dev);
3628 if (unlikely(ret < 0)) {
3629 netdev_err(priv->dev,
3630 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3631 __func__, dev->irq, ret);
3632 irq_err = REQ_IRQ_ERR_MAC;
3633 goto irq_error;
3634 }
3635
 /* Request the Wake IRQ in case of another line
  * is used for WoL
  */
3639 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3640 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3641 IRQF_SHARED, dev->name, dev);
3642 if (unlikely(ret < 0)) {
3643 netdev_err(priv->dev,
3644 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3645 __func__, priv->wol_irq, ret);
3646 irq_err = REQ_IRQ_ERR_WOL;
3647 goto irq_error;
3648 }
3649 }
3650
 /* Request the LPI IRQ in case of another line is used for LPI */
3652 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3653 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3654 IRQF_SHARED, dev->name, dev);
3655 if (unlikely(ret < 0)) {
3656 netdev_err(priv->dev,
3657 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3658 __func__, priv->lpi_irq, ret);
3659 irq_err = REQ_IRQ_ERR_LPI;
3660 goto irq_error;
3661 }
3662 }
3663
3664 return 0;
3665
3666irq_error:
3667 stmmac_free_irq(dev, irq_err, 0);
3668 return ret;
3669}
3670
3671static int stmmac_request_irq(struct net_device *dev)
3672{
3673 struct stmmac_priv *priv = netdev_priv(dev);
3674 int ret;
3675
 /* Request the IRQ lines */
3677 if (priv->plat->multi_msi_en)
3678 ret = stmmac_request_irq_multi_msi(dev);
3679 else
3680 ret = stmmac_request_irq_single(dev);
3681
3682 return ret;
3683}
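
/* Platforms with multi_msi_en get one vector per RX/TX queue plus dedicated
 * MAC/WoL/LPI/safety lines; everything else shares a single (possibly
 * shared) interrupt line handled by stmmac_interrupt().
 */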
3684
/**
 * stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
3694static int stmmac_open(struct net_device *dev)
3695{
3696 struct stmmac_priv *priv = netdev_priv(dev);
3697 int mode = priv->plat->phy_interface;
3698 int bfsize = 0;
3699 u32 chan;
3700 int ret;
3701
3702 ret = pm_runtime_get_sync(priv->device);
3703 if (ret < 0) {
3704 pm_runtime_put_noidle(priv->device);
3705 return ret;
3706 }
3707
3708 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3709 priv->hw->pcs != STMMAC_PCS_RTBI &&
3710 (!priv->hw->xpcs ||
3711 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3712 ret = stmmac_init_phy(dev);
3713 if (ret) {
3714 netdev_err(priv->dev,
3715 "%s: Cannot attach to PHY (error: %d)\n",
3716 __func__, ret);
3717 goto init_phy_error;
3718 }
3719 }
3720
 /* Extra statistics */
3722 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3723 priv->xstats.threshold = tc;
3724
3725 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3726 if (bfsize < 0)
3727 bfsize = 0;
3728
3729 if (bfsize < BUF_SIZE_16KiB)
3730 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3731
3732 priv->dma_buf_sz = bfsize;
3733 buf_sz = bfsize;
3734
3735 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3736
3737 if (!priv->dma_tx_size)
3738 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3739 if (!priv->dma_rx_size)
3740 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3741
 /* Earlier check for TBS */
3743 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3744 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3745 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3746
 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3748 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3749 }
3750
3751 ret = alloc_dma_desc_resources(priv);
3752 if (ret < 0) {
3753 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3754 __func__);
3755 goto dma_desc_error;
3756 }
3757
3758 ret = init_dma_desc_rings(dev, GFP_KERNEL);
3759 if (ret < 0) {
3760 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3761 __func__);
3762 goto init_error;
3763 }
3764
3765 ret = stmmac_hw_setup(dev, true);
3766 if (ret < 0) {
3767 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3768 goto init_error;
3769 }
3770
3771 stmmac_init_coalesce(priv);
3772
3773 phylink_start(priv->phylink);
3774
3775 phylink_speed_up(priv->phylink);
3776
3777 ret = stmmac_request_irq(dev);
3778 if (ret)
3779 goto irq_error;
3780
3781 stmmac_enable_all_queues(priv);
3782 netif_tx_start_all_queues(priv->dev);
3783 stmmac_enable_all_dma_irq(priv);
3784
3785 return 0;
3786
3787irq_error:
3788 phylink_stop(priv->phylink);
3789
3790 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3791 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3792
3793 stmmac_hw_teardown(dev);
3794init_error:
3795 free_dma_desc_resources(priv);
3796dma_desc_error:
3797 phylink_disconnect_phy(priv->phylink);
3798init_phy_error:
3799 pm_runtime_put(priv->device);
3800 return ret;
3801}
3802
3803static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3804{
3805 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3806
3807 if (priv->fpe_wq)
3808 destroy_workqueue(priv->fpe_wq);
3809
3810 netdev_info(priv->dev, "FPE workqueue stop");
3811}
3812
/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
3819static int stmmac_release(struct net_device *dev)
3820{
3821 struct stmmac_priv *priv = netdev_priv(dev);
3822 u32 chan;
3823
3824 netif_tx_disable(dev);
3825
3826 if (device_may_wakeup(priv->device))
3827 phylink_speed_down(priv->phylink, false);
3828
3829 phylink_stop(priv->phylink);
3830 phylink_disconnect_phy(priv->phylink);
3831
3832 stmmac_disable_all_queues(priv);
3833
3834 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3835 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3836
 /* Free the IRQ lines */
3838 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3839
3840 if (priv->eee_enabled) {
3841 priv->tx_path_in_lpi_mode = false;
3842 del_timer_sync(&priv->eee_ctrl_timer);
3843 }
3844
 /* Stop TX/RX DMA and clear the descriptors */
3846 stmmac_stop_all_dma(priv);
3847
 /* Release and free the Rx/Tx resources */
3849 free_dma_desc_resources(priv);
3850
 /* Disable the MAC Rx/Tx */
3852 stmmac_mac_set(priv, priv->ioaddr, false);
3853
3854 netif_carrier_off(dev);
3855
3856 stmmac_release_ptp(priv);
3857
3858 pm_runtime_put(priv->device);
3859
3860 if (priv->dma_cap.fpesel)
3861 stmmac_fpe_stop_wq(priv);
3862
3863 return 0;
3864}
3865
3866static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3867 struct stmmac_tx_queue *tx_q)
3868{
3869 u16 tag = 0x0, inner_tag = 0x0;
3870 u32 inner_type = 0x0;
3871 struct dma_desc *p;
3872
3873 if (!priv->dma_cap.vlins)
3874 return false;
3875 if (!skb_vlan_tag_present(skb))
3876 return false;
3877 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3878 inner_tag = skb_vlan_tag_get(skb);
3879 inner_type = STMMAC_VLAN_INSERT;
3880 }
3881
3882 tag = skb_vlan_tag_get(skb);
3883
3884 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3885 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3886 else
3887 p = &tx_q->dma_tx[tx_q->cur_tx];
3888
3889 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3890 return false;
3891
3892 stmmac_set_tx_owner(priv, p);
3893 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3894 return true;
3895}
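
/* HW VLAN insertion consumes one descriptor of its own: the tag is written
 * into the descriptor, the own bit is set and cur_tx is advanced before the
 * payload descriptors are built. When the outer protocol is 802.1AD, the
 * skb tag is additionally programmed as the inner tag.
 */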
3896
/**
 *  stmmac_tso_allocator - Allocate TSO descriptors
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptors and requests new descriptors according
 *  to the buffer length to fill.
 */
3908static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3909 int total_len, bool last_segment, u32 queue)
3910{
3911 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3912 struct dma_desc *desc;
3913 u32 buff_size;
3914 int tmp_len;
3915
3916 tmp_len = total_len;
3917
3918 while (tmp_len > 0) {
3919 dma_addr_t curr_addr;
3920
3921 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3922 priv->dma_tx_size);
3923 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3924
3925 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3926 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3927 else
3928 desc = &tx_q->dma_tx[tx_q->cur_tx];
3929
3930 curr_addr = des + (total_len - tmp_len);
3931 if (priv->dma_cap.addr64 <= 32)
3932 desc->des0 = cpu_to_le32(curr_addr);
3933 else
3934 stmmac_set_desc_addr(priv, desc, curr_addr);
3935
3936 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3937 TSO_MAX_BUFF_SIZE : tmp_len;
3938
3939 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3940 0, 1,
3941 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3942 0, 0);
3943
3944 tmp_len -= TSO_MAX_BUFF_SIZE;
3945 }
3946}
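
/* A TSO payload descriptor can carry at most TSO_MAX_BUFF_SIZE (16K - 1)
 * bytes, so the loop above slices total_len into as many descriptors as
 * needed and flags last_segment only on the final slice.
 */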
3947
3948static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3949{
3950 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3951 int desc_size;
3952
3953 if (likely(priv->extend_desc))
3954 desc_size = sizeof(struct dma_extended_desc);
3955 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3956 desc_size = sizeof(struct dma_edesc);
3957 else
3958 desc_size = sizeof(struct dma_desc);
3959
 /* The own bit must be the latest setting done when preparing the
  * descriptor and then the barrier is needed to make sure that
  * all is coherent before granting the DMA engine.
  */
3964 wmb();
3965
3966 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3967 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3968}
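
/* The tail-pointer write above is the doorbell: after the wmb() makes the
 * descriptors globally visible, advancing the tail to cur_tx tells the DMA
 * engine there is new work on this queue.
 */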
3969
/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  Diagram below shows the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 */
3997static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3998{
3999 struct dma_desc *desc, *first, *mss_desc = NULL;
4000 struct stmmac_priv *priv = netdev_priv(dev);
4001 int nfrags = skb_shinfo(skb)->nr_frags;
4002 u32 queue = skb_get_queue_mapping(skb);
4003 unsigned int first_entry, tx_packets;
4004 int tmp_pay_len = 0, first_tx;
4005 struct stmmac_tx_queue *tx_q;
4006 bool has_vlan, set_ic;
4007 u8 proto_hdr_len, hdr;
4008 u32 pay_len, mss;
4009 dma_addr_t des;
4010 int i;
4011
4012 tx_q = &priv->tx_queue[queue];
4013 first_tx = tx_q->cur_tx;
4014
 /* Compute header lengths */
4016 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4017 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4018 hdr = sizeof(struct udphdr);
4019 } else {
4020 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4021 hdr = tcp_hdrlen(skb);
4022 }
4023
 /* Desc availability based on threshold should be enough safe */
4025 if (unlikely(stmmac_tx_avail(priv, queue) <
4026 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4027 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4028 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4029 queue));
4030
4031 netdev_err(priv->dev,
4032 "%s: Tx Ring full when queue awake\n",
4033 __func__);
4034 }
4035 return NETDEV_TX_BUSY;
4036 }
4037
4038 pay_len = skb_headlen(skb) - proto_hdr_len;
4039
4040 mss = skb_shinfo(skb)->gso_size;
4041
 /* set new MSS value if needed */
4043 if (mss != tx_q->mss) {
4044 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4045 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4046 else
4047 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4048
4049 stmmac_set_mss(priv, mss_desc, mss);
4050 tx_q->mss = mss;
4051 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4052 priv->dma_tx_size);
4053 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4054 }
4055
4056 if (netif_msg_tx_queued(priv)) {
4057 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4058 __func__, hdr, proto_hdr_len, pay_len, mss);
4059 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4060 skb->data_len);
4061 }
4062
 /* Check if VLAN can be inserted by HW */
4064 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4065
4066 first_entry = tx_q->cur_tx;
4067 WARN_ON(tx_q->tx_skbuff[first_entry]);
4068
4069 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4070 desc = &tx_q->dma_entx[first_entry].basic;
4071 else
4072 desc = &tx_q->dma_tx[first_entry];
4073 first = desc;
4074
4075 if (has_vlan)
4076 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4077
 /* first descriptor: fill Headers on Buf1 */
4079 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4080 DMA_TO_DEVICE);
4081 if (dma_mapping_error(priv->device, des))
4082 goto dma_map_err;
4083
4084 tx_q->tx_skbuff_dma[first_entry].buf = des;
4085 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4086 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4087 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4088
4089 if (priv->dma_cap.addr64 <= 32) {
4090 first->des0 = cpu_to_le32(des);
4091
 /* Fill start of payload in buff2 of first descriptor */
4093 if (pay_len)
4094 first->des1 = cpu_to_le32(des + proto_hdr_len);
4095
 /* If needed take extra descriptors to fill the remaining payload */
4097 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4098 } else {
4099 stmmac_set_desc_addr(priv, first, des);
4100 tmp_pay_len = pay_len;
4101 des += proto_hdr_len;
4102 pay_len = 0;
4103 }
4104
4105 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4106
 /* Prepare fragments */
4108 for (i = 0; i < nfrags; i++) {
4109 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4110
4111 des = skb_frag_dma_map(priv->device, frag, 0,
4112 skb_frag_size(frag),
4113 DMA_TO_DEVICE);
4114 if (dma_mapping_error(priv->device, des))
4115 goto dma_map_err;
4116
4117 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4118 (i == nfrags - 1), queue);
4119
4120 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4121 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4122 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4123 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4124 }
4125
4126 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4127
 /* Only the last descriptor gets to point to the skb. */
4129 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4130 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4131
 /* Manage tx mitigation */
4133 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4134 tx_q->tx_count_frames += tx_packets;
4135
4136 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4137 set_ic = true;
4138 else if (!priv->tx_coal_frames[queue])
4139 set_ic = false;
4140 else if (tx_packets > priv->tx_coal_frames[queue])
4141 set_ic = true;
4142 else if ((tx_q->tx_count_frames %
4143 priv->tx_coal_frames[queue]) < tx_packets)
4144 set_ic = true;
4145 else
4146 set_ic = false;
4147
4148 if (set_ic) {
4149 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4150 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4151 else
4152 desc = &tx_q->dma_tx[tx_q->cur_tx];
4153
4154 tx_q->tx_count_frames = 0;
4155 stmmac_set_tx_ic(priv, desc);
4156 priv->xstats.tx_set_ic_bit++;
4157 }
4158
 /* We've used all descriptors we need for this skb, however,
  * advance cur_tx so that it references a fresh descriptor.
  * ndo_start_xmit will fill this descriptor the next time it's
  * called and stmmac_tx_clean may clean up to this descriptor.
  */
4164 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4165
4166 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4167 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4168 __func__);
4169 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4170 }
4171
4172 dev->stats.tx_bytes += skb->len;
4173 priv->xstats.tx_tso_frames++;
4174 priv->xstats.tx_tso_nfrags += nfrags;
4175
4176 if (priv->sarc_type)
4177 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4178
4179 skb_tx_timestamp(skb);
4180
4181 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4182 priv->hwts_tx_en)) {
 /* declare that device is doing timestamping */
4184 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4185 stmmac_enable_tx_timestamp(priv, first);
4186 }
4187
 /* Complete the first descriptor before granting the DMA */
4189 stmmac_prepare_tso_tx_desc(priv, first, 1,
4190 proto_hdr_len,
4191 pay_len,
4192 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4193 hdr / 4, (skb->len - proto_hdr_len));
4194
 /* If context desc is used to change MSS */
4196 if (mss_desc) {
 /* Make sure that first descriptor has been completely
  * written, including its own bit. This is because MSS is
  * actually before first descriptor, so we need to make
  * sure that MSS's own bit is the last thing written.
  */
4202 dma_wmb();
4203 stmmac_set_tx_owner(priv, mss_desc);
4204 }
4205
4206 if (netif_msg_pktdata(priv)) {
4207 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4208 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4209 tx_q->cur_tx, first, nfrags);
4210 pr_info(">>> frame to be transmitted: ");
4211 print_pkt(skb->data, skb_headlen(skb));
4212 }
4213
4214 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4215
4216 stmmac_flush_tx_descriptors(priv, queue);
4217 stmmac_tx_timer_arm(priv, queue);
4218
4219 return NETDEV_TX_OK;
4220
4221dma_map_err:
4222 dev_err(priv->device, "Tx dma map failed\n");
4223 dev_kfree_skb(skb);
4224 priv->dev->stats.tx_dropped++;
4225 return NETDEV_TX_OK;
4226}
4227
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
4236static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4237{
4238 unsigned int first_entry, tx_packets, enh_desc;
4239 struct stmmac_priv *priv = netdev_priv(dev);
4240 unsigned int nopaged_len = skb_headlen(skb);
4241 int i, csum_insertion = 0, is_jumbo = 0;
4242 u32 queue = skb_get_queue_mapping(skb);
4243 int nfrags = skb_shinfo(skb)->nr_frags;
4244 int gso = skb_shinfo(skb)->gso_type;
4245 struct dma_edesc *tbs_desc = NULL;
4246 struct dma_desc *desc, *first;
4247 struct stmmac_tx_queue *tx_q;
4248 bool has_vlan, set_ic;
4249 int entry, first_tx;
4250 dma_addr_t des;
4251
4252 tx_q = &priv->tx_queue[queue];
4253 first_tx = tx_q->cur_tx;
4254
4255 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4256 stmmac_disable_eee_mode(priv);
4257
 /* Manage oversized TCP frames for GMAC4 device */
4259 if (skb_is_gso(skb) && priv->tso) {
4260 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4261 return stmmac_tso_xmit(skb, dev);
4262 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4263 return stmmac_tso_xmit(skb, dev);
4264 }
4265
4266 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4267 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4268 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4269 queue));
4270
4271 netdev_err(priv->dev,
4272 "%s: Tx Ring full when queue awake\n",
4273 __func__);
4274 }
4275 return NETDEV_TX_BUSY;
4276 }
4277
 /* Check if VLAN can be inserted by HW */
4279 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4280
4281 entry = tx_q->cur_tx;
4282 first_entry = entry;
4283 WARN_ON(tx_q->tx_skbuff[first_entry]);
4284
4285 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4286
4287 if (likely(priv->extend_desc))
4288 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4289 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4290 desc = &tx_q->dma_entx[entry].basic;
4291 else
4292 desc = tx_q->dma_tx + entry;
4293
4294 first = desc;
4295
4296 if (has_vlan)
4297 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4298
4299 enh_desc = priv->plat->enh_desc;
4300
4301 if (enh_desc)
4302 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4303
4304 if (unlikely(is_jumbo)) {
4305 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4306 if (unlikely(entry < 0) && (entry != -EINVAL))
4307 goto dma_map_err;
4308 }
4309
4310 for (i = 0; i < nfrags; i++) {
4311 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4312 int len = skb_frag_size(frag);
4313 bool last_segment = (i == (nfrags - 1));
4314
4315 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4316 WARN_ON(tx_q->tx_skbuff[entry]);
4317
4318 if (likely(priv->extend_desc))
4319 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4320 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4321 desc = &tx_q->dma_entx[entry].basic;
4322 else
4323 desc = tx_q->dma_tx + entry;
4324
4325 des = skb_frag_dma_map(priv->device, frag, 0, len,
4326 DMA_TO_DEVICE);
4327 if (dma_mapping_error(priv->device, des))
4328 goto dma_map_err;
4329
4330 tx_q->tx_skbuff_dma[entry].buf = des;
4331
4332 stmmac_set_desc_addr(priv, desc, des);
4333
4334 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4335 tx_q->tx_skbuff_dma[entry].len = len;
4336 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4337 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4338
 /* Prepare the descriptor and set the own bit too */
4340 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4341 priv->mode, 1, last_segment, skb->len);
4342 }
4343
 /* Only the last descriptor gets to point to the skb. */
4345 tx_q->tx_skbuff[entry] = skb;
4346 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4347
 /* According to the coalesce parameter the IC bit for the latest
  * segment is reset and the timer re-started to clean the tx status.
  * This will be useful so that we don't spend time to clean the tx
  * status while we still have pending packets.
  */
4353 tx_packets = (entry + 1) - first_tx;
4354 tx_q->tx_count_frames += tx_packets;
4355
4356 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4357 set_ic = true;
4358 else if (!priv->tx_coal_frames[queue])
4359 set_ic = false;
4360 else if (tx_packets > priv->tx_coal_frames[queue])
4361 set_ic = true;
4362 else if ((tx_q->tx_count_frames %
4363 priv->tx_coal_frames[queue]) < tx_packets)
4364 set_ic = true;
4365 else
4366 set_ic = false;
4367
4368 if (set_ic) {
4369 if (likely(priv->extend_desc))
4370 desc = &tx_q->dma_etx[entry].basic;
4371 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4372 desc = &tx_q->dma_entx[entry].basic;
4373 else
4374 desc = &tx_q->dma_tx[entry];
4375
4376 tx_q->tx_count_frames = 0;
4377 stmmac_set_tx_ic(priv, desc);
4378 priv->xstats.tx_set_ic_bit++;
4379 }
4380
 /* We've used all descriptors we need for this skb, however,
  * advance cur_tx so that it references a fresh descriptor.
  * ndo_start_xmit will fill this descriptor the next time it's
  * called and stmmac_tx_clean may clean up to this descriptor.
  */
4386 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4387 tx_q->cur_tx = entry;
4388
4389 if (netif_msg_pktdata(priv)) {
4390 netdev_dbg(priv->dev,
4391 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4392 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4393 entry, first, nfrags);
4394
4395 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4396 print_pkt(skb->data, skb->len);
4397 }
4398
4399 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4400 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4401 __func__);
4402 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4403 }
4404
4405 dev->stats.tx_bytes += skb->len;
4406
4407 if (priv->sarc_type)
4408 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4409
4410 skb_tx_timestamp(skb);
4411
 /* Ready to fill the first descriptor and set the OWN bit w/o any
  * problems because all the descriptors are actually ready to be
  * passed to the DMA engine.
  */
4416 if (likely(!is_jumbo)) {
4417 bool last_segment = (nfrags == 0);
4418
4419 des = dma_map_single(priv->device, skb->data,
4420 nopaged_len, DMA_TO_DEVICE);
4421 if (dma_mapping_error(priv->device, des))
4422 goto dma_map_err;
4423
4424 tx_q->tx_skbuff_dma[first_entry].buf = des;
4425 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4426 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4427
4428 stmmac_set_desc_addr(priv, first, des);
4429
4430 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4431 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4432
4433 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4434 priv->hwts_tx_en)) {
 /* declare that device is doing timestamping */
4436 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4437 stmmac_enable_tx_timestamp(priv, first);
4438 }
4439
 /* Prepare the first descriptor setting the OWN bit too */
4441 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4442 csum_insertion, priv->mode, 0, last_segment,
4443 skb->len);
4444 }
4445
4446 if (tx_q->tbs & STMMAC_TBS_EN) {
4447 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4448
4449 tbs_desc = &tx_q->dma_entx[first_entry];
4450 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4451 }
4452
4453 stmmac_set_tx_owner(priv, first);
4454
4455 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4456
4457 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4458
4459 stmmac_flush_tx_descriptors(priv, queue);
4460 stmmac_tx_timer_arm(priv, queue);
4461
4462 return NETDEV_TX_OK;
4463
4464dma_map_err:
4465 netdev_err(priv->dev, "Tx DMA map failed\n");
4466 dev_kfree_skb(skb);
4467 priv->dev->stats.tx_dropped++;
4468 return NETDEV_TX_OK;
4469}
4470
4471static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4472{
4473 struct vlan_ethhdr *veth;
4474 __be16 vlan_proto;
4475 u16 vlanid;
4476
4477 veth = (struct vlan_ethhdr *)skb->data;
4478 vlan_proto = veth->h_vlan_proto;
4479
4480 if ((vlan_proto == htons(ETH_P_8021Q) &&
4481 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4482 (vlan_proto == htons(ETH_P_8021AD) &&
4483 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
 /* pop the vlan tag */
4485 vlanid = ntohs(veth->h_vlan_TCI);
4486 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4487 skb_pull(skb, VLAN_HLEN);
4488 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4489 }
4490}
4491
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
4499static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4500{
4501 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4502 int dirty = stmmac_rx_dirty(priv, queue);
4503 unsigned int entry = rx_q->dirty_rx;
4504 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4505
4506 if (priv->dma_cap.addr64 <= 32)
4507 gfp |= GFP_DMA32;
4508
4509 while (dirty-- > 0) {
4510 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4511 struct dma_desc *p;
4512 bool use_rx_wd;
4513
4514 if (priv->extend_desc)
4515 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4516 else
4517 p = rx_q->dma_rx + entry;
4518
4519 if (!buf->page) {
4520 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4521 if (!buf->page)
4522 break;
4523 }
4524
4525 if (priv->sph && !buf->sec_page) {
4526 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4527 if (!buf->sec_page)
4528 break;
4529
4530 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4531 }
4532
4533 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4534
4535 stmmac_set_desc_addr(priv, p, buf->addr);
4536 if (priv->sph)
4537 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4538 else
4539 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4540 stmmac_refill_desc3(priv, rx_q, p);
4541
4542 rx_q->rx_count_frames++;
4543 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4544 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4545 rx_q->rx_count_frames = 0;
4546
4547 use_rx_wd = !priv->rx_coal_frames[queue];
4548 use_rx_wd |= rx_q->rx_count_frames > 0;
4549 if (!priv->use_riwt)
4550 use_rx_wd = false;
4551
4552 dma_wmb();
4553 stmmac_set_rx_owner(priv, p, use_rx_wd);
4554
4555 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4556 }
4557 rx_q->dirty_rx = entry;
4558 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4559 (rx_q->dirty_rx * sizeof(struct dma_desc));
4560 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4561}
4562
4563static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4564 struct dma_desc *p,
4565 int status, unsigned int len)
4566{
4567 unsigned int plen = 0, hlen = 0;
4568 int coe = priv->hw->rx_csum;
4569
4570
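/* Not first descriptor, buffer is always zero */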
4571 if (priv->sph && len)
4572 return 0;
4573
4574
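/* First descriptor, get split header length */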
4575 stmmac_get_rx_header_len(priv, p, &hlen);
4576 if (priv->sph && hlen) {
4577 priv->xstats.rx_split_hdr_pkt_n++;
4578 return hlen;
4579 }
4580
4581
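/* First descriptor, not last descriptor and not split header */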
4582 if (status & rx_not_ls)
4583 return priv->dma_buf_sz;
4584
4585 plen = stmmac_get_rx_frame_len(priv, p, coe);
4586
4587
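/* First descriptor and last descriptor and not split header */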
4588 return min_t(unsigned int, priv->dma_buf_sz, plen);
4589}
4590
4591static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4592 struct dma_desc *p,
4593 int status, unsigned int len)
4594{
4595 int coe = priv->hw->rx_csum;
4596 unsigned int plen = 0;
4597
4598
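/* Not split header, buffer is not available */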
4599 if (!priv->sph)
4600 return 0;
4601
4602
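/* Not last descriptor */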
4603 if (status & rx_not_ls)
4604 return priv->dma_buf_sz;
4605
4606 plen = stmmac_get_rx_frame_len(priv, p, coe);
4607
4608
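/* Last descriptor */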
4609 return plen - len;
4610}
4611
4612static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4613 struct xdp_frame *xdpf, bool dma_map)
4614{
4615 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4616 unsigned int entry = tx_q->cur_tx;
4617 struct dma_desc *tx_desc;
4618 dma_addr_t dma_addr;
4619 bool set_ic;
4620
4621 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4622 return STMMAC_XDP_CONSUMED;
4623
4624 if (likely(priv->extend_desc))
4625 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4626 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4627 tx_desc = &tx_q->dma_entx[entry].basic;
4628 else
4629 tx_desc = tx_q->dma_tx + entry;
4630
4631 if (dma_map) {
4632 dma_addr = dma_map_single(priv->device, xdpf->data,
4633 xdpf->len, DMA_TO_DEVICE);
4634 if (dma_mapping_error(priv->device, dma_addr))
4635 return STMMAC_XDP_CONSUMED;
4636
4637 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4638 } else {
4639 struct page *page = virt_to_page(xdpf->data);
4640
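/* For XDP_TX the buffer comes from our page_pool: the payload
 * sits after the struct xdp_frame and its headroom, so derive
 * the DMA address from the page instead of remapping it.
 */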
4641 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4642 xdpf->headroom;
4643 dma_sync_single_for_device(priv->device, dma_addr,
4644 xdpf->len, DMA_BIDIRECTIONAL);
4645
4646 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4647 }
4648
4649 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4650 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4651 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4652 tx_q->tx_skbuff_dma[entry].last_segment = true;
4653 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4654
4655 tx_q->xdpf[entry] = xdpf;
4656
4657 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4658
4659 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4660 true, priv->mode, true, true,
4661 xdpf->len);
4662
4663 tx_q->tx_count_frames++;
4664
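/* Raise a TX completion interrupt only once every
 * tx_coal_frames packets to limit interrupt pressure.
 */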
4665 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4666 set_ic = true;
4667 else
4668 set_ic = false;
4669
4670 if (set_ic) {
4671 tx_q->tx_count_frames = 0;
4672 stmmac_set_tx_ic(priv, tx_desc);
4673 priv->xstats.tx_set_ic_bit++;
4674 }
4675
4676 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4677
4678 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4679 tx_q->cur_tx = entry;
4680
4681 return STMMAC_XDP_TX;
4682}
4683
4684static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4685 int cpu)
4686{
4687 int index = cpu;
4688
4689 if (unlikely(index < 0))
4690 index = 0;
4691
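/* Fold the CPU number onto the range of usable TX queues */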
4692 while (index >= priv->plat->tx_queues_to_use)
4693 index -= priv->plat->tx_queues_to_use;
4694
4695 return index;
4696}
4697
4698static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4699 struct xdp_buff *xdp)
4700{
4701 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4702 int cpu = smp_processor_id();
4703 struct netdev_queue *nq;
4704 int queue;
4705 int res;
4706
4707 if (unlikely(!xdpf))
4708 return STMMAC_XDP_CONSUMED;
4709
4710 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4711 nq = netdev_get_tx_queue(priv->dev, queue);
4712
4713 __netif_tx_lock(nq, cpu);
4714
4715 txq_trans_cond_update(nq);
4716
4717 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4718 if (res == STMMAC_XDP_TX)
4719 stmmac_flush_tx_descriptors(priv, queue);
4720
4721 __netif_tx_unlock(nq);
4722
4723 return res;
4724}
4725
4726static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4727 struct bpf_prog *prog,
4728 struct xdp_buff *xdp)
4729{
4730 u32 act;
4731 int res;
4732
4733 act = bpf_prog_run_xdp(prog, xdp);
4734 switch (act) {
4735 case XDP_PASS:
4736 res = STMMAC_XDP_PASS;
4737 break;
4738 case XDP_TX:
4739 res = stmmac_xdp_xmit_back(priv, xdp);
4740 break;
4741 case XDP_REDIRECT:
4742 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4743 res = STMMAC_XDP_CONSUMED;
4744 else
4745 res = STMMAC_XDP_REDIRECT;
4746 break;
4747 default:
4748 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4749 fallthrough;
4750 case XDP_ABORTED:
4751 trace_xdp_exception(priv->dev, prog, act);
4752 fallthrough;
4753 case XDP_DROP:
4754 res = STMMAC_XDP_CONSUMED;
4755 break;
4756 }
4757
4758 return res;
4759}
4760
4761static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4762 struct xdp_buff *xdp)
4763{
4764 struct bpf_prog *prog;
4765 int res;
4766
4767 prog = READ_ONCE(priv->xdp_prog);
4768 if (!prog) {
4769 res = STMMAC_XDP_PASS;
4770 goto out;
4771 }
4772
4773 res = __stmmac_xdp_run_prog(priv, prog, xdp);
4774out:
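/* Negative-encode the verdict: XDP_PASS (0) becomes NULL so the
 * caller falls through to normal SKB handling; other verdicts are
 * recovered with -PTR_ERR().
 */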
4775 return ERR_PTR(-res);
4776}
4777
4778static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4779 int xdp_status)
4780{
4781 int cpu = smp_processor_id();
4782 int queue;
4783
4784 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4785
4786 if (xdp_status & STMMAC_XDP_TX)
4787 stmmac_tx_timer_arm(priv, queue);
4788
4789 if (xdp_status & STMMAC_XDP_REDIRECT)
4790 xdp_do_flush();
4791}
4792
4793static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4794 struct xdp_buff *xdp)
4795{
4796 unsigned int metasize = xdp->data - xdp->data_meta;
4797 unsigned int datasize = xdp->data_end - xdp->data;
4798 struct sk_buff *skb;
4799
4800 skb = __napi_alloc_skb(&ch->rxtx_napi,
4801 xdp->data_end - xdp->data_hard_start,
4802 GFP_ATOMIC | __GFP_NOWARN);
4803 if (unlikely(!skb))
4804 return NULL;
4805
4806 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4807 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4808 if (metasize)
4809 skb_metadata_set(skb, metasize);
4810
4811 return skb;
4812}
4813
4814static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4815 struct dma_desc *p, struct dma_desc *np,
4816 struct xdp_buff *xdp)
4817{
4818 struct stmmac_channel *ch = &priv->channel[queue];
4819 unsigned int len = xdp->data_end - xdp->data;
4820 enum pkt_hash_types hash_type;
4821 int coe = priv->hw->rx_csum;
4822 struct sk_buff *skb;
4823 u32 hash;
4824
4825 skb = stmmac_construct_skb_zc(ch, xdp);
4826 if (!skb) {
4827 priv->dev->stats.rx_dropped++;
4828 return;
4829 }
4830
4831 stmmac_get_rx_hwtstamp(priv, p, np, skb);
4832 stmmac_rx_vlan(priv->dev, skb);
4833 skb->protocol = eth_type_trans(skb, priv->dev);
4834
4835 if (unlikely(!coe))
4836 skb_checksum_none_assert(skb);
4837 else
4838 skb->ip_summed = CHECKSUM_UNNECESSARY;
4839
4840 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4841 skb_set_hash(skb, hash, hash_type);
4842
4843 skb_record_rx_queue(skb, queue);
4844 napi_gro_receive(&ch->rxtx_napi, skb);
4845
4846 priv->dev->stats.rx_packets++;
4847 priv->dev->stats.rx_bytes += len;
4848}
4849
4850static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4851{
4852 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4853 unsigned int entry = rx_q->dirty_rx;
4854 struct dma_desc *rx_desc = NULL;
4855 bool ret = true;
4856
4857 budget = min(budget, stmmac_rx_dirty(priv, queue));
4858
4859 while (budget-- > 0 && entry != rx_q->cur_rx) {
4860 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4861 dma_addr_t dma_addr;
4862 bool use_rx_wd;
4863
4864 if (!buf->xdp) {
4865 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4866 if (!buf->xdp) {
4867 ret = false;
4868 break;
4869 }
4870 }
4871
4872 if (priv->extend_desc)
4873 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4874 else
4875 rx_desc = rx_q->dma_rx + entry;
4876
4877 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4878 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4879 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4880 stmmac_refill_desc3(priv, rx_q, rx_desc);
4881
4882 rx_q->rx_count_frames++;
4883 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4884 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4885 rx_q->rx_count_frames = 0;
4886
4887 use_rx_wd = !priv->rx_coal_frames[queue];
4888 use_rx_wd |= rx_q->rx_count_frames > 0;
4889 if (!priv->use_riwt)
4890 use_rx_wd = false;
4891
4892 dma_wmb();
4893 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4894
4895 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4896 }
4897
4898 if (rx_desc) {
4899 rx_q->dirty_rx = entry;
4900 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4901 (rx_q->dirty_rx * sizeof(struct dma_desc));
4902 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4903 }
4904
4905 return ret;
4906}
4907
4908static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4909{
4910 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4911 unsigned int count = 0, error = 0, len = 0;
4912 int dirty = stmmac_rx_dirty(priv, queue);
4913 unsigned int next_entry = rx_q->cur_rx;
4914 unsigned int desc_size;
4915 struct bpf_prog *prog;
4916 bool failure = false;
4917 int xdp_status = 0;
4918 int status = 0;
4919
4920 if (netif_msg_rx_status(priv)) {
4921 void *rx_head;
4922
4923 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4924 if (priv->extend_desc) {
4925 rx_head = (void *)rx_q->dma_erx;
4926 desc_size = sizeof(struct dma_extended_desc);
4927 } else {
4928 rx_head = (void *)rx_q->dma_rx;
4929 desc_size = sizeof(struct dma_desc);
4930 }
4931
4932 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4933 rx_q->dma_rx_phy, desc_size);
4934 }
4935 while (count < limit) {
4936 struct stmmac_rx_buffer *buf;
4937 unsigned int buf1_len = 0;
4938 struct dma_desc *np, *p;
4939 int entry;
4940 int res;
4941
4942 if (!count && rx_q->state_saved) {
4943 error = rx_q->state.error;
4944 len = rx_q->state.len;
4945 } else {
4946 rx_q->state_saved = false;
4947 error = 0;
4948 len = 0;
4949 }
4950
4951 if (count >= limit)
4952 break;
4953
4954read_again:
4955 buf1_len = 0;
4956 entry = next_entry;
4957 buf = &rx_q->buf_pool[entry];
4958
4959 if (dirty >= STMMAC_RX_FILL_BATCH) {
4960 failure = failure ||
4961 !stmmac_rx_refill_zc(priv, queue, dirty);
4962 dirty = 0;
4963 }
4964
4965 if (priv->extend_desc)
4966 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4967 else
4968 p = rx_q->dma_rx + entry;
4969
/* read the status of the incoming frame */
4971 status = stmmac_rx_status(priv, &priv->dev->stats,
4972 &priv->xstats, p);
4973
4974 if (unlikely(status & dma_own))
4975 break;
4976
4977
4978 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4979 priv->dma_rx_size);
4980 next_entry = rx_q->cur_rx;
4981
4982 if (priv->extend_desc)
4983 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4984 else
4985 np = rx_q->dma_rx + next_entry;
4986
4987 prefetch(np);
4988
/* Ensure a valid XSK buffer before proceeding */
4990 if (!buf->xdp)
4991 break;
4992
4993 if (priv->extend_desc)
4994 stmmac_rx_extended_status(priv, &priv->dev->stats,
4995 &priv->xstats,
4996 rx_q->dma_erx + entry);
4997 if (unlikely(status == discard_frame)) {
4998 xsk_buff_free(buf->xdp);
4999 buf->xdp = NULL;
5000 dirty++;
5001 error = 1;
5002 if (!priv->hwts_rx_en)
5003 priv->dev->stats.rx_errors++;
5004 }
5005
5006 if (unlikely(error && (status & rx_not_ls)))
5007 goto read_again;
5008 if (unlikely(error)) {
5009 count++;
5010 continue;
5011 }
5012
/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5014 if (likely(status & rx_not_ls)) {
5015 xsk_buff_free(buf->xdp);
5016 buf->xdp = NULL;
5017 dirty++;
5018 count++;
5019 goto read_again;
5020 }
5021
/* XDP ZC frames only support the primary buffer for now */
5023 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5024 len += buf1_len;
5025
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 * Type frames (LLC/LLC-SNAP)
 *
 * llc_snap is never checked in GMAC >= 4, so this ACS
 * feature is always disabled and packets need to be
 * stripped manually.
 */
5033 if (likely(!(status & rx_not_ls)) &&
5034 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5035 unlikely(status != llc_snap))) {
5036 buf1_len -= ETH_FCS_LEN;
5037 len -= ETH_FCS_LEN;
5038 }
5039
/* RX buffer is good and fits into an XSK pool buffer */
5041 buf->xdp->data_end = buf->xdp->data + buf1_len;
5042 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5043
5044 prog = READ_ONCE(priv->xdp_prog);
5045 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5046
5047 switch (res) {
5048 case STMMAC_XDP_PASS:
5049 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5050 xsk_buff_free(buf->xdp);
5051 break;
5052 case STMMAC_XDP_CONSUMED:
5053 xsk_buff_free(buf->xdp);
5054 priv->dev->stats.rx_dropped++;
5055 break;
5056 case STMMAC_XDP_TX:
5057 case STMMAC_XDP_REDIRECT:
5058 xdp_status |= res;
5059 break;
5060 }
5061
5062 buf->xdp = NULL;
5063 dirty++;
5064 count++;
5065 }
5066
5067 if (status & rx_not_ls) {
5068 rx_q->state_saved = true;
5069 rx_q->state.error = error;
5070 rx_q->state.len = len;
5071 }
5072
5073 stmmac_finalize_xdp_rx(priv, xdp_status);
5074
5075 priv->xstats.rx_pkt_n += count;
5076 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5077
5078 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5079 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5080 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5081 else
5082 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5083
5084 return (int)count;
5085 }
5086
5087 return failure ? limit : (int)count;
5088}
5089
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description: this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
5098static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5099{
5100 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5101 struct stmmac_channel *ch = &priv->channel[queue];
5102 unsigned int count = 0, error = 0, len = 0;
5103 int status = 0, coe = priv->hw->rx_csum;
5104 unsigned int next_entry = rx_q->cur_rx;
5105 enum dma_data_direction dma_dir;
5106 unsigned int desc_size;
5107 struct sk_buff *skb = NULL;
5108 struct xdp_buff xdp;
5109 int xdp_status = 0;
5110 int buf_sz;
5111
5112 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5113 buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5114
5115 if (netif_msg_rx_status(priv)) {
5116 void *rx_head;
5117
5118 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5119 if (priv->extend_desc) {
5120 rx_head = (void *)rx_q->dma_erx;
5121 desc_size = sizeof(struct dma_extended_desc);
5122 } else {
5123 rx_head = (void *)rx_q->dma_rx;
5124 desc_size = sizeof(struct dma_desc);
5125 }
5126
5127 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5128 rx_q->dma_rx_phy, desc_size);
5129 }
5130 while (count < limit) {
5131 unsigned int buf1_len = 0, buf2_len = 0;
5132 enum pkt_hash_types hash_type;
5133 struct stmmac_rx_buffer *buf;
5134 struct dma_desc *np, *p;
5135 int entry;
5136 u32 hash;
5137
5138 if (!count && rx_q->state_saved) {
5139 skb = rx_q->state.skb;
5140 error = rx_q->state.error;
5141 len = rx_q->state.len;
5142 } else {
5143 rx_q->state_saved = false;
5144 skb = NULL;
5145 error = 0;
5146 len = 0;
5147 }
5148
5149 if (count >= limit)
5150 break;
5151
5152read_again:
5153 buf1_len = 0;
5154 buf2_len = 0;
5155 entry = next_entry;
5156 buf = &rx_q->buf_pool[entry];
5157
5158 if (priv->extend_desc)
5159 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5160 else
5161 p = rx_q->dma_rx + entry;
5162
/* read the status of the incoming frame */
5164 status = stmmac_rx_status(priv, &priv->dev->stats,
5165 &priv->xstats, p);
5166
5167 if (unlikely(status & dma_own))
5168 break;
5169
5170 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5171 priv->dma_rx_size);
5172 next_entry = rx_q->cur_rx;
5173
5174 if (priv->extend_desc)
5175 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5176 else
5177 np = rx_q->dma_rx + next_entry;
5178
5179 prefetch(np);
5180
5181 if (priv->extend_desc)
5182 stmmac_rx_extended_status(priv, &priv->dev->stats,
5183 &priv->xstats, rx_q->dma_erx + entry);
5184 if (unlikely(status == discard_frame)) {
5185 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5186 buf->page = NULL;
5187 error = 1;
5188 if (!priv->hwts_rx_en)
5189 priv->dev->stats.rx_errors++;
5190 }
5191
5192 if (unlikely(error && (status & rx_not_ls)))
5193 goto read_again;
5194 if (unlikely(error)) {
5195 dev_kfree_skb(skb);
5196 skb = NULL;
5197 count++;
5198 continue;
5199 }
5200
5201
/* Buffer is good. Go on. */
5203 prefetch(page_address(buf->page) + buf->page_offset);
5204 if (buf->sec_page)
5205 prefetch(page_address(buf->sec_page));
5206
5207 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5208 len += buf1_len;
5209 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5210 len += buf2_len;
5211
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 * Type frames (LLC/LLC-SNAP)
 *
 * llc_snap is never checked in GMAC >= 4, so this ACS
 * feature is always disabled and packets need to be
 * stripped manually.
 */
5219 if (likely(!(status & rx_not_ls)) &&
5220 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5221 unlikely(status != llc_snap))) {
5222 if (buf2_len) {
5223 buf2_len -= ETH_FCS_LEN;
5224 len -= ETH_FCS_LEN;
5225 } else if (buf1_len) {
5226 buf1_len -= ETH_FCS_LEN;
5227 len -= ETH_FCS_LEN;
5228 }
5229 }
5230
5231 if (!skb) {
5232 unsigned int pre_len, sync_len;
5233
5234 dma_sync_single_for_cpu(priv->device, buf->addr,
5235 buf1_len, dma_dir);
5236
5237 xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5238 xdp_prepare_buff(&xdp, page_address(buf->page),
5239 buf->page_offset, buf1_len, false);
5240
5241 pre_len = xdp.data_end - xdp.data_hard_start -
5242 buf->page_offset;
5243 skb = stmmac_xdp_run_prog(priv, &xdp);
5244
/* Due to xdp_adjust_tail, the DMA sync for_device must cover
 * the maximum length the CPU may have touched.
 */
5247 sync_len = xdp.data_end - xdp.data_hard_start -
5248 buf->page_offset;
5249 sync_len = max(sync_len, pre_len);
5250
/* Handle verdicts other than XDP_PASS */
5252 if (IS_ERR(skb)) {
5253 unsigned int xdp_res = -PTR_ERR(skb);
5254
5255 if (xdp_res & STMMAC_XDP_CONSUMED) {
5256 page_pool_put_page(rx_q->page_pool,
5257 virt_to_head_page(xdp.data),
5258 sync_len, true);
5259 buf->page = NULL;
5260 priv->dev->stats.rx_dropped++;
5261
/* Clear skb as it was set as status by the
 * XDP program.
 */
5265 skb = NULL;
5266
5267 if (unlikely((status & rx_not_ls)))
5268 goto read_again;
5269
5270 count++;
5271 continue;
5272 } else if (xdp_res & (STMMAC_XDP_TX |
5273 STMMAC_XDP_REDIRECT)) {
5274 xdp_status |= xdp_res;
5275 buf->page = NULL;
5276 skb = NULL;
5277 count++;
5278 continue;
5279 }
5280 }
5281 }
5282
5283 if (!skb) {
5284
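/* XDP program may expand or reduce tail */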
5285 buf1_len = xdp.data_end - xdp.data;
5286
5287 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5288 if (!skb) {
5289 priv->dev->stats.rx_dropped++;
5290 count++;
5291 goto drain_data;
5292 }
5293
/* XDP program may adjust header */
5295 skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5296 skb_put(skb, buf1_len);
5297
/* Data payload copied into SKB, page ready for recycle */
5299 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5300 buf->page = NULL;
5301 } else if (buf1_len) {
5302 dma_sync_single_for_cpu(priv->device, buf->addr,
5303 buf1_len, dma_dir);
5304 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5305 buf->page, buf->page_offset, buf1_len,
5306 priv->dma_buf_sz);
5307
/* Data payload appended into SKB */
5309 page_pool_release_page(rx_q->page_pool, buf->page);
5310 buf->page = NULL;
5311 }
5312
5313 if (buf2_len) {
5314 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5315 buf2_len, dma_dir);
5316 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5317 buf->sec_page, 0, buf2_len,
5318 priv->dma_buf_sz);
5319
/* Data payload appended into SKB */
5321 page_pool_release_page(rx_q->page_pool, buf->sec_page);
5322 buf->sec_page = NULL;
5323 }
5324
5325drain_data:
5326 if (likely(status & rx_not_ls))
5327 goto read_again;
5328 if (!skb)
5329 continue;
5330
/* Got entire packet into SKB. Finish it. */
5333 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5334 stmmac_rx_vlan(priv->dev, skb);
5335 skb->protocol = eth_type_trans(skb, priv->dev);
5336
5337 if (unlikely(!coe))
5338 skb_checksum_none_assert(skb);
5339 else
5340 skb->ip_summed = CHECKSUM_UNNECESSARY;
5341
5342 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5343 skb_set_hash(skb, hash, hash_type);
5344
5345 skb_record_rx_queue(skb, queue);
5346 napi_gro_receive(&ch->rx_napi, skb);
5347 skb = NULL;
5348
5349 priv->dev->stats.rx_packets++;
5350 priv->dev->stats.rx_bytes += len;
5351 count++;
5352 }
5353
5354 if (status & rx_not_ls || skb) {
5355 rx_q->state_saved = true;
5356 rx_q->state.skb = skb;
5357 rx_q->state.error = error;
5358 rx_q->state.len = len;
5359 }
5360
5361 stmmac_finalize_xdp_rx(priv, xdp_status);
5362
5363 stmmac_rx_refill(priv, queue);
5364
5365 priv->xstats.rx_pkt_n += count;
5366 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5367
5368 return count;
5369}
5370
5371static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5372{
5373 struct stmmac_channel *ch =
5374 container_of(napi, struct stmmac_channel, rx_napi);
5375 struct stmmac_priv *priv = ch->priv_data;
5376 u32 chan = ch->index;
5377 int work_done;
5378
5379 priv->xstats.napi_poll++;
5380
5381 work_done = stmmac_rx(priv, budget, chan);
5382 if (work_done < budget && napi_complete_done(napi, work_done)) {
5383 unsigned long flags;
5384
5385 spin_lock_irqsave(&ch->lock, flags);
5386 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5387 spin_unlock_irqrestore(&ch->lock, flags);
5388 }
5389
5390 return work_done;
5391}
5392
5393static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5394{
5395 struct stmmac_channel *ch =
5396 container_of(napi, struct stmmac_channel, tx_napi);
5397 struct stmmac_priv *priv = ch->priv_data;
5398 u32 chan = ch->index;
5399 int work_done;
5400
5401 priv->xstats.napi_poll++;
5402
5403 work_done = stmmac_tx_clean(priv, budget, chan);
5404 work_done = min(work_done, budget);
5405
5406 if (work_done < budget && napi_complete_done(napi, work_done)) {
5407 unsigned long flags;
5408
5409 spin_lock_irqsave(&ch->lock, flags);
5410 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5411 spin_unlock_irqrestore(&ch->lock, flags);
5412 }
5413
5414 return work_done;
5415}
5416
5417static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5418{
5419 struct stmmac_channel *ch =
5420 container_of(napi, struct stmmac_channel, rxtx_napi);
5421 struct stmmac_priv *priv = ch->priv_data;
5422 int rx_done, tx_done, rxtx_done;
5423 u32 chan = ch->index;
5424
5425 priv->xstats.napi_poll++;
5426
5427 tx_done = stmmac_tx_clean(priv, budget, chan);
5428 tx_done = min(tx_done, budget);
5429
5430 rx_done = stmmac_rx_zc(priv, budget, chan);
5431
5432 rxtx_done = max(tx_done, rx_done);
5433
5434
/* If either TX or RX work is not complete, return budget
 * and keep polling.
 */
5437 if (rxtx_done >= budget)
5438 return budget;
5439
/* All work done; exit polling mode and re-enable interrupts */
5441 if (napi_complete_done(napi, rxtx_done)) {
5442 unsigned long flags;
5443
5444 spin_lock_irqsave(&ch->lock, flags);
5445
/* Both RX and TX work are complete,
 * so enable both RX & TX IRQs.
 */
5448 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5449 spin_unlock_irqrestore(&ch->lock, flags);
5450 }
5451
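/* NAPI was completed, so return strictly less than budget */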
5452 return min(rxtx_done, budget - 1);
5453}
5454
/**
 * stmmac_tx_timeout
 * @dev: Pointer to net device structure
 * @txqueue: the index of the hanging transmit queue
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
5464static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5465{
5466 struct stmmac_priv *priv = netdev_priv(dev);
5467
5468 stmmac_global_err(priv);
5469}
5470
/**
 * stmmac_set_rx_mode - entry point for multicast addressing
 * @dev: pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
5480static void stmmac_set_rx_mode(struct net_device *dev)
5481{
5482 struct stmmac_priv *priv = netdev_priv(dev);
5483
5484 stmmac_set_filter(priv, priv->hw, dev);
5485}
5486
/**
 * stmmac_change_mtu - entry point to change MTU size for the device.
 * @dev: device pointer.
 * @new_mtu: the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
5498static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5499{
5500 struct stmmac_priv *priv = netdev_priv(dev);
5501 int txfifosz = priv->plat->tx_fifo_size;
5502 const int mtu = new_mtu;
5503
5504 if (txfifosz == 0)
5505 txfifosz = priv->dma_cap.tx_fifo_size;
5506
5507 txfifosz /= priv->plat->tx_queues_to_use;
5508
5509 if (netif_running(dev)) {
5510 netdev_err(priv->dev, "must be stopped to change its MTU\n");
5511 return -EBUSY;
5512 }
5513
5514 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5515 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5516 return -EINVAL;
5517 }
5518
5519 new_mtu = STMMAC_ALIGN(new_mtu);
5520
/* If condition true, FIFO is too small or MTU too large */
5522 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5523 return -EINVAL;
5524
5525 dev->mtu = mtu;
5526
5527 netdev_update_features(dev);
5528
5529 return 0;
5530}
5531
5532static netdev_features_t stmmac_fix_features(struct net_device *dev,
5533 netdev_features_t features)
5534{
5535 struct stmmac_priv *priv = netdev_priv(dev);
5536
5537 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5538 features &= ~NETIF_F_RXCSUM;
5539
5540 if (!priv->plat->tx_coe)
5541 features &= ~NETIF_F_CSUM_MASK;
5542
/* Some GMAC devices have a bugged Jumbo frame support that
 * needs to have the Tx COE disabled for oversized frames
 * (due to limited buffer sizes). In this case we disable
 * the TX csum insertion in the TCP segmentation layer.
 */
5548 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5549 features &= ~NETIF_F_CSUM_MASK;
5550
/* Disable tso if asked by ethtool */
5552 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5553 if (features & NETIF_F_TSO)
5554 priv->tso = true;
5555 else
5556 priv->tso = false;
5557 }
5558
5559 return features;
5560}
5561
5562static int stmmac_set_features(struct net_device *netdev,
5563 netdev_features_t features)
5564{
5565 struct stmmac_priv *priv = netdev_priv(netdev);
5566
/* Keep the COE Type in case of csum is supporting */
5568 if (features & NETIF_F_RXCSUM)
5569 priv->hw->rx_csum = priv->plat->rx_coe;
5570 else
5571 priv->hw->rx_csum = 0;
5572
/* No check needed because rx_coe has been set before and it will be
 * fixed in case of issue.
 */
5575 stmmac_rx_ipc(priv, priv->hw);
5576
5577 if (priv->sph_cap) {
5578 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5579 u32 chan;
5580
5581 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5582 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5583 }
5584
5585 return 0;
5586}
5587
5588static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5589{
5590 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5591 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5592 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5593 bool *hs_enable = &fpe_cfg->hs_enable;
5594
5595 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5596 return;
5597
/* If LP has sent verify mPacket, LP is FPE capable */
5599 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5600 if (*lp_state < FPE_STATE_CAPABLE)
5601 *lp_state = FPE_STATE_CAPABLE;
5602
/* If user has requested FPE enable, quickly respond */
5604 if (*hs_enable)
5605 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5606 MPACKET_RESPONSE);
5607 }
5608
/* If Local has sent verify mPacket, Local is FPE capable */
5610 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5611 if (*lo_state < FPE_STATE_CAPABLE)
5612 *lo_state = FPE_STATE_CAPABLE;
5613 }
5614
/* If LP has sent response mPacket, LP is entering FPE ON */
5616 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5617 *lp_state = FPE_STATE_ENTERING_ON;
5618
/* If Local has sent response mPacket, Local is entering FPE ON */
5620 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5621 *lo_state = FPE_STATE_ENTERING_ON;
5622
5623 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5624 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5625 priv->fpe_wq) {
5626 queue_work(priv->fpe_wq, &priv->fpe_task);
5627 }
5628}
5629
5630static void stmmac_common_interrupt(struct stmmac_priv *priv)
5631{
5632 u32 rx_cnt = priv->plat->rx_queues_to_use;
5633 u32 tx_cnt = priv->plat->tx_queues_to_use;
5634 u32 queues_count;
5635 u32 queue;
5636 bool xmac;
5637
5638 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5639 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5640
5641 if (priv->irq_wake)
5642 pm_wakeup_event(priv->device, 0);
5643
5644 if (priv->dma_cap.estsel)
5645 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5646 &priv->xstats, tx_cnt);
5647
5648 if (priv->dma_cap.fpesel) {
5649 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5650 priv->dev);
5651
5652 stmmac_fpe_event_status(priv, status);
5653 }
5654
/* To handle GMAC own interrupts */
5656 if ((priv->plat->has_gmac) || xmac) {
5657 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5658
5659 if (unlikely(status)) {
/* For LPI we need to save the tx status */
5661 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5662 priv->tx_path_in_lpi_mode = true;
5663 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5664 priv->tx_path_in_lpi_mode = false;
5665 }
5666
5667 for (queue = 0; queue < queues_count; queue++) {
5668 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5669 queue);
5670 }
5671
/* PCS link status */
5673 if (priv->hw->pcs) {
5674 if (priv->xstats.pcs_link)
5675 netif_carrier_on(priv->dev);
5676 else
5677 netif_carrier_off(priv->dev);
5678 }
5679
5680 stmmac_timestamp_interrupt(priv, priv);
5681 }
5682}
5683
/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It can call:
 * o DMA service routine (to manage incoming frame reception and
 *   transmission status)
 * o Core interrupts (to manage the MAC core interrupts)
 * Return value: IRQ_HANDLED.
 */
5695static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5696{
5697 struct net_device *dev = (struct net_device *)dev_id;
5698 struct stmmac_priv *priv = netdev_priv(dev);
5699
/* Check if adapter is up */
5701 if (test_bit(STMMAC_DOWN, &priv->state))
5702 return IRQ_HANDLED;
5703
/* Check if a fatal error happened */
5705 if (stmmac_safety_feat_interrupt(priv))
5706 return IRQ_HANDLED;
5707
/* To handle Common interrupts */
5709 stmmac_common_interrupt(priv);
5710
/* To handle DMA interrupts */
5712 stmmac_dma_interrupt(priv);
5713
5714 return IRQ_HANDLED;
5715}
5716
5717static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5718{
5719 struct net_device *dev = (struct net_device *)dev_id;
5720 struct stmmac_priv *priv = netdev_priv(dev);
5721
5722 if (unlikely(!dev)) {
5723 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5724 return IRQ_NONE;
5725 }
5726
/* Check if adapter is up */
5728 if (test_bit(STMMAC_DOWN, &priv->state))
5729 return IRQ_HANDLED;
5730
/* To handle Common interrupts */
5732 stmmac_common_interrupt(priv);
5733
5734 return IRQ_HANDLED;
5735}
5736
5737static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5738{
5739 struct net_device *dev = (struct net_device *)dev_id;
5740 struct stmmac_priv *priv = netdev_priv(dev);
5741
5742 if (unlikely(!dev)) {
5743 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5744 return IRQ_NONE;
5745 }
5746
/* Check if adapter is up */
5748 if (test_bit(STMMAC_DOWN, &priv->state))
5749 return IRQ_HANDLED;
5750
/* Check if a fatal error happened */
5752 stmmac_safety_feat_interrupt(priv);
5753
5754 return IRQ_HANDLED;
5755}
5756
5757static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5758{
5759 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5760 int chan = tx_q->queue_index;
5761 struct stmmac_priv *priv;
5762 int status;
5763
5764 priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5765
5766 if (unlikely(!data)) {
5767 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5768 return IRQ_NONE;
5769 }
5770
/* Check if adapter is up */
5772 if (test_bit(STMMAC_DOWN, &priv->state))
5773 return IRQ_HANDLED;
5774
5775 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5776
5777 if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
5779 stmmac_bump_dma_threshold(priv, chan);
5780 } else if (unlikely(status == tx_hard_error)) {
5781 stmmac_tx_err(priv, chan);
5782 }
5783
5784 return IRQ_HANDLED;
5785}
5786
5787static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5788{
5789 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5790 int chan = rx_q->queue_index;
5791 struct stmmac_priv *priv;
5792
5793 priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5794
5795 if (unlikely(!data)) {
5796 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5797 return IRQ_NONE;
5798 }
5799
/* Check if adapter is up */
5801 if (test_bit(STMMAC_DOWN, &priv->state))
5802 return IRQ_HANDLED;
5803
5804 stmmac_napi_check(priv, chan, DMA_DIR_RX);
5805
5806 return IRQ_HANDLED;
5807}
5808
5809#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
5813static void stmmac_poll_controller(struct net_device *dev)
5814{
5815 struct stmmac_priv *priv = netdev_priv(dev);
5816 int i;
5817
/* If adapter is down, do nothing */
5819 if (test_bit(STMMAC_DOWN, &priv->state))
5820 return;
5821
5822 if (priv->plat->multi_msi_en) {
5823 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5824 stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5825
5826 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5827 stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5828 } else {
5829 disable_irq(dev->irq);
5830 stmmac_interrupt(dev->irq, dev);
5831 enable_irq(dev->irq);
5832 }
5833}
5834#endif
5835
/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
5845static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5846{
5847 struct stmmac_priv *priv = netdev_priv(dev);
5848 int ret = -EOPNOTSUPP;
5849
5850 if (!netif_running(dev))
5851 return -EINVAL;
5852
5853 switch (cmd) {
5854 case SIOCGMIIPHY:
5855 case SIOCGMIIREG:
5856 case SIOCSMIIREG:
5857 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5858 break;
5859 case SIOCSHWTSTAMP:
5860 ret = stmmac_hwtstamp_set(dev, rq);
5861 break;
5862 case SIOCGHWTSTAMP:
5863 ret = stmmac_hwtstamp_get(dev, rq);
5864 break;
5865 default:
5866 break;
5867 }
5868
5869 return ret;
5870}
5871
5872static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5873 void *cb_priv)
5874{
5875 struct stmmac_priv *priv = cb_priv;
5876 int ret = -EOPNOTSUPP;
5877
5878 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5879 return ret;
5880
5881 __stmmac_disable_all_queues(priv);
5882
5883 switch (type) {
5884 case TC_SETUP_CLSU32:
5885 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5886 break;
5887 case TC_SETUP_CLSFLOWER:
5888 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5889 break;
5890 default:
5891 break;
5892 }
5893
5894 stmmac_enable_all_queues(priv);
5895 return ret;
5896}
5897
5898static LIST_HEAD(stmmac_block_cb_list);
5899
5900static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5901 void *type_data)
5902{
5903 struct stmmac_priv *priv = netdev_priv(ndev);
5904
5905 switch (type) {
5906 case TC_SETUP_BLOCK:
5907 return flow_block_cb_setup_simple(type_data,
5908 &stmmac_block_cb_list,
5909 stmmac_setup_tc_block_cb,
5910 priv, priv, true);
5911 case TC_SETUP_QDISC_CBS:
5912 return stmmac_tc_setup_cbs(priv, priv, type_data);
5913 case TC_SETUP_QDISC_TAPRIO:
5914 return stmmac_tc_setup_taprio(priv, priv, type_data);
5915 case TC_SETUP_QDISC_ETF:
5916 return stmmac_tc_setup_etf(priv, priv, type_data);
5917 default:
5918 return -EOPNOTSUPP;
5919 }
5920}
5921
5922static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5923 struct net_device *sb_dev)
5924{
5925 int gso = skb_shinfo(skb)->gso_type;
5926
5927 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
/* There is no way to determine the number of TSO/USO
 * capable Queues. Let's use always the Queue 0
 * because if TSO/USO is supported then at least this
 * one will be capable.
 */
5934 return 0;
5935 }
5936
5937 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5938}
5939
5940static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5941{
5942 struct stmmac_priv *priv = netdev_priv(ndev);
5943 int ret = 0;
5944
5945 ret = pm_runtime_get_sync(priv->device);
5946 if (ret < 0) {
5947 pm_runtime_put_noidle(priv->device);
5948 return ret;
5949 }
5950
5951 ret = eth_mac_addr(ndev, addr);
5952 if (ret)
5953 goto set_mac_error;
5954
5955 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5956
5957set_mac_error:
5958 pm_runtime_put(priv->device);
5959
5960 return ret;
5961}
5962
5963#ifdef CONFIG_DEBUG_FS
5964static struct dentry *stmmac_fs_dir;
5965
5966static void sysfs_display_ring(void *head, int size, int extend_desc,
5967 struct seq_file *seq, dma_addr_t dma_phy_addr)
5968{
5969 int i;
5970 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5971 struct dma_desc *p = (struct dma_desc *)head;
5972 dma_addr_t dma_addr;
5973
5974 for (i = 0; i < size; i++) {
5975 if (extend_desc) {
5976 dma_addr = dma_phy_addr + i * sizeof(*ep);
5977 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5978 i, &dma_addr,
5979 le32_to_cpu(ep->basic.des0),
5980 le32_to_cpu(ep->basic.des1),
5981 le32_to_cpu(ep->basic.des2),
5982 le32_to_cpu(ep->basic.des3));
5983 ep++;
5984 } else {
5985 dma_addr = dma_phy_addr + i * sizeof(*p);
5986 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5987 i, &dma_addr,
5988 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5989 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5990 p++;
5991 }
5992 seq_printf(seq, "\n");
5993 }
5994}
5995
5996static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5997{
5998 struct net_device *dev = seq->private;
5999 struct stmmac_priv *priv = netdev_priv(dev);
6000 u32 rx_count = priv->plat->rx_queues_to_use;
6001 u32 tx_count = priv->plat->tx_queues_to_use;
6002 u32 queue;
6003
6004 if ((dev->flags & IFF_UP) == 0)
6005 return 0;
6006
6007 for (queue = 0; queue < rx_count; queue++) {
6008 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6009
6010 seq_printf(seq, "RX Queue %d:\n", queue);
6011
6012 if (priv->extend_desc) {
6013 seq_printf(seq, "Extended descriptor ring:\n");
6014 sysfs_display_ring((void *)rx_q->dma_erx,
6015 priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6016 } else {
6017 seq_printf(seq, "Descriptor ring:\n");
6018 sysfs_display_ring((void *)rx_q->dma_rx,
6019 priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6020 }
6021 }
6022
6023 for (queue = 0; queue < tx_count; queue++) {
6024 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6025
6026 seq_printf(seq, "TX Queue %d:\n", queue);
6027
6028 if (priv->extend_desc) {
6029 seq_printf(seq, "Extended descriptor ring:\n");
6030 sysfs_display_ring((void *)tx_q->dma_etx,
6031 priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6032 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6033 seq_printf(seq, "Descriptor ring:\n");
6034 sysfs_display_ring((void *)tx_q->dma_tx,
6035 priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6036 }
6037 }
6038
6039 return 0;
6040}
6041DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6042
6043static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6044{
6045 struct net_device *dev = seq->private;
6046 struct stmmac_priv *priv = netdev_priv(dev);
6047
6048 if (!priv->hw_cap_support) {
6049 seq_printf(seq, "DMA HW features not supported\n");
6050 return 0;
6051 }
6052
6053 seq_printf(seq, "==============================\n");
6054 seq_printf(seq, "\tDMA HW features\n");
6055 seq_printf(seq, "==============================\n");
6056
6057 seq_printf(seq, "\t10/100 Mbps: %s\n",
6058 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6059 seq_printf(seq, "\t1000 Mbps: %s\n",
6060 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6061 seq_printf(seq, "\tHalf duplex: %s\n",
6062 (priv->dma_cap.half_duplex) ? "Y" : "N");
6063 seq_printf(seq, "\tHash Filter: %s\n",
6064 (priv->dma_cap.hash_filter) ? "Y" : "N");
6065 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6066 (priv->dma_cap.multi_addr) ? "Y" : "N");
6067 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6068 (priv->dma_cap.pcs) ? "Y" : "N");
6069 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6070 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6071 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6072 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6073 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6074 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6075 seq_printf(seq, "\tRMON module: %s\n",
6076 (priv->dma_cap.rmon) ? "Y" : "N");
6077 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6078 (priv->dma_cap.time_stamp) ? "Y" : "N");
6079 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6080 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6081 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6082 (priv->dma_cap.eee) ? "Y" : "N");
6083 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6084 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6085 (priv->dma_cap.tx_coe) ? "Y" : "N");
6086 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6087 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6088 (priv->dma_cap.rx_coe) ? "Y" : "N");
6089 } else {
6090 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6091 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6092 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6093 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6094 }
6095 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6096 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6097 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6098 priv->dma_cap.number_rx_channel);
6099 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6100 priv->dma_cap.number_tx_channel);
6101 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6102 priv->dma_cap.number_rx_queues);
6103 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6104 priv->dma_cap.number_tx_queues);
6105 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6106 (priv->dma_cap.enh_desc) ? "Y" : "N");
6107 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6108 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6109 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6110 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6111 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6112 priv->dma_cap.pps_out_num);
6113 seq_printf(seq, "\tSafety Features: %s\n",
6114 priv->dma_cap.asp ? "Y" : "N");
6115 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6116 priv->dma_cap.frpsel ? "Y" : "N");
6117 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6118 priv->dma_cap.addr64);
6119 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6120 priv->dma_cap.rssen ? "Y" : "N");
6121 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6122 priv->dma_cap.vlhash ? "Y" : "N");
6123 seq_printf(seq, "\tSplit Header: %s\n",
6124 priv->dma_cap.sphen ? "Y" : "N");
6125 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6126 priv->dma_cap.vlins ? "Y" : "N");
6127 seq_printf(seq, "\tDouble VLAN: %s\n",
6128 priv->dma_cap.dvlan ? "Y" : "N");
6129 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6130 priv->dma_cap.l3l4fnum);
6131 seq_printf(seq, "\tARP Offloading: %s\n",
6132 priv->dma_cap.arpoffsel ? "Y" : "N");
6133 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6134 priv->dma_cap.estsel ? "Y" : "N");
6135 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6136 priv->dma_cap.fpesel ? "Y" : "N");
6137 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6138 priv->dma_cap.tbssel ? "Y" : "N");
6139 return 0;
6140}
6141DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6142
/* Use network device events to rename debugfs file names. */
6145static int stmmac_device_event(struct notifier_block *unused,
6146 unsigned long event, void *ptr)
6147{
6148 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6149 struct stmmac_priv *priv = netdev_priv(dev);
6150
6151 if (dev->netdev_ops != &stmmac_netdev_ops)
6152 goto done;
6153
6154 switch (event) {
6155 case NETDEV_CHANGENAME:
6156 if (priv->dbgfs_dir)
6157 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6158 priv->dbgfs_dir,
6159 stmmac_fs_dir,
6160 dev->name);
6161 break;
6162 }
6163done:
6164 return NOTIFY_DONE;
6165}
6166
6167static struct notifier_block stmmac_notifier = {
6168 .notifier_call = stmmac_device_event,
6169};
6170
6171static void stmmac_init_fs(struct net_device *dev)
6172{
6173 struct stmmac_priv *priv = netdev_priv(dev);
6174
6175 rtnl_lock();
6176
/* Create per netdev entries */
6178 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6179
/* Entry to report DMA RX/TX rings */
6181 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6182 &stmmac_rings_status_fops);
6183
/* Entry to report the DMA HW features */
6185 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6186 &stmmac_dma_cap_fops);
6187
6188 rtnl_unlock();
6189}
6190
6191static void stmmac_exit_fs(struct net_device *dev)
6192{
6193 struct stmmac_priv *priv = netdev_priv(dev);
6194
6195 debugfs_remove_recursive(priv->dbgfs_dir);
6196}
6197#endif
6198
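/* Compute the little-endian CRC32 of a VLAN ID, as used to index
 * the MAC VLAN hash filter.
 */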
6199static u32 stmmac_vid_crc32_le(__le16 vid_le)
6200{
6201 unsigned char *data = (unsigned char *)&vid_le;
6202 unsigned char data_byte = 0;
6203 u32 crc = ~0x0;
6204 u32 temp = 0;
6205 int i, bits;
6206
6207 bits = get_bitmask_order(VLAN_VID_MASK);
6208 for (i = 0; i < bits; i++) {
6209 if ((i % 8) == 0)
6210 data_byte = data[i / 8];
6211
6212 temp = ((crc & 1) ^ data_byte) & 1;
6213 crc >>= 1;
6214 data_byte >>= 1;
6215
6216 if (temp)
6217 crc ^= 0xedb88320;
6218 }
6219
6220 return crc;
6221}
6222
6223static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6224{
6225 u32 crc, hash = 0;
6226 __le16 pmatch = 0;
6227 int count = 0;
6228 u16 vid = 0;
6229
6230 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6231 __le16 vid_le = cpu_to_le16(vid);
6232 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6233 hash |= (1 << crc);
6234 count++;
6235 }
6236
6237 if (!priv->dma_cap.vlhash) {
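/* VID = 0 always passes the filter */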
6238 if (count > 2)
6239 return -EOPNOTSUPP;
6240
6241 pmatch = cpu_to_le16(vid);
6242 hash = 0;
6243 }
6244
6245 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6246}
6247
6248static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6249{
6250 struct stmmac_priv *priv = netdev_priv(ndev);
6251 bool is_double = false;
6252 int ret;
6253
6254 if (be16_to_cpu(proto) == ETH_P_8021AD)
6255 is_double = true;
6256
6257 set_bit(vid, priv->active_vlans);
6258 ret = stmmac_vlan_update(priv, is_double);
6259 if (ret) {
6260 clear_bit(vid, priv->active_vlans);
6261 return ret;
6262 }
6263
6264 if (priv->hw->num_vlan) {
6265 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6266 if (ret)
6267 return ret;
6268 }
6269
6270 return 0;
6271}
6272
6273static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6274{
6275 struct stmmac_priv *priv = netdev_priv(ndev);
6276 bool is_double = false;
6277 int ret;
6278
6279 ret = pm_runtime_get_sync(priv->device);
6280 if (ret < 0) {
6281 pm_runtime_put_noidle(priv->device);
6282 return ret;
6283 }
6284
6285 if (be16_to_cpu(proto) == ETH_P_8021AD)
6286 is_double = true;
6287
6288 clear_bit(vid, priv->active_vlans);
6289
6290 if (priv->hw->num_vlan) {
6291 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6292 if (ret)
6293 goto del_vlan_error;
6294 }
6295
6296 ret = stmmac_vlan_update(priv, is_double);
6297
6298del_vlan_error:
6299 pm_runtime_put(priv->device);
6300
6301 return ret;
6302}
6303
6304static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6305{
6306 struct stmmac_priv *priv = netdev_priv(dev);
6307
6308 switch (bpf->command) {
6309 case XDP_SETUP_PROG:
6310 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6311 case XDP_SETUP_XSK_POOL:
6312 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6313 bpf->xsk.queue_id);
6314 default:
6315 return -EOPNOTSUPP;
6316 }
6317}
6318
6319static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6320 struct xdp_frame **frames, u32 flags)
6321{
6322 struct stmmac_priv *priv = netdev_priv(dev);
6323 int cpu = smp_processor_id();
6324 struct netdev_queue *nq;
6325 int i, nxmit = 0;
6326 int queue;
6327
6328 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6329 return -ENETDOWN;
6330
6331 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6332 return -EINVAL;
6333
6334 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6335 nq = netdev_get_tx_queue(priv->dev, queue);
6336
6337 __netif_tx_lock(nq, cpu);
6338
6339 txq_trans_cond_update(nq);
6340
6341 for (i = 0; i < num_frames; i++) {
6342 int res;
6343
6344 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6345 if (res == STMMAC_XDP_CONSUMED)
6346 break;
6347
6348 nxmit++;
6349 }
6350
6351 if (flags & XDP_XMIT_FLUSH) {
6352 stmmac_flush_tx_descriptors(priv, queue);
6353 stmmac_tx_timer_arm(priv, queue);
6354 }
6355
6356 __netif_tx_unlock(nq);
6357
6358 return nxmit;
6359}
6360
6361void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6362{
6363 struct stmmac_channel *ch = &priv->channel[queue];
6364 unsigned long flags;
6365
6366 spin_lock_irqsave(&ch->lock, flags);
6367 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6368 spin_unlock_irqrestore(&ch->lock, flags);
6369
6370 stmmac_stop_rx_dma(priv, queue);
6371 __free_dma_rx_desc_resources(priv, queue);
6372}
6373
6374void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6375{
6376 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6377 struct stmmac_channel *ch = &priv->channel[queue];
6378 unsigned long flags;
6379 u32 buf_size;
6380 int ret;
6381
6382 ret = __alloc_dma_rx_desc_resources(priv, queue);
6383 if (ret) {
6384 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6385 return;
6386 }
6387
6388 ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6389 if (ret) {
6390 __free_dma_rx_desc_resources(priv, queue);
6391 netdev_err(priv->dev, "Failed to init RX desc.\n");
6392 return;
6393 }
6394
6395 stmmac_clear_rx_descriptors(priv, queue);
6396
6397 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6398 rx_q->dma_rx_phy, rx_q->queue_index);
6399
6400 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6401 sizeof(struct dma_desc));
6402 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6403 rx_q->rx_tail_addr, rx_q->queue_index);
6404
6405 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6406 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6407 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6408 buf_size,
6409 rx_q->queue_index);
6410 } else {
6411 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6412 priv->dma_buf_sz,
6413 rx_q->queue_index);
6414 }
6415
6416 stmmac_start_rx_dma(priv, queue);
6417
6418 spin_lock_irqsave(&ch->lock, flags);
6419 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6420 spin_unlock_irqrestore(&ch->lock, flags);
6421}
6422
6423void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6424{
6425 struct stmmac_channel *ch = &priv->channel[queue];
6426 unsigned long flags;
6427
6428 spin_lock_irqsave(&ch->lock, flags);
6429 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6430 spin_unlock_irqrestore(&ch->lock, flags);
6431
6432 stmmac_stop_tx_dma(priv, queue);
6433 __free_dma_tx_desc_resources(priv, queue);
6434}
6435
6436void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6437{
6438 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6439 struct stmmac_channel *ch = &priv->channel[queue];
6440 unsigned long flags;
6441 int ret;
6442
6443 ret = __alloc_dma_tx_desc_resources(priv, queue);
6444 if (ret) {
6445 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6446 return;
6447 }
6448
6449 ret = __init_dma_tx_desc_rings(priv, queue);
6450 if (ret) {
6451 __free_dma_tx_desc_resources(priv, queue);
6452 netdev_err(priv->dev, "Failed to init TX desc.\n");
6453 return;
6454 }
6455
6456 stmmac_clear_tx_descriptors(priv, queue);
6457
6458 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6459 tx_q->dma_tx_phy, tx_q->queue_index);
6460
6461 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6462 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6463
6464 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6465 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6466 tx_q->tx_tail_addr, tx_q->queue_index);
6467
6468 stmmac_start_tx_dma(priv, queue);
6469
6470 spin_lock_irqsave(&ch->lock, flags);
6471 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6472 spin_unlock_irqrestore(&ch->lock, flags);
6473}
6474
6475void stmmac_xdp_release(struct net_device *dev)
6476{
6477 struct stmmac_priv *priv = netdev_priv(dev);
6478 u32 chan;
6479
/* Disable NAPI process */
6481 stmmac_disable_all_queues(priv);
6482
6483 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6484 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6485
/* Free the IRQ lines */
6487 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6488
/* Stop TX/RX DMA channels */
6490 stmmac_stop_all_dma(priv);
6491
/* Release and free the Rx/Tx resources */
6493 free_dma_desc_resources(priv);
6494
/* Disable the MAC Rx/Tx */
6496 stmmac_mac_set(priv, priv->ioaddr, false);
6497
6498
/* set trans_start so we don't get spurious
 * watchdogs during reset
 */
6501 netif_trans_update(dev);
6502 netif_carrier_off(dev);
6503}
6504
6505int stmmac_xdp_open(struct net_device *dev)
6506{
6507 struct stmmac_priv *priv = netdev_priv(dev);
6508 u32 rx_cnt = priv->plat->rx_queues_to_use;
6509 u32 tx_cnt = priv->plat->tx_queues_to_use;
6510 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6511 struct stmmac_rx_queue *rx_q;
6512 struct stmmac_tx_queue *tx_q;
6513 u32 buf_size;
6514 bool sph_en;
6515 u32 chan;
6516 int ret;
6517
6518 ret = alloc_dma_desc_resources(priv);
6519 if (ret < 0) {
6520 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6521 __func__);
6522 goto dma_desc_error;
6523 }
6524
6525 ret = init_dma_desc_rings(dev, GFP_KERNEL);
6526 if (ret < 0) {
6527 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6528 __func__);
6529 goto init_error;
6530 }
6531
/* DMA CSR Channel configuration */
6533 for (chan = 0; chan < dma_csr_ch; chan++) {
6534 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6535 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6536 }
6537
/* Adjust Split header */
6539 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6540
/* DMA RX Channel Configuration */
6542 for (chan = 0; chan < rx_cnt; chan++) {
6543 rx_q = &priv->rx_queue[chan];
6544
6545 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6546 rx_q->dma_rx_phy, chan);
6547
6548 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6549 (rx_q->buf_alloc_num *
6550 sizeof(struct dma_desc));
6551 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6552 rx_q->rx_tail_addr, chan);
6553
6554 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6555 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6556 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6557 buf_size,
6558 rx_q->queue_index);
6559 } else {
6560 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6561 priv->dma_buf_sz,
6562 rx_q->queue_index);
6563 }
6564
6565 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6566 }
6567
/* DMA TX Channel Configuration */
6569 for (chan = 0; chan < tx_cnt; chan++) {
6570 tx_q = &priv->tx_queue[chan];
6571
6572 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6573 tx_q->dma_tx_phy, chan);
6574
6575 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6576 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6577 tx_q->tx_tail_addr, chan);
6578
6579 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6580 tx_q->txtimer.function = stmmac_tx_timer;
6581 }
6582
/* Enable the MAC Rx/Tx */
6584 stmmac_mac_set(priv, priv->ioaddr, true);
6585
/* Start Rx & Tx DMA Channels */
6587 stmmac_start_all_dma(priv);
6588
6589 ret = stmmac_request_irq(dev);
6590 if (ret)
6591 goto irq_error;
6592
/* Enable NAPI process */
6594 stmmac_enable_all_queues(priv);
6595 netif_carrier_on(dev);
6596 netif_tx_start_all_queues(dev);
6597 stmmac_enable_all_dma_irq(priv);
6598
6599 return 0;
6600
6601irq_error:
6602 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6603 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6604
6605 stmmac_hw_teardown(dev);
6606init_error:
6607 free_dma_desc_resources(priv);
6608dma_desc_error:
6609 return ret;
6610}
6611
6612int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6613{
6614 struct stmmac_priv *priv = netdev_priv(dev);
6615 struct stmmac_rx_queue *rx_q;
6616 struct stmmac_tx_queue *tx_q;
6617 struct stmmac_channel *ch;
6618
6619 if (test_bit(STMMAC_DOWN, &priv->state) ||
6620 !netif_carrier_ok(priv->dev))
6621 return -ENETDOWN;
6622
6623 if (!stmmac_xdp_is_enabled(priv))
6624 return -ENXIO;
6625
6626 if (queue >= priv->plat->rx_queues_to_use ||
6627 queue >= priv->plat->tx_queues_to_use)
6628 return -EINVAL;
6629
6630 rx_q = &priv->rx_queue[queue];
6631 tx_q = &priv->tx_queue[queue];
6632 ch = &priv->channel[queue];
6633
6634 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6635 return -ENXIO;
6636
6637 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
/* EQoS does not have per-DMA channel SW interrupt,
 * so we schedule RX Napi straight away.
 */
6641 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6642 __napi_schedule(&ch->rxtx_napi);
6643 }
6644
6645 return 0;
6646}
6647
6648static const struct net_device_ops stmmac_netdev_ops = {
6649 .ndo_open = stmmac_open,
6650 .ndo_start_xmit = stmmac_xmit,
6651 .ndo_stop = stmmac_release,
6652 .ndo_change_mtu = stmmac_change_mtu,
6653 .ndo_fix_features = stmmac_fix_features,
6654 .ndo_set_features = stmmac_set_features,
6655 .ndo_set_rx_mode = stmmac_set_rx_mode,
6656 .ndo_tx_timeout = stmmac_tx_timeout,
6657 .ndo_eth_ioctl = stmmac_ioctl,
6658 .ndo_setup_tc = stmmac_setup_tc,
6659 .ndo_select_queue = stmmac_select_queue,
6660#ifdef CONFIG_NET_POLL_CONTROLLER
6661 .ndo_poll_controller = stmmac_poll_controller,
6662#endif
6663 .ndo_set_mac_address = stmmac_set_mac_address,
6664 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6665 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6666 .ndo_bpf = stmmac_bpf,
6667 .ndo_xdp_xmit = stmmac_xdp_xmit,
6668 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6669};
6670
6671static void stmmac_reset_subtask(struct stmmac_priv *priv)
6672{
6673 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6674 return;
6675 if (test_bit(STMMAC_DOWN, &priv->state))
6676 return;
6677
6678 netdev_err(priv->dev, "Reset adapter.\n");
6679
6680 rtnl_lock();
6681 netif_trans_update(priv->dev);
6682 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6683 usleep_range(1000, 2000);
6684
6685 set_bit(STMMAC_DOWN, &priv->state);
6686 dev_close(priv->dev);
6687 dev_open(priv->dev, NULL);
6688 clear_bit(STMMAC_DOWN, &priv->state);
6689 clear_bit(STMMAC_RESETING, &priv->state);
6690 rtnl_unlock();
6691}
6692
6693static void stmmac_service_task(struct work_struct *work)
6694{
6695 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6696 service_task);
6697
6698 stmmac_reset_subtask(priv);
6699 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6700}
6701
/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!priv->plat->use_phy_wol;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
				(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
				ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}

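/* Register one NAPI context per channel: rx_napi and tx_napi for the
 * regular datapath, plus a combined rxtx_napi that is only scheduled by
 * the XDP/AF_XDP zero-copy path (see stmmac_xsk_wakeup() above).
 */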
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_tx_napi_add(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx,
					  NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx,
				       NAPI_POLL_WEIGHT);
		}
	}
}

static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->rxtx_napi);
	}
}

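/* ethtool -L helper: tear down the channel NAPIs, update the RX/TX queue
 * counts and re-create them; the interface is closed and re-opened around
 * the change if it is running.
 */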
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

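/* ethtool -G helper: update the DMA ring sizes, closing and re-opening
 * the interface around the change if it is running.
 */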
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
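/* Link-partner FPE handshake task: keep sending Verify mPackets until both
 * the local and link-partner state machines report FPE_STATE_ENTERING_ON,
 * then program the hardware and mark both sides ON.
 */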
static void stmmac_fpe_lp_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						fpe_task);
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;
	bool *enable = &fpe_cfg->enable;
	int retries = 20;

	while (retries-- > 0) {
		/* Bail out immediately if FPE handshake is OFF */
		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
			break;

		if (*lo_state == FPE_STATE_ENTERING_ON &&
		    *lp_state == FPE_STATE_ENTERING_ON) {
			stmmac_fpe_configure(priv, priv->ioaddr,
					     priv->plat->tx_queues_to_use,
					     priv->plat->rx_queues_to_use,
					     *enable);

			netdev_info(priv->dev, "configured FPE\n");

			*lo_state = FPE_STATE_ON;
			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
			break;
		}

		if ((*lo_state == FPE_STATE_CAPABLE ||
		     *lo_state == FPE_STATE_ENTERING_ON) &&
		    *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
				    *lo_state, *lp_state);
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		}

		msleep(500);
	}

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
}

void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}
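/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */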
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override the PHY address with the kernel parameter, if supplied */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
	 * 32/40/64 bit widths, but some SOCs support other widths (e.g.
	 * i.MX8MP supports 34 bits, which maps to 40 bits in
	 * MAC_HW_Feature1[ADDR64]). So overwrite dma_cap.addr64 according
	 * to the real HW design.
	 */
	if (priv->plat->addr64)
		priv->dma_cap.addr64 = priv->plat->addr64;

	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);

	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the actual clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed\n",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	if (priv->plat->speed_mode_2500)
		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);

	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
		ret = stmmac_xpcs_setup(priv->mii);
		if (ret)
			goto error_xpcs_setup;
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			goto error_serdes_powerup;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_serdes_powerup:
	unregister_netdev(ndev);
error_netdev_register:
	phylink_destroy(priv->phylink);
error_xpcs_setup:
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
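/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status, and releases the DMA descriptor rings.
 */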
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	pm_runtime_get_sync(dev);
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

	/* Serdes power down needs to happen after the VLAN filter
	 * is deleted, which is triggered by unregister_netdev().
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
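/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */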
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
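/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */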
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}
}
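/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */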
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			/* chain_mode is unsigned int, so parse it as one */
			if (kstrtouint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");