#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_ptp.h"
#include "dwxgmac2.h"

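/*
 * Program the static TX/RX configuration bits and the default interrupt
 * mask. When a fixed pass-through speed is configured (hw->ps), force the
 * TX speed selection field accordingly.
 */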
static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}

static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx |= XGMAC_CONFIG_TE;
		rx |= XGMAC_CONFIG_RE;
	} else {
		tx &= ~XGMAC_CONFIG_TE;
		rx &= ~XGMAC_CONFIG_RE;
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (hw->rx_csum)
		value |= XGMAC_CONFIG_IPC;
	else
		value &= ~XGMAC_CONFIG_IPC;
	writel(value, ioaddr + XGMAC_RX_CONFIG);

	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
}

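/*
 * Enable the RX queue either for AVB (field value 0x1) or DCB/generic
 * (field value 0x2) traffic. The enable field is cleared first, so a queue
 * configured with any other mode is left disabled.
 */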
static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
}

static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSRQ(queue);
	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSTC(queue);
	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_RAA;

	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= XGMAC_RAA;
		break;
	default:
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);
}

static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool ets = true;
	u32 value;
	int i;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= XGMAC_DWRR;
		break;
	default:
		ets = false;
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Select ETS per TC when a weighted algorithm was chosen, SP otherwise */
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
		value &= ~XGMAC_TSA;
		if (ets)
			value |= XGMAC_ETS;
		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
	}
}

static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}

static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_QxMDMACH(queue);
	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_config_cbs(struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	value &= ~XGMAC_TSA;
	value |= XGMAC_CC | XGMAC_CBS;
	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}

static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

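/*
 * MAC-level interrupt handling: only the PMT and LPI causes are decoded
 * here. The return value carries the CORE_IRQ_* LPI flags consumed by the
 * stmmac core; the PMT register read clears the wake-up status.
 */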
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}

static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	int ret = 0;
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (status & BIT(chan)) {
		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

		if (chan_status & XGMAC_RXOVFIS)
			ret |= CORE_IRQ_MTL_RX_OVERFLOW;

		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	}

	return ret;
}

static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}

static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);

		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}

static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (addr[5] << 8) | addr[4];
	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));

	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}

static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);

	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		value |= XGMAC_TXCGE;

	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	if (link)
		value |= XGMAC_PLS;
	else
		value &= ~XGMAC_PLS;
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
}

static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int numhashregs, regs;

	switch (mcbitslog2) {
	case 6:
		numhashregs = 2;
		break;
	case 7:
		numhashregs = 4;
		break;
	case 8:
		numhashregs = 8;
		break;
	default:
		return;
	}

	for (regs = 0; regs < numhashregs; regs++)
		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
}

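/*
 * Program the RX packet filter: promiscuous and all-multicast pass-through,
 * multicast hash filtering based on a CRC32 of the address, and perfect
 * unicast filtering through the MAC address registers. When more unicast
 * addresses are requested than the hardware can hold, fall back to
 * promiscuous mode.
 */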
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}

static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		value |= XGMAC_CONFIG_LM;
	else
		value &= ~XGMAC_CONFIG_LM;

	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

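/*
 * RSS key and indirection table entries are written through an indirect
 * interface: the value goes into XGMAC_RSS_DATA, then the index and type
 * are set in XGMAC_RSS_ADDR together with the OB (operation busy) bit, and
 * the write completes once hardware clears OB again.
 */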
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl = 0;

	writel(val, ioaddr + XGMAC_RSS_DATA);
	ctrl |= idx << XGMAC_RSSIA_SHIFT;
	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
	ctrl |= XGMAC_OB;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}

static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}

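/*
 * Three VLAN filtering modes are programmed here: hash filtering when a
 * hash is supplied, a single perfect match otherwise, or filtering fully
 * disabled when neither is set. Double (S-VLAN over C-VLAN) tagging is
 * handled through the EDVLP/ESVL/DOVLTC bits.
 */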
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}

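/*
 * Safety feature (ECC, parity and FSM timeout) support. Each 32-bit
 * interrupt status register is described by a 32-entry table below;
 * dwxgmac3_log_error() walks the set bits, logs the matching description
 * and bumps the corresponding counter in stmmac_safety_stats.
 */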
struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)

static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
			   "correctable" : "uncorrectable", module_name,
			   desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "CPI", "Control Register Parity Check Error" },
};

static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
};

static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
};

static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}

static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Clear the MTL ECC control register */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable the MTL safety (memory correctable error) interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE;
	value |= XGMAC_ECEIE;
	value |= XGMAC_RXCEIE;
	value |= XGMAC_TXCEIE;
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable the DMA safety (memory correctable error) interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE;
	value |= XGMAC_TCEIE;
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Nothing more to do for the lowest safety level */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable FSM parity and timeout checks */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN;
	value |= XGMAC_TMOUTEN;
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}

static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};

static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
				     int index, unsigned long *count,
				     const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
		return -EINVAL;
	if (!dwxgmac3_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
	return 0;
}

static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);

	val &= ~XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}

static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + XGMAC_MTL_OPMODE);
	val |= XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);
}

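/*
 * Flexible RX Parser entries are written word by word through the indirect
 * MTL_RXP_IACC_CTRL_ST/DATA register pair: wait for STARTBUSY to clear,
 * load the data word, program the entry address, select a write operation,
 * then set STARTBUSY and wait for completion.
 */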
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
			    unsigned int count, u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Skip unused, already-programmed, terminating and fragment entries */
		if (!entry->in_use)
			continue;

		if (entry->in_hw)
			continue;

		if (entry->is_last)
			continue;

		if (entry->is_frag)
			continue;

		if (entry->prio < curr_prio)
			continue;

		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

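/*
 * Reprogram the whole Flexible RX Parser table: RX is temporarily disabled,
 * entries are written in priority order with their fragment partners kept
 * adjacent, the terminating entries go last, and finally the number of
 * valid/parsable entries is latched and the parser re-enabled.
 */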
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Mark all entries as not yet programmed in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Program the entries in priority order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Program the terminating (all-pass) entries last */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume the number of parsable entries equals the number of valid ones */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}

static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      value, value & XGMAC_TXTSC, 100, 10000))
		return -EBUSY;

	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	return 0;
}

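/*
 * Flexible PPS output: the target start time, interval and pulse width are
 * derived from the requested period and the current sub-second increment
 * before the PPS command is issued through XGMAC_PPS_CONTROL.
 */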
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}

static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);

	value &= ~XGMAC_CONFIG_SARC;
	value |= val << XGMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + XGMAC_TX_CONFIG);
}

static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_VLAN_INCL);
	value |= XGMAC_VLAN_VLTI;
	value |= XGMAC_VLAN_CSVL;
	value &= ~XGMAC_VLAN_VLC;
	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
	writel(value, ioaddr + XGMAC_VLAN_INCL);
}

static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
			       !(value & XGMAC_XB), 100, 10000))
		return -EBUSY;
	return 0;
}

static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_TT | XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}

static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	writel(data, ioaddr + XGMAC_L3L4_DATA);

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}

static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6, only one of the SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	if (udp)
		value |= XGMAC_L4PEN0;
	else
		value &= ~XGMAC_L4PEN0;

	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		value |= XGMAC_CONFIG_ARPEN;
	else
		value &= ~XGMAC_CONFIG_ARPEN;
	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

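/*
 * EST (802.1Qbv) registers are also written through an indirect interface:
 * the data word is loaded into MTL_EST_GCL_DATA, the target (a GCL entry,
 * or a control register when GCRR is set) is selected, and SRWO starts the
 * transfer, which is polled until hardware clears it.
 */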
static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
{
	u32 ctrl;

	writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);

	ctrl = (reg << XGMAC_ADDR_SHIFT);
	ctrl |= gcl ? 0 : XGMAC_GCRR;

	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);

	ctrl |= XGMAC_SRWO;
	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);

	return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
					 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
}

static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
				  unsigned int ptp_rate)
{
	int i, ret = 0x0;
	u32 ctrl;

	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
	if (ret)
		return ret;

	for (i = 0; i < cfg->gcl_size; i++) {
		ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
		if (ret)
			return ret;
	}

	ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
	ctrl &= ~XGMAC_PTOV;
	ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
	if (cfg->enable)
		ctrl |= XGMAC_EEST | XGMAC_SSWL;
	else
		ctrl &= ~XGMAC_EEST;

	writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
	return 0;
}

static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
				   u32 num_rxq, bool enable)
{
	u32 value;

	if (!enable) {
		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);

		value &= ~XGMAC_EFPE;

		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
		return;
	}

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
	value &= ~XGMAC_RQ;
	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
	writel(value, ioaddr + XGMAC_RXQ_CTRL1);

	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
	value |= XGMAC_EFPE;
	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
}

const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};

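/*
 * Fill in the mac_device_info callbacks and constants (speed selection
 * bits, MDIO register layout, filter sizes) used by the stmmac core for
 * the XGMAC2 variant.
 */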
int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}