1
2
3
4
5
6
7#include <linux/bitrev.h>
8#include <linux/crc32.h>
9#include <linux/iopoll.h>
10#include "stmmac.h"
11#include "stmmac_ptp.h"
12#include "dwxlgmac2.h"
13#include "dwxgmac2.h"
14
/* Apply the driver's default TX/RX MAC configuration and unmask the
 * default interrupt sources.  NOTE(review): hw->ps appears to hold a
 * forced link speed (SPEED_*) for power-saving/PCS setups — when set,
 * the transmitter is force-enabled and the TX speed bits programmed
 * from hw->link; confirm against the callers of core_init.
 */
static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	/* OR the defaults on top of the reset values */
	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		/* Clear all speed-select bits before setting the forced one */
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	/* Unmask the default set of MAC interrupts */
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
49
50static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
51{
52 u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
53 u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
54
55 if (enable) {
56 tx |= XGMAC_CONFIG_TE;
57 rx |= XGMAC_CONFIG_RE;
58 } else {
59 tx &= ~XGMAC_CONFIG_TE;
60 rx &= ~XGMAC_CONFIG_RE;
61 }
62
63 writel(tx, ioaddr + XGMAC_TX_CONFIG);
64 writel(rx, ioaddr + XGMAC_RX_CONFIG);
65}
66
67static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
68{
69 void __iomem *ioaddr = hw->pcsr;
70 u32 value;
71
72 value = readl(ioaddr + XGMAC_RX_CONFIG);
73 if (hw->rx_csum)
74 value |= XGMAC_CONFIG_IPC;
75 else
76 value &= ~XGMAC_CONFIG_IPC;
77 writel(value, ioaddr + XGMAC_RX_CONFIG);
78
79 return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
80}
81
82static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
83 u32 queue)
84{
85 void __iomem *ioaddr = hw->pcsr;
86 u32 value;
87
88 value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
89 if (mode == MTL_QUEUE_AVB)
90 value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
91 else if (mode == MTL_QUEUE_DCB)
92 value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
93 writel(value, ioaddr + XGMAC_RXQ_CTRL0);
94}
95
96static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
97 u32 queue)
98{
99 void __iomem *ioaddr = hw->pcsr;
100 u32 value, reg;
101
102 reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
103 if (queue >= 4)
104 queue -= 4;
105
106 value = readl(ioaddr + reg);
107 value &= ~XGMAC_PSRQ(queue);
108 value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
109
110 writel(value, ioaddr + reg);
111}
112
113static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
114 u32 queue)
115{
116 void __iomem *ioaddr = hw->pcsr;
117 u32 value, reg;
118
119 reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
120 if (queue >= 4)
121 queue -= 4;
122
123 value = readl(ioaddr + reg);
124 value &= ~XGMAC_PSTC(queue);
125 value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
126
127 writel(value, ioaddr + reg);
128}
129
130static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
131 u32 rx_alg)
132{
133 void __iomem *ioaddr = hw->pcsr;
134 u32 value;
135
136 value = readl(ioaddr + XGMAC_MTL_OPMODE);
137 value &= ~XGMAC_RAA;
138
139 switch (rx_alg) {
140 case MTL_RX_ALGORITHM_SP:
141 break;
142 case MTL_RX_ALGORITHM_WSP:
143 value |= XGMAC_RAA;
144 break;
145 default:
146 break;
147 }
148
149 writel(value, ioaddr + XGMAC_MTL_OPMODE);
150}
151
152static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
153 u32 tx_alg)
154{
155 void __iomem *ioaddr = hw->pcsr;
156 bool ets = true;
157 u32 value;
158 int i;
159
160 value = readl(ioaddr + XGMAC_MTL_OPMODE);
161 value &= ~XGMAC_ETSALG;
162
163 switch (tx_alg) {
164 case MTL_TX_ALGORITHM_WRR:
165 value |= XGMAC_WRR;
166 break;
167 case MTL_TX_ALGORITHM_WFQ:
168 value |= XGMAC_WFQ;
169 break;
170 case MTL_TX_ALGORITHM_DWRR:
171 value |= XGMAC_DWRR;
172 break;
173 default:
174 ets = false;
175 break;
176 }
177
178 writel(value, ioaddr + XGMAC_MTL_OPMODE);
179
180
181 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
182 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
183 value &= ~XGMAC_TSA;
184 if (ets)
185 value |= XGMAC_ETS;
186 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
187 }
188}
189
190static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
191 u32 weight, u32 queue)
192{
193 void __iomem *ioaddr = hw->pcsr;
194
195 writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
196}
197
198static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
199 u32 chan)
200{
201 void __iomem *ioaddr = hw->pcsr;
202 u32 value, reg;
203
204 reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
205 if (queue >= 4)
206 queue -= 4;
207
208 value = readl(ioaddr + reg);
209 value &= ~XGMAC_QxMDMACH(queue);
210 value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
211
212 writel(value, ioaddr + reg);
213}
214
/* Program the Credit-Based Shaper parameters for one TX traffic class
 * and switch that class's transmission selection algorithm to CBS with
 * credit control (CC) enabled.
 */
static void dwxgmac2_config_cbs(struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	/* The quantum/weight register carries the idle slope in CBS mode
	 * (same register programmed by set_mtl_tx_queue_weight).
	 */
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	value &= ~XGMAC_TSA;
	value |= XGMAC_CC | XGMAC_CBS;
	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
232
233static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
234{
235 void __iomem *ioaddr = hw->pcsr;
236 int i;
237
238 for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
239 reg_space[i] = readl(ioaddr + i * 4);
240}
241
/* Handle the global MAC interrupt sources (PMT wake-up, LPI).
 *
 * Returns a CORE_IRQ_* bitmask for the TX LPI entry/exit events the
 * caller must act on; the counters in @x are bumped for every
 * recognized event.
 */
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	/* Only act on sources that are actually enabled */
	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		/* Value deliberately discarded — the read acks the PMT
		 * event (presumably read-to-clear; confirm vs databook).
		 */
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		/* RX LPI transitions are only counted, not reported */
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}
278
/* Check the MTL interrupt status for queue @chan.
 *
 * Returns CORE_IRQ_MTL_RX_OVERFLOW if the queue signalled an RX FIFO
 * overflow; all pending bits for that queue are cleared on the way out.
 */
static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	int ret = 0;
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (status & BIT(chan)) {
		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

		if (chan_status & XGMAC_RXOVFIS)
			ret |= CORE_IRQ_MTL_RX_OVERFLOW;

		/* Ack every interrupt bit for this queue */
		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	}

	return ret;
}
297
/* Configure flow control.  FLOW_RX enables honouring received pause
 * frames (one global bit); FLOW_TX enables pause-frame transmission
 * per TX queue, with @pause_time programmed only in full duplex.
 *
 * NOTE(review): neither direction is explicitly cleared when its flag
 * is absent from @fc, so disabling relies on prior register state —
 * confirm callers reset/reprogram the MAC first.
 */
static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}
318
/* Program Wake-on-LAN mode into the PMT register: magic packet and/or
 * unicast wake-up.  @mode == 0 clears power-down/wake enables.
 */
static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		/* Keep the receiver enabled so wake-up packets can be
		 * detected while the MAC is in power-down.
		 */
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}
336
337static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
338 unsigned char *addr, unsigned int reg_n)
339{
340 void __iomem *ioaddr = hw->pcsr;
341 u32 value;
342
343 value = (addr[5] << 8) | addr[4];
344 writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
345
346 value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
347 writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
348}
349
350static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
351 unsigned char *addr, unsigned int reg_n)
352{
353 void __iomem *ioaddr = hw->pcsr;
354 u32 hi_addr, lo_addr;
355
356
357 hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
358 lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
359
360
361 addr[0] = lo_addr & 0xff;
362 addr[1] = (lo_addr >> 8) & 0xff;
363 addr[2] = (lo_addr >> 16) & 0xff;
364 addr[3] = (lo_addr >> 24) & 0xff;
365 addr[4] = hi_addr & 0xff;
366 addr[5] = (hi_addr >> 8) & 0xff;
367}
368
369static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
370 bool en_tx_lpi_clockgating)
371{
372 void __iomem *ioaddr = hw->pcsr;
373 u32 value;
374
375 value = readl(ioaddr + XGMAC_LPI_CTRL);
376
377 value |= XGMAC_LPITXEN | XGMAC_LPITXA;
378 if (en_tx_lpi_clockgating)
379 value |= XGMAC_TXCGE;
380
381 writel(value, ioaddr + XGMAC_LPI_CTRL);
382}
383
384static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
385{
386 void __iomem *ioaddr = hw->pcsr;
387 u32 value;
388
389 value = readl(ioaddr + XGMAC_LPI_CTRL);
390 value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
391 writel(value, ioaddr + XGMAC_LPI_CTRL);
392}
393
394static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
395{
396 void __iomem *ioaddr = hw->pcsr;
397 u32 value;
398
399 value = readl(ioaddr + XGMAC_LPI_CTRL);
400 if (link)
401 value |= XGMAC_PLS;
402 else
403 value &= ~XGMAC_PLS;
404 writel(value, ioaddr + XGMAC_LPI_CTRL);
405}
406
407static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
408{
409 void __iomem *ioaddr = hw->pcsr;
410 u32 value;
411
412 value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
413 writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
414}
415
416static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
417 int mcbitslog2)
418{
419 int numhashregs, regs;
420
421 switch (mcbitslog2) {
422 case 6:
423 numhashregs = 2;
424 break;
425 case 7:
426 numhashregs = 4;
427 break;
428 case 8:
429 numhashregs = 8;
430 break;
431 default:
432 return;
433 }
434
435 for (regs = 0; regs < numhashregs; regs++)
436 writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
437}
438
/* Program the RX packet filter from the net_device state: promiscuous,
 * all-multicast, multicast hash filtering and unicast perfect
 * filtering, in that order of precedence.
 */
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		/* Pass everything, including control frames (PCF) */
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Hash can't hold all addresses: pass all multicast and
		 * saturate the hash table.
		 */
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash index = top mcbitslog2 bits of the
			 * bit-reversed CRC32 of the MAC address.
			 */
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Fall back to promiscuous when the perfect filters cannot hold
	 * every unicast address.
	 */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		/* starts at 1 — entry 0 presumably holds the device's own
		 * MAC; confirm against the probe path.
		 */
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Invalidate the remaining perfect-filter entries */
		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
496
497static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
498{
499 u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
500
501 if (enable)
502 value |= XGMAC_CONFIG_LM;
503 else
504 value &= ~XGMAC_CONFIG_LM;
505
506 writel(value, ioaddr + XGMAC_RX_CONFIG);
507}
508
509static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
510 u32 val)
511{
512 u32 ctrl = 0;
513
514 writel(val, ioaddr + XGMAC_RSS_DATA);
515 ctrl |= idx << XGMAC_RSSIA_SHIFT;
516 ctrl |= is_key ? XGMAC_ADDRT : 0x0;
517 ctrl |= XGMAC_OB;
518 writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
519
520 return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
521 !(ctrl & XGMAC_OB), 100, 10000);
522}
523
/* Enable/disable Receive Side Scaling.  With a valid enabled @cfg the
 * hash key and indirection table are loaded word by word, every RX
 * queue is routed to the RSS-selected DMA channel (XGMAC_QDDMACH) and
 * hashing is enabled for the UDP4/TCP4/IP2 packet types.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* Load the hash key one 32-bit word at a time */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	/* Load the indirection table */
	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	/* Let the RSS result select the DMA channel per RX queue */
	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}
558
/* Configure VLAN filtering.  Three mutually exclusive modes:
 * - @hash != 0: hash-table VLAN filtering (VTHM + ETV);
 * - @perfect_match != 0: single perfect VID match (ETV, no VTHM);
 * - otherwise: VLAN filtering fully disabled.
 * @is_double selects double-VLAN (S-VLAN outer tag) handling in the
 * first two modes.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Hash-match mode plus tag comparison enabled */
		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* No perfect VID in hash mode */
		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Perfect matching only: hash-match mode off */
		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* Replace any previous VID with the new perfect match */
		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Disable every VLAN filtering feature */
		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
628
/* One decoded safety-error bit: whether the bit is implemented, plus
 * short and detailed human-readable descriptions for logging.
 */
struct dwxgmac3_error_desc {
	bool valid;			/* false for reserved/unknown bits */
	const char *desc;		/* short mnemonic (e.g. "ATPES") */
	const char *detailed_desc;	/* full description for netdev_err */
};

/* Byte offset of a per-module counter array within stmmac_safety_stats */
#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
636
/* For each bit set in @value, log the matching entry of @desc (the
 * per-module bit -> description table) and increment the corresponding
 * counter inside @stats at @field_offset.
 */
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	/* Locate the module's counter array inside the stats struct */
	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* One counter per error bit */
		ptr[loc]++;
	}
}
659
/* Bit -> description table for XGMAC_MAC_DPP_FSM_INT_STATUS (MAC data
 * path parity and FSM timeout errors).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "CPI", "Control Register Parity Check Error" },
};
694
695static void dwxgmac3_handle_mac_err(struct net_device *ndev,
696 void __iomem *ioaddr, bool correctable,
697 struct stmmac_safety_stats *stats)
698{
699 u32 value;
700
701 value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
702 writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
703
704 dwxgmac3_log_error(ndev, value, correctable, "MAC",
705 dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
706}
707
/* Bit -> description table for XGMAC_MTL_ECC_INT_STATUS (MTL memory
 * ECC errors: TX/RX FIFOs, EST and RX Parser memories).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
};
742
743static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
744 void __iomem *ioaddr, bool correctable,
745 struct stmmac_safety_stats *stats)
746{
747 u32 value;
748
749 value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
750 writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
751
752 dwxgmac3_log_error(ndev, value, correctable, "MTL",
753 dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
754}
755
/* Bit -> description table for XGMAC_DMA_ECC_INT_STATUS (DMA TSO and
 * descriptor-cache memory ECC errors).
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
	{ false, "UNKNOWN", "Unknown Error" },
};
790
791static void dwxgmac3_handle_dma_err(struct net_device *ndev,
792 void __iomem *ioaddr, bool correctable,
793 struct stmmac_safety_stats *stats)
794{
795 u32 value;
796
797 value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
798 writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
799
800 dwxgmac3_log_error(ndev, value, correctable, "DMA",
801 dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
802}
803
/* Enable the safety features available at the given Automotive Safety
 * Package level @asp: ECC detection and interrupts for asp >= 1, plus
 * FSM parity/timeout checking for asp > 1.  Returns -EINVAL when the
 * IP reports no ASP support.
 */
static int
dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			    struct stmmac_safety_feature_cfg *safety_cfg)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features (writing 0 leaves all ECC checks on) */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE;	/* RX Parser memory */
	value |= XGMAC_ECEIE;	/* EST memory */
	value |= XGMAC_RXCEIE;	/* RX FIFO memory */
	value |= XGMAC_TXCEIE;	/* TX FIFO memory */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE;	/* Descriptor cache memory */
	value |= XGMAC_TCEIE;	/* TSO memory */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Only ECC protection is available at ASP level 1 */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable Parity and Timeout checking for the FSMs */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN;	/* FSM parity */
	value |= XGMAC_TMOUTEN;	/* FSM timeout */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}
842
/* Decode the safety interrupt status and dispatch to the MAC/MTL/DMA
 * error handlers.  Returns non-zero when at least one uncorrectable
 * error was handled, or -EINVAL without ASP support.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC parity/FSM errors are always treated as uncorrectable */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* MTL ECC: *ECIS/*SCIS = correctable, *EUIS/*SUIS = uncorrectable */
	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA ECC: DECIS = correctable, DEUIS = uncorrectable */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
882
/* Per-module error tables in dump order (MAC, MTL, DMA) — indexed by
 * dwxgmac3_safety_feat_dump via index / 32.
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};
890
891static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
892 int index, unsigned long *count,
893 const char **desc)
894{
895 int module = index / 32, offset = index % 32;
896 unsigned long *ptr = (unsigned long *)stats;
897
898 if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
899 return -EINVAL;
900 if (!dwxgmac3_all_errors[module].desc[offset].valid)
901 return -EINVAL;
902 if (count)
903 *count = *(ptr + index);
904 if (desc)
905 *desc = dwxgmac3_all_errors[module].desc[offset].desc;
906 return 0;
907}
908
909static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
910{
911 u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
912
913 val &= ~XGMAC_FRPE;
914 writel(val, ioaddr + XGMAC_MTL_OPMODE);
915
916 return 0;
917}
918
919static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
920{
921 u32 val;
922
923 val = readl(ioaddr + XGMAC_MTL_OPMODE);
924 val |= XGMAC_FRPE;
925 writel(val, ioaddr + XGMAC_MTL_OPMODE);
926}
927
/* Write one RX Parser TC entry into the parser instruction memory at
 * table position @pos, one 32-bit word at a time, using the indirect
 * access protocol (wait-idle, data, address, write-op, start,
 * wait-complete).  Returns 0 or a poll-timeout error.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write memory address */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Select a write access */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start the transfer */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
969
970static struct stmmac_tc_entry *
971dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
972 unsigned int count, u32 curr_prio)
973{
974 struct stmmac_tc_entry *entry;
975 u32 min_prio = ~0x0;
976 int i, min_prio_idx;
977 bool found = false;
978
979 for (i = count - 1; i >= 0; i--) {
980 entry = &entries[i];
981
982
983 if (!entry->in_use)
984 continue;
985
986 if (entry->in_hw)
987 continue;
988
989 if (entry->is_last)
990 continue;
991
992 if (entry->is_frag)
993 continue;
994
995 if (entry->prio < curr_prio)
996 continue;
997
998 if (entry->prio < min_prio) {
999 min_prio = entry->prio;
1000 min_prio_idx = i;
1001 found = true;
1002 }
1003 }
1004
1005 if (found)
1006 return &entries[min_prio_idx];
1007 return NULL;
1008}
1009
/* (Re)program the whole Flexible RX Parser table.  The receiver and
 * the parser are stopped while the table is rewritten; normal entries
 * are written in priority order with each fragment continuation right
 * after its parent, then all "last" (pass) entries.  RX is re-enabled
 * on every exit path.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX while the table is being rewritten */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable the RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Mark every entry as not yet programmed */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Program entries in ascending-priority order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Adjust the entry when it has a fragment continuation:
		 * don't accept/reject here, chain to the entry after the
		 * fragment instead.
		 */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		/* Place the fragment directly after its parent */
		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	/* Nothing to program: leave the parser disabled (ret == 0 here) */
	if (!nve)
		goto re_enable;

	/* Append all terminating ("last") entries */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Number of parsable entries == number of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Re-enable the RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Always re-enable RX with its previous configuration */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
1097
1098static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
1099{
1100 void __iomem *ioaddr = hw->pcsr;
1101 u32 value;
1102
1103 if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
1104 value, value & XGMAC_TXTSC, 100, 10000))
1105 return -EBUSY;
1106
1107 *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
1108 *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
1109 return 0;
1110}
1111
/* Configure one flexible PPS output.
 *
 * Programs the start time, period and ~50% duty-cycle pulse width
 * (both expressed in units of @sub_second_inc) and issues the
 * START/STOP command.  Returns -EINVAL for unusable configuration,
 * -EBUSY while a previous target time is still pending.
 */
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	/* Without TSCTRLSSR the subsecond counter rolls over at 2^31,
	 * i.e. one tick is ~0.465 ns — rescale the nsec field.
	 */
	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	/* Express the period in sub-second-increment units */
	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	/* Pulse width = half the period (~50% duty cycle) */
	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, issue the command */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}
1165
1166static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
1167{
1168 u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
1169
1170 value &= ~XGMAC_CONFIG_SARC;
1171 value |= val << XGMAC_CONFIG_SARC_SHIFT;
1172
1173 writel(value, ioaddr + XGMAC_TX_CONFIG);
1174}
1175
1176static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
1177{
1178 void __iomem *ioaddr = hw->pcsr;
1179 u32 value;
1180
1181 value = readl(ioaddr + XGMAC_VLAN_INCL);
1182 value |= XGMAC_VLAN_VLTI;
1183 value |= XGMAC_VLAN_CSVL;
1184 value &= ~XGMAC_VLAN_VLC;
1185 value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
1186 writel(value, ioaddr + XGMAC_VLAN_INCL);
1187}
1188
1189static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1190{
1191 void __iomem *ioaddr = hw->pcsr;
1192 u32 value;
1193
1194 if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1195 !(value & XGMAC_XB), 100, 10000))
1196 return -EBUSY;
1197 return 0;
1198}
1199
1200static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
1201 u8 reg, u32 *data)
1202{
1203 void __iomem *ioaddr = hw->pcsr;
1204 u32 value;
1205 int ret;
1206
1207 ret = dwxgmac2_filter_wait(hw);
1208 if (ret)
1209 return ret;
1210
1211 value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1212 value |= XGMAC_TT | XGMAC_XB;
1213 writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1214
1215 ret = dwxgmac2_filter_wait(hw);
1216 if (ret)
1217 return ret;
1218
1219 *data = readl(ioaddr + XGMAC_L3L4_DATA);
1220 return 0;
1221}
1222
1223static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
1224 u8 reg, u32 data)
1225{
1226 void __iomem *ioaddr = hw->pcsr;
1227 u32 value;
1228 int ret;
1229
1230 ret = dwxgmac2_filter_wait(hw);
1231 if (ret)
1232 return ret;
1233
1234 writel(data, ioaddr + XGMAC_L3L4_DATA);
1235
1236 value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1237 value |= XGMAC_XB;
1238 writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1239
1240 return dwxgmac2_filter_wait(hw);
1241}
1242
1243static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1244 bool en, bool ipv6, bool sa, bool inv,
1245 u32 match)
1246{
1247 void __iomem *ioaddr = hw->pcsr;
1248 u32 value;
1249 int ret;
1250
1251 value = readl(ioaddr + XGMAC_PACKET_FILTER);
1252 value |= XGMAC_FILTER_IPFE;
1253 writel(value, ioaddr + XGMAC_PACKET_FILTER);
1254
1255 ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1256 if (ret)
1257 return ret;
1258
1259
1260 if (ipv6) {
1261 value |= XGMAC_L3PEN0;
1262 value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
1263 value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
1264 if (sa) {
1265 value |= XGMAC_L3SAM0;
1266 if (inv)
1267 value |= XGMAC_L3SAIM0;
1268 } else {
1269 value |= XGMAC_L3DAM0;
1270 if (inv)
1271 value |= XGMAC_L3DAIM0;
1272 }
1273 } else {
1274 value &= ~XGMAC_L3PEN0;
1275 if (sa) {
1276 value |= XGMAC_L3SAM0;
1277 if (inv)
1278 value |= XGMAC_L3SAIM0;
1279 } else {
1280 value |= XGMAC_L3DAM0;
1281 if (inv)
1282 value |= XGMAC_L3DAIM0;
1283 }
1284 }
1285
1286 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1287 if (ret)
1288 return ret;
1289
1290 if (sa) {
1291 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
1292 if (ret)
1293 return ret;
1294 } else {
1295 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
1296 if (ret)
1297 return ret;
1298 }
1299
1300 if (!en)
1301 return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1302
1303 return 0;
1304}
1305
1306static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1307 bool en, bool udp, bool sa, bool inv,
1308 u32 match)
1309{
1310 void __iomem *ioaddr = hw->pcsr;
1311 u32 value;
1312 int ret;
1313
1314 value = readl(ioaddr + XGMAC_PACKET_FILTER);
1315 value |= XGMAC_FILTER_IPFE;
1316 writel(value, ioaddr + XGMAC_PACKET_FILTER);
1317
1318 ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1319 if (ret)
1320 return ret;
1321
1322 if (udp) {
1323 value |= XGMAC_L4PEN0;
1324 } else {
1325 value &= ~XGMAC_L4PEN0;
1326 }
1327
1328 value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
1329 value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
1330 if (sa) {
1331 value |= XGMAC_L4SPM0;
1332 if (inv)
1333 value |= XGMAC_L4SPIM0;
1334 } else {
1335 value |= XGMAC_L4DPM0;
1336 if (inv)
1337 value |= XGMAC_L4DPIM0;
1338 }
1339
1340 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1341 if (ret)
1342 return ret;
1343
1344 if (sa) {
1345 value = match & XGMAC_L4SP0;
1346
1347 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1348 if (ret)
1349 return ret;
1350 } else {
1351 value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
1352
1353 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1354 if (ret)
1355 return ret;
1356 }
1357
1358 if (!en)
1359 return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1360
1361 return 0;
1362}
1363
1364static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
1365 u32 addr)
1366{
1367 void __iomem *ioaddr = hw->pcsr;
1368 u32 value;
1369
1370 writel(addr, ioaddr + XGMAC_ARP_ADDR);
1371
1372 value = readl(ioaddr + XGMAC_RX_CONFIG);
1373 if (en)
1374 value |= XGMAC_CONFIG_ARPEN;
1375 else
1376 value &= ~XGMAC_CONFIG_ARPEN;
1377 writel(value, ioaddr + XGMAC_RX_CONFIG);
1378}
1379
1380static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
1381{
1382 u32 ctrl;
1383
1384 writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
1385
1386 ctrl = (reg << XGMAC_ADDR_SHIFT);
1387 ctrl |= gcl ? 0 : XGMAC_GCRR;
1388
1389 writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1390
1391 ctrl |= XGMAC_SRWO;
1392 writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1393
1394 return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
1395 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
1396}
1397
1398static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
1399 unsigned int ptp_rate)
1400{
1401 int i, ret = 0x0;
1402 u32 ctrl;
1403
1404 ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
1405 ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
1406 ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
1407 ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
1408 ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
1409 ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
1410 if (ret)
1411 return ret;
1412
1413 for (i = 0; i < cfg->gcl_size; i++) {
1414 ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
1415 if (ret)
1416 return ret;
1417 }
1418
1419 ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
1420 ctrl &= ~XGMAC_PTOV;
1421 ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
1422 if (cfg->enable)
1423 ctrl |= XGMAC_EEST | XGMAC_SSWL;
1424 else
1425 ctrl &= ~XGMAC_EEST;
1426
1427 writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
1428 return 0;
1429}
1430
1431static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
1432 u32 num_rxq, bool enable)
1433{
1434 u32 value;
1435
1436 if (!enable) {
1437 value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1438
1439 value &= ~XGMAC_EFPE;
1440
1441 writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1442 return;
1443 }
1444
1445 value = readl(ioaddr + XGMAC_RXQ_CTRL1);
1446 value &= ~XGMAC_RQ;
1447 value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
1448 writel(value, ioaddr + XGMAC_RXQ_CTRL1);
1449
1450 value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1451 value |= XGMAC_EFPE;
1452 writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1453}
1454
/* stmmac MAC callbacks for the DesignWare XGMAC 2.10 core. NULL entries
 * (queue routing, PCS, debug) are features this core variant does not
 * implement through this table.
 */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1501
1502static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
1503 u32 queue)
1504{
1505 void __iomem *ioaddr = hw->pcsr;
1506 u32 value;
1507
1508 value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
1509 if (mode == MTL_QUEUE_AVB)
1510 value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
1511 else if (mode == MTL_QUEUE_DCB)
1512 value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
1513 writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
1514}
1515
/* stmmac MAC callbacks for the DesignWare XLGMAC core. Identical to
 * dwxgmac210_ops except for .rx_queue_enable, which uses the XLGMAC
 * RXQ enable register.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1562
1563int dwxgmac2_setup(struct stmmac_priv *priv)
1564{
1565 struct mac_device_info *mac = priv->hw;
1566
1567 dev_info(priv->device, "\tXGMAC2\n");
1568
1569 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1570 mac->pcsr = priv->ioaddr;
1571 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1572 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1573 mac->mcast_bits_log2 = 0;
1574
1575 if (mac->multicast_filter_bins)
1576 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1577
1578 mac->link.duplex = 0;
1579 mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
1580 mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
1581 mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
1582 mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
1583 mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
1584 mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
1585 mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
1586 mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
1587
1588 mac->mii.addr = XGMAC_MDIO_ADDR;
1589 mac->mii.data = XGMAC_MDIO_DATA;
1590 mac->mii.addr_shift = 16;
1591 mac->mii.addr_mask = GENMASK(20, 16);
1592 mac->mii.reg_shift = 0;
1593 mac->mii.reg_mask = GENMASK(15, 0);
1594 mac->mii.clk_csr_shift = 19;
1595 mac->mii.clk_csr_mask = GENMASK(21, 19);
1596
1597 return 0;
1598}
1599
1600int dwxlgmac2_setup(struct stmmac_priv *priv)
1601{
1602 struct mac_device_info *mac = priv->hw;
1603
1604 dev_info(priv->device, "\tXLGMAC\n");
1605
1606 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1607 mac->pcsr = priv->ioaddr;
1608 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1609 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1610 mac->mcast_bits_log2 = 0;
1611
1612 if (mac->multicast_filter_bins)
1613 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1614
1615 mac->link.duplex = 0;
1616 mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
1617 mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
1618 mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
1619 mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
1620 mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
1621 mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
1622 mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
1623 mac->link.speed_mask = XLGMAC_CONFIG_SS;
1624
1625 mac->mii.addr = XGMAC_MDIO_ADDR;
1626 mac->mii.data = XGMAC_MDIO_DATA;
1627 mac->mii.addr_shift = 16;
1628 mac->mii.addr_mask = GENMASK(20, 16);
1629 mac->mii.reg_shift = 0;
1630 mac->mii.reg_mask = GENMASK(15, 0);
1631 mac->mii.clk_csr_shift = 19;
1632 mac->mii.clk_csr_mask = GENMASK(21, 19);
1633
1634 return 0;
1635}
1636