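/*
 * DWMAC4 core support for the stmmac driver: GMAC configuration, MTL
 * queue handling, EEE, PMT/Wake-on-LAN, filtering and IRQ helpers.
 */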
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include "stmmac_pcs.h"
#include "dwmac4.h"

static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	if (mtu > 1500)
		value |= GMAC_CONFIG_2K;
	if (mtu > 2000)
		value |= GMAC_CONFIG_JE;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

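	/* Program the default interrupt-enable mask; add PMT and PCS IRQs when available */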
	value = GMAC_INT_DEFAULT_MASK;
	if (hw->pmt)
		value |= GMAC_INT_PMT_EN;
	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);
}

static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

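	/* Queues 0-3 live in GMAC_RXQ_CTRL2, queues 4-7 in GMAC_RXQ_CTRL3 */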
	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
		 GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

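	/* Queues 0-3 live in GMAC_TXQ_PRTY_MAP0, queues 4-7 in GMAC_TXQ_PRTY_MAP1 */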
	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
		 GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

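	/* routing configuration: steer the given packet type to the requested RX queue */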
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

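	/* the AV control and multicast/broadcast queues need an extra enable bit */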
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	/* Write the selected algorithm back, otherwise the above is a no-op */
	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

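	/* Queues 0-3 are mapped via MTL_RXQ_DMA_MAP0, queues 4-7 via MTL_RXQ_DMA_MAP1 */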
	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

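	/* enable the AV (credit-based shaper) algorithm and credit control */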
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

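	/* configure the send slope */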
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

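	/* the idle slope is programmed through the TX queue weight register */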
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

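	/* configure the high credit */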
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

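	/* configure the low credit */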
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
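		/* The receiver must stay enabled so that wake-up frames can be detected */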
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

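	/* Enable LPI and let the MAC enter and exit the LPI state
	 * automatically on the transmit path (LPITXA).
	 */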
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

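	/* Program the LPI timers:
	 * LS (bits 25:16): minimum time the link must be up before the
	 *     LPI pattern can be transmitted.
	 * TW (bits 15:0): minimum wait time after leaving the LPI pattern.
	 */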
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	unsigned int value = 0;

	if (dev->flags & IFF_PROMISC) {
		value = GMAC_PACKET_FILTER_PR;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
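		/* Pass all multicast frames */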
		value = GMAC_PACKET_FILTER_PM;
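		/* Set all 64 bits of the hash table so every multicast
		 * address is accepted.
		 */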
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
	} else if (!netdev_mc_empty(dev)) {
		u32 mc_filter[2];
		struct netdev_hw_addr *ha;

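		/* Use the hash filter for multicast addresses */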
		value = GMAC_PACKET_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
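			/* The upper 6 bits of the bit-reversed CRC32 of the
			 * address index the two 32-bit hash registers.
			 */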
			int bit_nr =
				(bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
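			/* The most significant bit selects the register,
			 * the low five bits select the bit within it.
			 */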
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
		writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
	}

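	/* Handle unicast addresses */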
	if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
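		/* Fall back to promiscuous mode when more addresses are
		 * requested than the perfect filter can hold.
		 */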
		value |= GMAC_PACKET_FILTER_PR;
	} else if (!netdev_uc_empty(dev)) {
		int reg = 1;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}
	}

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
		writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
	}
	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow |= GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

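/* Decode the in-band PHY interface (RGMII/SMII) control and status */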
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

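	/* Check the link status */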
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

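	/* Check the MTL interrupt for this queue */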
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
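			/* Acknowledge the RX overflow interrupt */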
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status;
	int ret = 0;

	intr_status = readl(ioaddr + GMAC_INT_STATUS);

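	/* MMC interrupts are only counted here; they are not otherwise handled */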
	if (intr_status & mmc_tx_irq)
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;

	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

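	/* GMAC-level debug status */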
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};

static const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};

struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
				     int perfect_uc_entries, int *synopsys_id)
{
	struct mac_device_info *mac;
	u32 hwid = readl(ioaddr + GMAC_VERSION);

	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
	if (!mac)
		return NULL;

	mac->pcsr = ioaddr;
	mac->multicast_filter_bins = mcbins;
	mac->unicast_filter_entries = perfect_uc_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);

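	/* Decode the Synopsys core version from the hardware ID register */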
	*synopsys_id = stmmac_get_synopsys_id(hwid);

	if (*synopsys_id > DWMAC_CORE_4_00)
		mac->dma = &dwmac410_dma_ops;
	else
		mac->dma = &dwmac4_dma_ops;

	if (*synopsys_id >= DWMAC_CORE_4_00)
		mac->mac = &dwmac410_ops;
	else
		mac->mac = &dwmac4_ops;

	return mac;
}