1
2
3
4
5
6
7
8
9
10
11#include <clk.h>
12#include <common.h>
13#include <cpu_func.h>
14#include <dm.h>
15#include <generic-phy.h>
16#include <log.h>
17#include <net.h>
18#include <netdev.h>
19#include <config.h>
20#include <console.h>
21#include <malloc.h>
22#include <asm/cache.h>
23#include <asm/io.h>
24#include <phy.h>
25#include <reset.h>
26#include <miiphy.h>
27#include <wait_bit.h>
28#include <watchdog.h>
29#include <asm/system.h>
30#include <asm/arch/hardware.h>
31#include <asm/arch/sys_proto.h>
32#include <dm/device_compat.h>
33#include <linux/bitops.h>
34#include <linux/err.h>
35#include <linux/errno.h>
36#include <eth_phy.h>
37#include <zynqmp_firmware.h>
38
39
40#define ZYNQ_GEM_PHYMNTNC_OP_MASK 0x40020000
41#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK 0x20000000
42#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK 0x10000000
43#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK 23
44#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK 18
45
46#define ZYNQ_GEM_RXBUF_EOF_MASK 0x00008000
47#define ZYNQ_GEM_RXBUF_SOF_MASK 0x00004000
48#define ZYNQ_GEM_RXBUF_LEN_MASK 0x00003FFF
49
50#define ZYNQ_GEM_RXBUF_WRAP_MASK 0x00000002
51#define ZYNQ_GEM_RXBUF_NEW_MASK 0x00000001
52#define ZYNQ_GEM_RXBUF_ADD_MASK 0xFFFFFFFC
53
54
55#define ZYNQ_GEM_TXBUF_WRAP_MASK 0x40000000
56#define ZYNQ_GEM_TXBUF_LAST_MASK 0x00008000
57#define ZYNQ_GEM_TXBUF_USED_MASK 0x80000000
58
59#define ZYNQ_GEM_NWCTRL_TXEN_MASK 0x00000008
60#define ZYNQ_GEM_NWCTRL_RXEN_MASK 0x00000004
61#define ZYNQ_GEM_NWCTRL_MDEN_MASK 0x00000010
62#define ZYNQ_GEM_NWCTRL_STARTTX_MASK 0x00000200
63
64#define ZYNQ_GEM_NWCFG_SPEED100 0x00000001
65#define ZYNQ_GEM_NWCFG_SPEED1000 0x00000400
66#define ZYNQ_GEM_NWCFG_FDEN 0x00000002
67#define ZYNQ_GEM_NWCFG_FSREM 0x00020000
68#define ZYNQ_GEM_NWCFG_SGMII_ENBL 0x08000000
69#define ZYNQ_GEM_NWCFG_PCS_SEL 0x00000800
70#ifdef CONFIG_ARM64
71#define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x00100000
72#else
73#define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x000c0000
74#endif
75
76#ifdef CONFIG_ARM64
77# define ZYNQ_GEM_DBUS_WIDTH (1 << 21)
78#else
79# define ZYNQ_GEM_DBUS_WIDTH (0 << 21)
80#endif
81
82#define ZYNQ_GEM_NWCFG_INIT (ZYNQ_GEM_DBUS_WIDTH | \
83 ZYNQ_GEM_NWCFG_FDEN | \
84 ZYNQ_GEM_NWCFG_FSREM | \
85 ZYNQ_GEM_NWCFG_MDCCLKDIV)
86
87#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK 0x00000004
88
89#define ZYNQ_GEM_DMACR_BLENGTH 0x00000004
90
91#define ZYNQ_GEM_DMACR_RXSIZE 0x00000300
92
93#define ZYNQ_GEM_DMACR_TXSIZE 0x00000400
94
95#define ZYNQ_GEM_DMACR_RXBUF 0x00180000
96
97#if defined(CONFIG_PHYS_64BIT)
98# define ZYNQ_GEM_DMA_BUS_WIDTH BIT(30)
99#else
100# define ZYNQ_GEM_DMA_BUS_WIDTH (0 << 30)
101#endif
102
103#define ZYNQ_GEM_DMACR_INIT (ZYNQ_GEM_DMACR_BLENGTH | \
104 ZYNQ_GEM_DMACR_RXSIZE | \
105 ZYNQ_GEM_DMACR_TXSIZE | \
106 ZYNQ_GEM_DMACR_RXBUF | \
107 ZYNQ_GEM_DMA_BUS_WIDTH)
108
109#define ZYNQ_GEM_TSR_DONE 0x00000020
110
111#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL 0x1000
112
113#define ZYNQ_GEM_DCFG_DBG6_DMA_64B BIT(23)
114
115#define MDIO_IDLE_TIMEOUT_MS 100
116
117
118#define PHY_DETECT_REG 1
119
120
121
122
123
124
125
126#define PHY_DETECT_MASK 0x1808
127
128
129#define ZYNQ_GEM_PCSSTATUS_LINK BIT(2)
130#define ZYNQ_GEM_PCSSTATUS_ANEG_COMPL BIT(5)
131
132
133#define ZYNQ_GEM_TXBUF_FRMLEN_MASK 0x000007ff
134#define ZYNQ_GEM_TXBUF_EXHAUSTED 0x08000000
135#define ZYNQ_GEM_TXBUF_UNDERRUN 0x10000000
136
137
138#define ZYNQ_GEM_FREQUENCY_10 2500000UL
139#define ZYNQ_GEM_FREQUENCY_100 25000000UL
140#define ZYNQ_GEM_FREQUENCY_1000 125000000UL
141
142#define RXCLK_EN BIT(0)
143
144
/* Register layout of the GEM controller; offsets are relative to the base */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - PHY Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW 0
#define LADDR_HIGH 1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x88 - Specific addr low/high pairs */
	u32 match[4]; /* 0xa8 - Type ID match regs */
	u32 reserved6[18];
#define STAT_SIZE 44
	u32 stat[STAT_SIZE]; /* 0x100 - Statistics counters (clear-on-read) */
	u32 reserved9[20];
	u32 pcscntrl; /* 0x200 - PCS control reg */
	u32 pcsstatus; /* 0x204 - PCS status reg */
	u32 rserved12[35];
	u32 dcfg6; /* 0x294 - Design config reg 6 */
	u32 reserved7[106];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 pointer */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 pointer */
	u32 reserved10[17];
	u32 upper_txqbase; /* 0x4c8 - Upper 32 bits of TX Q base (64-bit DMA) */
	u32 reserved11[2];
	u32 upper_rxqbase; /* 0x4d4 - Upper 32 bits of RX Q base (64-bit DMA) */
};
183
184
/* DMA buffer descriptor; extended to 16 bytes when 64-bit DMA is used */
struct emac_bd {
	u32 addr; /* Buffer address (low 32 bits); low bits carry RX flags */
	u32 status; /* Control/status word (TX/RX flag masks above) */
#if defined(CONFIG_PHYS_64BIT)
	u32 addr_hi; /* Buffer address (high 32 bits) */
	u32 reserved;
#endif
};
193
194
195#define RX_BUF 32
196
197
198
199#define BD_SPACE 0x100000
200
201#define BD_SEPRN_SPACE (RX_BUF * sizeof(struct emac_bd))
202
203
204#define TX_FREE_DESC 2
205
206
/* Per-controller driver state */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;		/* TX descriptors (uncached BD space) */
	struct emac_bd *rx_bd;		/* RX descriptor ring */
	char *rxbuffers;		/* RX_BUF buffers of PKTSIZE_ALIGN each */
	u32 rxbd_current;		/* Index of next RX descriptor to poll */
	u32 rx_first_buf;		/* Index of the SOF descriptor of the
					 * frame currently being recycled */
	int phyaddr;			/* PHY address; -1 = not resolved yet */
	int init;			/* Non-zero once one-time HW init ran */
	struct zynq_gem_regs *iobase;	/* This controller's register block */
	struct zynq_gem_regs *mdiobase;	/* Register block owning the MDIO bus
					 * (may belong to another GEM) */
	phy_interface_t interface;	/* phy-mode from DT */
	struct phy_device *phydev;	/* Connected ethernet PHY */
	ofnode phy_of_node;		/* DT node of the PHY */
	struct mii_dev *bus;		/* MDIO bus used to reach the PHY */
	struct clk rx_clk;		/* Only used when clk_en_info & RXCLK_EN */
	struct clk tx_clk;		/* TX clock, rate follows link speed */
	u32 max_speed;			/* "max-speed" DT limit (default 1000) */
	bool int_pcs;			/* Internal PCS/PMA used for SGMII */
	bool dma_64bit;			/* HW reports 64-bit DMA capability */
	u32 clk_en_info;		/* Driver-data flags (e.g. RXCLK_EN) */
	struct reset_ctl_bulk resets;	/* Optional reset lines from DT */
};
229
/*
 * Run one MDIO (PHY maintenance) transaction through the GEM and wait
 * for it to complete.
 *
 * @priv:	driver state; the mdiobase register block is used, which
 *		may belong to a different GEM instance
 * @phy_addr:	PHY address on the MDIO bus
 * @regnum:	PHY register number
 * @op:		ZYNQ_GEM_PHYMNTNC_OP_R_MASK or ZYNQ_GEM_PHYMNTNC_OP_W_MASK
 * @data:	in: value to write (write op); out: value read (read op)
 *
 * Return: 0 on success, negative errno if the MDIO interface does not go
 * idle within MDIO_IDLE_TIMEOUT_MS before or after the transaction.
 */
static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->mdiobase;
	int err;

	/* Wait until the MDIO interface is idle before starting */
	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, MDIO_IDLE_TIMEOUT_MS, false);
	if (err)
		return err;

	/* Construct the maintenance word: must-be-one bits | op | addr | reg | data */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Kick off the transaction */
	writel(mgtcr, &regs->phymntnc);

	/* Wait for it to complete (interface idle again) */
	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, MDIO_IDLE_TIMEOUT_MS, false);
	if (err)
		return err;

	/* For reads, the result is in the low 16 bits of phymntnc */
	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}
260
261static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
262 u32 regnum, u16 *val)
263{
264 int ret;
265
266 ret = phy_setup_op(priv, phy_addr, regnum,
267 ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
268
269 if (!ret)
270 debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
271 phy_addr, regnum, *val);
272
273 return ret;
274}
275
276static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
277 u32 regnum, u16 data)
278{
279 debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
280 regnum, data);
281
282 return phy_setup_op(priv, phy_addr, regnum,
283 ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
284}
285
286static int zynq_gem_setup_mac(struct udevice *dev)
287{
288 u32 i, macaddrlow, macaddrhigh;
289 struct eth_pdata *pdata = dev_get_plat(dev);
290 struct zynq_gem_priv *priv = dev_get_priv(dev);
291 struct zynq_gem_regs *regs = priv->iobase;
292
293
294 macaddrlow = pdata->enetaddr[0];
295 macaddrlow |= pdata->enetaddr[1] << 8;
296 macaddrlow |= pdata->enetaddr[2] << 16;
297 macaddrlow |= pdata->enetaddr[3] << 24;
298
299
300 macaddrhigh = pdata->enetaddr[4];
301 macaddrhigh |= pdata->enetaddr[5] << 8;
302
303 for (i = 0; i < 4; i++) {
304 writel(0, ®s->laddr[i][LADDR_LOW]);
305 writel(0, ®s->laddr[i][LADDR_HIGH]);
306
307 writel(0, ®s->match[i]);
308 }
309
310 writel(macaddrlow, ®s->laddr[0][LADDR_LOW]);
311 writel(macaddrhigh, ®s->laddr[0][LADDR_HIGH]);
312
313 return 0;
314}
315
/*
 * Enable the MDIO port, connect to the ethernet PHY and configure it,
 * masking the advertised modes down to what the GEM MAC supports.
 *
 * Return: 0 on success, -ENODEV if no PHY could be connected, other
 * negative errno from phy_set_supported()/phy_config().
 */
static int zynq_phy_init(struct udevice *dev)
{
	int ret;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	/* Link modes the GEM MAC itself can handle */
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Enable only the MDIO port before talking to the PHY */
	writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs_mdio->nwctrl);

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		priv->phyaddr = eth_phy_get_addr(dev);

	priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
				   priv->interface);
	if (IS_ERR_OR_NULL(priv->phydev))
		return -ENODEV;

	/* Apply the optional "max-speed" DT limit read in of_to_plat() */
	if (priv->max_speed) {
		ret = phy_set_supported(priv->phydev, priv->max_speed);
		if (ret)
			return ret;
	}

	priv->phydev->supported &= supported | ADVERTISED_Pause |
				   ADVERTISED_Asym_Pause;

	priv->phydev->advertising = priv->phydev->supported;
	/* Fall back to the DT phy node when phy_connect didn't set one */
	if (!ofnode_valid(priv->phydev->node))
		priv->phydev->node = priv->phy_of_node;

	return phy_config(priv->phydev);
}
354
/*
 * eth_ops.start: one-time controller initialization (first call only),
 * then per-start link bring-up: PHY startup, speed-dependent nwcfg and
 * TX clock programming, SGMII PCS handling, and finally RX/TX enable.
 *
 * Return: 0 on success, negative errno (or -1 for "no link") on failure.
 */
static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	/* Dummy descriptors used to park the unused priority queue 1 */
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

	/* Probe the HW's 64-bit DMA capability from design config reg 6 */
	if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
		priv->dma_64bit = true;
	else
		priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
	if (!priv->dma_64bit) {
		printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
		       __func__);
		return -EINVAL;
	}
#else
	if (priv->dma_64bit)
		debug("WARN: %s: Not using 64-bit dma even HW supports it\n",
		      __func__);
#endif

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter, clear statuses */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the hash filter registers */
		writel(0x0, &regs->hashl);

		writel(0x0, &regs->hashh);

		/* Statistics counters are clear-on-read; read them all once */
		for (i = 0; i < STAT_SIZE; i++)
			readl(&regs->stat[i]);

		/* Set up the RX descriptor ring */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					(lower_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#if defined(CONFIG_PHYS_64BIT)
			priv->rx_bd[i].addr_hi =
					(upper_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#endif
		}
		/* WRAP bit on the last descriptor closes the ring */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;

		/* Tell the controller where the RX ring lives */
		writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
		writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

		/* DMA configuration: burst length, FIFO sizes, buffer size */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Enable the MDIO port on the register block that owns it */
		setbits_le32(&regs_mdio->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		/* Park priority queue 1 on permanently-used dummy descriptors */
		dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
		dummy_tx_bd->addr_hi = 0;
#endif
		dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
				ZYNQ_GEM_TXBUF_LAST_MASK|
				ZYNQ_GEM_TXBUF_USED_MASK;

		dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
				ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
		dummy_rx_bd->addr_hi = 0;
#endif
		dummy_rx_bd->status = 0;

		writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
		writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

		priv->init++;
	}

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	/*
	 * Select SGMII and the internal PCS only when both the interface
	 * is SGMII and the internal PCS/PMA core is in use.
	 */
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
	}

	/* Program speed bits in nwcfg and pick the matching TX clock rate */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		/* nwcfg keeps 10M default; only the clock rate changes */
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

#ifdef CONFIG_ARM64
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		/*
		 * Enable PCS auto-negotiation for a real PHY; disable it
		 * for a fixed-link (PHY_FIXED_ID) configuration.
		 */
		if (priv->phydev->phy_id != PHY_FIXED_ID) {
			writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);

			/*
			 * When the PHY reports link up, wait for the PCS to
			 * report link and AN completion as well.
			 */
			if (priv->phydev->link) {
				u32 pcsstatus;

				pcsstatus = ZYNQ_GEM_PCSSTATUS_LINK |
					ZYNQ_GEM_PCSSTATUS_ANEG_COMPL;
				ret = wait_for_bit_le32(&regs->pcsstatus,
							pcsstatus,
							true, 5000, true);
				if (ret) {
					dev_warn(dev,
						 "no PCS (SGMII) link\n");
				} else {
					/*
					 * Small settle delay so the very
					 * first packet goes out correctly.
					 */
					mdelay(1);
				}
			}
		} else {
			writel(readl(&regs->pcscntrl) & ~ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);
		}
	}
#endif

	/* Only touch the TX clock rate if it differs from the target */
	ret = clk_get_rate(&priv->tx_clk);
	if (ret != clk_rate) {
		ret = clk_set_rate(&priv->tx_clk, clk_rate);
		if (IS_ERR_VALUE(ret)) {
			dev_err(dev, "failed to set tx clock rate %ld\n", clk_rate);
			return ret;
		}
	}

	ret = clk_enable(&priv->tx_clk);
	if (ret) {
		dev_err(dev, "failed to enable tx clock\n");
		return ret;
	}

	/* Some variants (Versal) need an explicit RX clock as well */
	if (priv->clk_en_info & RXCLK_EN) {
		ret = clk_enable(&priv->rx_clk);
		if (ret) {
			dev_err(dev, "failed to enable rx clock\n");
			return ret;
		}
	}
	/* Finally enable the receiver and transmitter */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
					ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}
560
/*
 * eth_ops.send: transmit one frame synchronously.
 *
 * Descriptor 0 carries the frame; descriptor 1 is a used+wrap terminator
 * so the controller stops after one frame. The frame buffer is flushed
 * to RAM before transmission is kicked off.
 *
 * Return: 0 on success, negative errno if the TX-done bit does not set
 * within the timeout.
 */
static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	dma_addr_t addr;
	u32 size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Set up the TX descriptor for this frame */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
	priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;

	/* Terminator descriptor: used+wrap marks the end of the chain */
	current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
	current_bd->addr_hi = 0x0;
#endif
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK|
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* Point the controller at the TX descriptor ring */
	writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

	/* Flush the frame to RAM (cache-line aligned) so DMA sees it */
	addr = (ulong) ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmission */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Report buffer exhaustion mid-frame, if flagged */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	/* Wait for the TX-complete bit in the TX status register */
	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}
609
610
/*
 * eth_ops.recv: poll the current RX descriptor for a received frame.
 *
 * Does not advance the ring or return the buffer to hardware - that is
 * done later by zynq_gem_free_pkt(). On success *packetp points into the
 * (cache-invalidated) receive buffer.
 *
 * Return: frame length in bytes, or -1 when no complete frame is ready.
 */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
	int frame_len;
	dma_addr_t addr;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

	/* NEW bit in addr is set by HW once it filled this descriptor */
	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return -1;

	/* Only descriptors flagged SOF and/or EOF carry usable frames */
	if (!(current_bd->status &
	      (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return -1;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (!frame_len) {
		printf("%s: Zero size packet?\n", __func__);
		return -1;
	}

	/* Recover the buffer address; the low 2 addr bits are flags */
#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		      | ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	addr &= ~(ARCH_DMA_MINALIGN - 1);

	*packetp = (uchar *)(uintptr_t)addr;

	/* Invalidate the cache so the CPU sees the DMA-written frame */
	invalidate_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	return frame_len;
}
648
/*
 * eth_ops.free_pkt: hand the descriptor(s) of a consumed frame back to
 * the hardware and advance the RX ring index.
 *
 * The descriptor carrying SOF is recycled only once EOF has been seen,
 * so a multi-descriptor frame is released as a whole.
 */
static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;
	dma_addr_t addr;

	if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
		/* Remember where this frame started; release SOF at EOF */
		priv->rx_first_buf = priv->rxbd_current;
	} else {
		/* Middle/end descriptor: clear NEW, reset status for HW */
		current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		current_bd->status = 0xF0000000;
	}

	if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
		/* Frame complete: now release the SOF descriptor too */
		first_bd = &priv->rx_bd[priv->rx_first_buf];
		first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		first_bd->status = 0xF0000000;
	}

	/* Flush the buffer range before the HW reuses it */
#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		| ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	flush_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN,
						ARCH_DMA_MINALIGN));
	barrier();

	/* Advance to the next descriptor, wrapping at RX_BUF */
	if ((++priv->rxbd_current) >= RX_BUF)
		priv->rxbd_current = 0;

	return 0;
}
685
686static void zynq_gem_halt(struct udevice *dev)
687{
688 struct zynq_gem_priv *priv = dev_get_priv(dev);
689 struct zynq_gem_regs *regs = priv->iobase;
690
691 clrsetbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
692 ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
693}
694
695static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
696 int devad, int reg)
697{
698 struct zynq_gem_priv *priv = bus->priv;
699 int ret;
700 u16 val = 0;
701
702 ret = phyread(priv, addr, reg, &val);
703 debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
704 return val;
705}
706
/*
 * MDIO bus write callback (Clause 22 only; @devad is unused).
 *
 * Return: 0 on success, negative errno on MDIO timeout.
 */
static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
				 int reg, u16 value)
{
	struct zynq_gem_priv *priv = bus->priv;

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
	return phywrite(priv, addr, reg, value);
}
715
716static int zynq_gem_reset_init(struct udevice *dev)
717{
718 struct zynq_gem_priv *priv = dev_get_priv(dev);
719 int ret;
720
721 ret = reset_get_bulk(dev, &priv->resets);
722 if (ret == -ENOTSUPP || ret == -ENOENT)
723 return 0;
724 else if (ret)
725 return ret;
726
727 ret = reset_deassert_bulk(&priv->resets);
728 if (ret) {
729 reset_release_bulk(&priv->resets);
730 return ret;
731 }
732
733 return 0;
734}
735
736static int gem_zynqmp_set_dynamic_config(struct udevice *dev)
737{
738 u32 pm_info[2];
739 int ret;
740
741 if (IS_ENABLED(CONFIG_ARCH_ZYNQMP) && IS_ENABLED(CONFIG_ZYNQMP_FIRMWARE)) {
742 if (!zynqmp_pm_is_function_supported(PM_IOCTL,
743 IOCTL_SET_GEM_CONFIG)) {
744 ret = ofnode_read_u32_array(dev_ofnode(dev),
745 "power-domains",
746 pm_info,
747 ARRAY_SIZE(pm_info));
748 if (ret) {
749 dev_err(dev,
750 "Failed to read power-domains info\n");
751 return ret;
752 }
753
754 ret = zynqmp_pm_set_gem_config(pm_info[1],
755 GEM_CONFIG_FIXED, 0);
756 if (ret)
757 return ret;
758
759 ret = zynqmp_pm_set_gem_config(pm_info[1],
760 GEM_CONFIG_SGMII_MODE,
761 1);
762 if (ret)
763 return ret;
764 }
765 }
766
767 return 0;
768}
769
/*
 * Driver probe: optional SGMII serdes phy, resets, RX buffers, uncached
 * descriptor space, clocks, MDIO bus and ethernet PHY setup.
 *
 * Return: 0 on success, negative errno on failure; buffers and the MDIO
 * bus registered here are released on the goto error paths.
 */
static int zynq_gem_probe(struct udevice *dev)
{
	void *bd_space;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	int ret;
	struct phy phy;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
		/* The serdes phy is optional: -ENOENT is tolerated */
		ret = generic_phy_get_by_index(dev, 0, &phy);
		if (!ret) {
			ret = generic_phy_init(&phy);
			if (ret)
				return ret;
		} else if (ret != -ENOENT) {
			debug("could not get phy (err %d)\n", ret);
			return ret;
		}
	}

	ret = zynq_gem_reset_init(dev);
	if (ret)
		return ret;

	/* RX buffers must be cache-line aligned for DMA */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	if (!priv->rxbuffers)
		return -ENOMEM;

	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
	ulong addr = (ulong)priv->rxbuffers;
	/* Flush the zeroed buffers so CPU and DMA views start out in sync */
	flush_dcache_range(addr, addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	/* Section-aligned so the BD area can be mapped uncached below */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Descriptors must be coherent with the controller: disable caching */
	mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
					BD_SPACE, DCACHE_OFF);

	/* Split the BD area into the TX and RX descriptor rings */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

	ret = clk_get_by_name(dev, "tx_clk", &priv->tx_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get tx_clock\n");
		goto err2;
	}

	/* Some variants (RXCLK_EN in driver data) need an rx clock too */
	if (priv->clk_en_info & RXCLK_EN) {
		ret = clk_get_by_name(dev, "rx_clk", &priv->rx_clk);
		if (ret < 0) {
			dev_err(dev, "failed to get rx_clock\n");
			goto err2;
		}
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		priv->bus = eth_phy_get_mdio_bus(dev);

	/* Register our own MDIO bus unless a shared one was found above */
	if (!priv->bus) {
		priv->bus = mdio_alloc();
		priv->bus->read = zynq_gem_miiphy_read;
		priv->bus->write = zynq_gem_miiphy_write;
		priv->bus->priv = priv;

		ret = mdio_register_seq(priv->bus, dev_seq(dev));
		if (ret)
			goto err2;
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		eth_phy_set_mdio_bus(dev, priv->bus);

	ret = zynq_phy_init(dev);
	if (ret)
		goto err3;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII && phy.dev) {
		if (IS_ENABLED(CONFIG_DM_ETH_PHY)) {
			if (device_is_compatible(dev, "cdns,zynqmp-gem") ||
			    device_is_compatible(dev, "xlnx,zynqmp-gem")) {
				ret = gem_zynqmp_set_dynamic_config(dev);
				if (ret) {
					dev_err
					(dev,
					 "Failed to set gem dynamic config\n");
					return ret;
				}
			}
		}
		ret = generic_phy_power_on(&phy);
		if (ret)
			return ret;
	}

	printf("\nZYNQ GEM: %lx, mdio bus %lx, phyaddr %d, interface %s\n",
	       (ulong)priv->iobase, (ulong)priv->mdiobase, priv->phydev->addr,
	       phy_string_for_interface(priv->interface));

	return ret;

err3:
	mdio_unregister(priv->bus);
err2:
	free(priv->tx_bd);
err1:
	free(priv->rxbuffers);
	return ret;
}
884
/*
 * Driver remove: free the connected PHY device and unregister/free the
 * MDIO bus set up in probe.
 */
static int zynq_gem_remove(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}
895
/* Driver-model ethernet operations for the GEM controller */
static const struct eth_ops zynq_gem_ops = {
	.start = zynq_gem_init,
	.send = zynq_gem_send,
	.recv = zynq_gem_recv,
	.free_pkt = zynq_gem_free_pkt,
	.stop = zynq_gem_halt,
	.write_hwaddr = zynq_gem_setup_mac,
};
904
905static int zynq_gem_of_to_plat(struct udevice *dev)
906{
907 struct eth_pdata *pdata = dev_get_plat(dev);
908 struct zynq_gem_priv *priv = dev_get_priv(dev);
909 struct ofnode_phandle_args phandle_args;
910
911 pdata->iobase = (phys_addr_t)dev_read_addr(dev);
912 priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
913 priv->mdiobase = priv->iobase;
914
915 priv->phyaddr = -1;
916
917 if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
918 &phandle_args)) {
919 fdt_addr_t addr;
920 ofnode parent;
921
922 debug("phy-handle does exist %s\n", dev->name);
923 if (!(IS_ENABLED(CONFIG_DM_ETH_PHY)))
924 priv->phyaddr = ofnode_read_u32_default
925 (phandle_args.node, "reg", -1);
926
927 priv->phy_of_node = phandle_args.node;
928 priv->max_speed = ofnode_read_u32_default(phandle_args.node,
929 "max-speed",
930 SPEED_1000);
931
932 parent = ofnode_get_parent(phandle_args.node);
933 if (ofnode_name_eq(parent, "mdio"))
934 parent = ofnode_get_parent(parent);
935
936 addr = ofnode_get_addr(parent);
937 if (addr != FDT_ADDR_T_NONE) {
938 debug("MDIO bus not found %s\n", dev->name);
939 priv->mdiobase = (struct zynq_gem_regs *)addr;
940 }
941 }
942
943 pdata->phy_interface = dev_read_phy_mode(dev);
944 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
945 return -EINVAL;
946 priv->interface = pdata->phy_interface;
947
948 priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");
949
950 priv->clk_en_info = dev_get_driver_data(dev);
951
952 return 0;
953}
954
/*
 * Compatible strings; .data carries driver flags (RXCLK_EN means an
 * explicit "rx_clk" must be fetched and enabled).
 */
static const struct udevice_id zynq_gem_ids[] = {
	{ .compatible = "xlnx,versal-gem", .data = RXCLK_EN },
	{ .compatible = "cdns,versal-gem", .data = RXCLK_EN },
	{ .compatible = "xlnx,zynqmp-gem" },
	{ .compatible = "cdns,zynqmp-gem" },
	{ .compatible = "xlnx,zynq-gem" },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};
965
/* Driver-model registration for the Zynq/ZynqMP/Versal GEM ethernet */
U_BOOT_DRIVER(zynq_gem) = {
	.name = "zynq_gem",
	.id = UCLASS_ETH,
	.of_match = zynq_gem_ids,
	.of_to_plat = zynq_gem_of_to_plat,
	.probe = zynq_gem_probe,
	.remove = zynq_gem_remove,
	.ops = &zynq_gem_ops,
	.priv_auto = sizeof(struct zynq_gem_priv),
	.plat_auto = sizeof(struct eth_pdata),
};
977