uboot/drivers/net/bcm-sf2-eth-gmac.c
/*
 * Copyright 2014 Broadcom Corporation.
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#ifdef BCM_GMAC_DEBUG
#ifndef DEBUG
#define DEBUG
#endif
#endif

#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <asm/io.h>
#include <phy.h>

#include "bcm-sf2-eth.h"
#include "bcm-sf2-eth-gmac.h"

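/*
 * Poll until 'exp' evaluates false or roughly 'us' microseconds have
 * elapsed, sampling every 10us.
 */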
#define SPINWAIT(exp, us) { \
        uint countdown = (us) + 9; \
        while ((exp) && (countdown >= 10)) {\
                udelay(10); \
                countdown -= 10; \
        } \
}

static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);

/* DMA Descriptor */
typedef struct {
        /* misc control bits */
        uint32_t        ctrl1;
        /* buffer count and address extension */
        uint32_t        ctrl2;
        /* memory address of the data buffer, bits 31:0 */
        uint32_t        addrlow;
        /* memory address of the data buffer, bits 63:32 */
        uint32_t        addrhigh;
} dma64dd_t;

uint32_t g_dmactrlflags;

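/*
 * Update the cached DMA control flags: clear the bits in 'mask', then set
 * 'flags'.  When parity is being enabled, probe whether the hardware
 * supports it by toggling D64_XC_PD in the TX control register and drop
 * DMA_CTRL_PEN if it does not.
 */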
static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
        debug("%s enter\n", __func__);

        g_dmactrlflags &= ~mask;
        g_dmactrlflags |= flags;

        /* If trying to enable parity, check if parity is actually supported */
        if (g_dmactrlflags & DMA_CTRL_PEN) {
                uint32_t control;

                control = readl(GMAC0_DMA_TX_CTRL_ADDR);
                writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
                if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
                        /*
                         * We *can* disable it, therefore it is supported;
                         * restore control register
                         */
                        writel(control, GMAC0_DMA_TX_CTRL_ADDR);
                } else {
                        /* Not supported, don't allow it to be enabled */
                        g_dmactrlflags &= ~DMA_CTRL_PEN;
                }
        }

        return g_dmactrlflags;
}

static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
        uint32_t v = readl(reg);
        v &= ~(value);
        writel(v, reg);
}

static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
        uint32_t v = readl(reg);
        v |= value;
        writel(v, reg);
}

#ifdef BCM_GMAC_DEBUG
static void dma_tx_dump(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;

        printf("TX DMA Register:\n");
        printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
               readl(GMAC0_DMA_TX_CTRL_ADDR),
               readl(GMAC0_DMA_TX_PTR_ADDR),
               readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
               readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
               readl(GMAC0_DMA_TX_STATUS0_ADDR),
               readl(GMAC0_DMA_TX_STATUS1_ADDR));

        printf("TX Descriptors:\n");
        for (i = 0; i < TX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
                printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
                       descp->ctrl1, descp->ctrl2,
                       descp->addrhigh, descp->addrlow);
        }

        printf("TX Buffers:\n");
        for (i = 0; i < TX_BUF_NUM; i++) {
                bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE);
                printf("buf%d:0x%x; ", i, (uint32_t)bufp);
        }
        printf("\n");
}

static void dma_rx_dump(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;

        printf("RX DMA Register:\n");
        printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
               readl(GMAC0_DMA_RX_CTRL_ADDR),
               readl(GMAC0_DMA_RX_PTR_ADDR),
               readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
               readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
               readl(GMAC0_DMA_RX_STATUS0_ADDR),
               readl(GMAC0_DMA_RX_STATUS1_ADDR));

        printf("RX Descriptors:\n");
        for (i = 0; i < RX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
                printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
                       descp->ctrl1, descp->ctrl2,
                       descp->addrhigh, descp->addrlow);
        }

        printf("RX Buffers:\n");
        for (i = 0; i < RX_BUF_NUM; i++) {
                bufp = dma->rx_buf + i * RX_BUF_SIZE;
                printf("buf%d:0x%x; ", i, (uint32_t)bufp);
        }
        printf("\n");
}
#endif

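/*
 * Set up the TX descriptor ring: clear descriptors and buffers, point each
 * descriptor at its buffer, mark the last entry end-of-table, flush the
 * ring to memory and program the ring base into the TX DMA channel.
 */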
static int dma_tx_init(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;
        uint32_t ctrl;

        debug("%s enter\n", __func__);

        /* clear descriptor memory */
        memset((void *)(dma->tx_desc_aligned), 0,
               TX_BUF_NUM * sizeof(dma64dd_t));
        memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE);

        /* Initialize TX DMA descriptor table */
        for (i = 0; i < TX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
                bufp = dma->tx_buf + i * TX_BUF_SIZE;
                /* clear buffer memory */
                memset((void *)bufp, 0, TX_BUF_SIZE);

                ctrl = 0;
                /* if last descr set endOfTable */
                if (i == (TX_BUF_NUM-1))
                        ctrl = D64_CTRL1_EOT;
                descp->ctrl1 = ctrl;
                descp->ctrl2 = 0;
                descp->addrlow = (uint32_t)bufp;
                descp->addrhigh = 0;
        }

        /* flush descriptor and buffer */
        descp = dma->tx_desc_aligned;
        bufp = dma->tx_buf;
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)(descp +
                                           sizeof(dma64dd_t) * TX_BUF_NUM));
        flush_dcache_range((unsigned long)(bufp),
                           (unsigned long)(bufp + TX_BUF_SIZE * TX_BUF_NUM));

        /* initialize the DMA channel */
        writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
        writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

        /* now update the dma last descriptor */
        writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
               GMAC0_DMA_TX_PTR_ADDR);

        return 0;
}

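/*
 * Set up the RX descriptor ring: clear descriptors and buffers, give each
 * descriptor an RX_BUF_SIZE buffer, flush the ring to memory and program
 * the ring base and last-descriptor pointer into the RX DMA channel.
 */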
static int dma_rx_init(struct eth_dma *dma)
{
        uint32_t last_desc;
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        uint32_t ctrl;
        int i;

        debug("%s enter\n", __func__);

        /* clear descriptor memory */
        memset((void *)(dma->rx_desc_aligned), 0,
               RX_BUF_NUM * sizeof(dma64dd_t));
        /* clear buffer memory */
        memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE);

        /* Initialize RX DMA descriptor table */
        for (i = 0; i < RX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
                bufp = dma->rx_buf + i * RX_BUF_SIZE;
                ctrl = 0;
                /* if last descr set endOfTable */
                if (i == (RX_BUF_NUM - 1))
                        ctrl = D64_CTRL1_EOT;
                descp->ctrl1 = ctrl;
                descp->ctrl2 = RX_BUF_SIZE;
                descp->addrlow = (uint32_t)bufp;
                descp->addrhigh = 0;

                last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
                                + sizeof(dma64dd_t);
        }

        descp = dma->rx_desc_aligned;
        bufp = dma->rx_buf;
        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)(descp +
                                           sizeof(dma64dd_t) * RX_BUF_NUM));
        flush_dcache_range((unsigned long)(bufp),
                           (unsigned long)(bufp + RX_BUF_SIZE * RX_BUF_NUM));

        /* initialize the DMA channel */
        writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
        writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

        /* now update the dma last descriptor */
        writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

        return 0;
}

static int dma_init(struct eth_dma *dma)
{
        debug(" %s enter\n", __func__);

        /*
         * Default flags: For backwards compatibility both
         * Rx Overflow Continue and Parity are DISABLED.
         */
        dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

        debug("rx burst len 0x%x\n",
              (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
              >> D64_RC_BL_SHIFT);
        debug("tx burst len 0x%x\n",
              (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
              >> D64_XC_BL_SHIFT);

        dma_tx_init(dma);
        dma_rx_init(dma);

        /* From end of chip_init() */
        /* enable the overflow continue feature and disable parity */
        dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
                      DMA_CTRL_ROC /* value */);

        return 0;
}

static int dma_deinit(struct eth_dma *dma)
{
        debug(" %s enter\n", __func__);

        gmac_disable_dma(dma, MAC_DMA_RX);
        gmac_disable_dma(dma, MAC_DMA_TX);

        free(dma->tx_buf);
        dma->tx_buf = NULL;
        free(dma->tx_desc);
        dma->tx_desc = NULL;
        dma->tx_desc_aligned = NULL;

        free(dma->rx_buf);
        dma->rx_buf = NULL;
        free(dma->rx_desc);
        dma->rx_desc = NULL;
        dma->rx_desc_aligned = NULL;

        return 0;
}

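/*
 * Queue one frame for transmission: copy the packet into the TX buffer of
 * the current descriptor, fill in the descriptor (SOF/EOF, interrupt on
 * completion, EOT on the last ring entry), flush caches and advance the TX
 * last-descriptor pointer so the DMA engine picks it up.
 */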
int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
{
        uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE;

        /* kick off the dma */
        size_t len = length;
        int txout = dma->cur_tx_index;
        uint32_t flags;
        dma64dd_t *descp = NULL;
        uint32_t ctrl;
        uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
                              sizeof(dma64dd_t)) & D64_XP_LD_MASK;
        size_t buflen;

        debug("%s enter\n", __func__);

        /* load the buffer */
        memcpy(bufp, packet, len);

        /* Add 4 bytes for Ethernet FCS/CRC */
        buflen = len + 4;

        ctrl = (buflen & D64_CTRL2_BC_MASK);

        /* the transmit is a single frame, so set SOF and EOF */
        /* also set int on completion */
        flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;

        /* txout points to the descriptor to use */
        /* if last descriptor then set EOT */
        if (txout == (TX_BUF_NUM - 1)) {
                flags |= D64_CTRL1_EOT;
                last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
        }

        /* write the descriptor */
        descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
        descp->addrlow = (uint32_t)bufp;
        descp->addrhigh = 0;
        descp->ctrl1 = flags;
        descp->ctrl2 = ctrl;

        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)(descp + sizeof(dma64dd_t)));
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)(bufp + TX_BUF_SIZE));

        /* now update the dma last descriptor */
        writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);

        /* tx dma should be enabled so packet should go out */

        /* update txout */
        dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);

        return 0;
}

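/*
 * Return true if any of the TX interrupt bits (I_XI0..I_XI3) are set in the
 * GMAC interrupt status register; those bits are then written back cleared.
 */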
bool gmac_check_tx_done(struct eth_dma *dma)
{
        /* wait for tx to complete */
        uint32_t intstatus;
        bool xfrdone = false;

        debug("%s enter\n", __func__);

        intstatus = readl(GMAC0_INT_STATUS_ADDR);

        debug("int(0x%x)\n", intstatus);
        if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
                xfrdone = true;
                /* clear the int bits */
                intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
                writel(intstatus, GMAC0_INT_STATUS_ADDR);
        } else {
                debug("Tx int(0x%x)\n", intstatus);
        }

        return xfrdone;
}

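/*
 * Poll the RX ring for a newly completed frame.  Returns the number of
 * bytes copied into the caller's buffer, or -1 if no frame is pending.
 */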
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
        void *bufp, *datap;
        size_t rcvlen = 0, buflen = 0;
        uint32_t stat0 = 0, stat1 = 0;
        uint32_t control, offset;
        uint8_t statbuf[HWRXOFF*2];

        int index, curr, active;
        dma64dd_t *descp = NULL;

        /* udelay(50); */

        /*
         * This checks whether a packet has been received.  If so, the
         * frame is copied into the caller's buffer, the current descriptor
         * index is advanced to the next descriptor, the buffer is put back
         * on the ring and the last-descriptor pointer is updated to this
         * descriptor.
         */
        index = dma->cur_rx_index;
        offset = (uint32_t)(dma->rx_desc_aligned);
        stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
        stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
        curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
        active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

        /* check if any frame */
        if (index == curr)
                return -1;

        debug("received packet\n");
        debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
        /* reference 'active' to avoid an unused-variable warning */
        if (index == active)
                ;

        /* get the packet pointer that corresponds to the rx descriptor */
        bufp = dma->rx_buf + index * RX_BUF_SIZE;

        descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)(descp + sizeof(dma64dd_t)));
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)(bufp + RX_BUF_SIZE));

        buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

        stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
        stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

        debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
              (uint32_t)bufp, index, buflen, stat0, stat1);

        dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

        /* get buffer offset */
        control = readl(GMAC0_DMA_RX_CTRL_ADDR);
        offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
        rcvlen = *(uint16_t *)bufp;

        debug("Received %d bytes\n", rcvlen);
        /* copy status into temp buf then copy data from rx buffer */
        memcpy(statbuf, bufp, offset);
        datap = (void *)((uint32_t)bufp + offset);
        memcpy(buf, datap, rcvlen);

        /* update descriptor that is being added back on ring */
        descp->ctrl2 = RX_BUF_SIZE;
        descp->addrlow = (uint32_t)bufp;
        descp->addrhigh = 0;
        /* flush descriptor */
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)(descp + sizeof(dma64dd_t)));

        /* set the lastdscr for the rx ring */
        writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

        return (int)rcvlen;
}

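/*
 * Disable the TX or RX DMA engine (dir selects MAC_DMA_TX or MAC_DMA_RX),
 * waiting for it to report the disabled state.  Returns non-zero once the
 * engine is confirmed disabled.
 */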
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
        int status;

        debug("%s enter\n", __func__);

        if (dir == MAC_DMA_TX) {
                /* address PR8249/PR7577 issue */
                /* suspend tx DMA first */
                writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
                                     D64_XS0_XS_MASK)) !=
                          D64_XS0_XS_DISABLED) &&
                         (status != D64_XS0_XS_IDLE) &&
                         (status != D64_XS0_XS_STOPPED), 10000);

                /*
                 * PR2414 WAR: DMA engines are not disabled until
                 * transfer finishes
                 */
                writel(0, GMAC0_DMA_TX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
                                     D64_XS0_XS_MASK)) !=
                          D64_XS0_XS_DISABLED), 10000);

                /* wait for the last transaction to complete */
                udelay(2);

                status = (status == D64_XS0_XS_DISABLED);
        } else {
                /*
                 * PR2414 WAR: DMA engines are not disabled until
                 * transfer finishes
                 */
                writel(0, GMAC0_DMA_RX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
                                     D64_RS0_RS_MASK)) !=
                          D64_RS0_RS_DISABLED), 10000);

                status = (status == D64_RS0_RS_DISABLED);
        }

        return status;
}

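/*
 * Enable the TX or RX DMA engine, resetting the corresponding ring index.
 * For RX this also applies the parity/overflow-continue flags, keeps the
 * default burst length and programs the receive header offset (HWRXOFF).
 */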
static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
        uint32_t control;

        debug("%s enter\n", __func__);

        if (dir == MAC_DMA_TX) {
                dma->cur_tx_index = 0;

                /*
                 * These bits 20:18 (burstLen) of control register can be
                 * written but will take effect only if these bits are
                 * valid. So this will not affect previous versions
                 * of the DMA. They will continue to have those bits set to 0.
                 */
                control = readl(GMAC0_DMA_TX_CTRL_ADDR);

                control |= D64_XC_XE;
                if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
                        control |= D64_XC_PD;

                writel(control, GMAC0_DMA_TX_CTRL_ADDR);

                /* initialize the DMA channel */
                writel((uint32_t)(dma->tx_desc_aligned),
                       GMAC0_DMA_TX_ADDR_LOW_ADDR);
                writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
        } else {
                dma->cur_rx_index = 0;

                control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
                           D64_RC_AE) | D64_RC_RE;

                if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
                        control |= D64_RC_PD;

                if (g_dmactrlflags & DMA_CTRL_ROC)
                        control |= D64_RC_OC;

                /*
                 * These bits 20:18 (burstLen) of control register can be
                 * written but will take effect only if these bits are
                 * valid. So this will not affect previous versions
                 * of the DMA. They will continue to have those bits set to 0.
                 */
                control &= ~D64_RC_BL_MASK;
                /* Keep default Rx burstlen */
                control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
                control |= HWRXOFF << D64_RC_RO_SHIFT;

                writel(control, GMAC0_DMA_RX_CTRL_ADDR);

                /*
                 * the rx descriptor ring should have
                 * the addresses set properly;
                 * set the lastdscr for the rx ring
                 */
                writel(((uint32_t)(dma->rx_desc_aligned) +
                        (RX_BUF_NUM - 1) * RX_BUF_SIZE) &
                       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
        }

        return 0;
}

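/*
 * Wait up to roughly 'timeout' microseconds for the MII management
 * interface to go idle.  Returns true if it is still busy when the wait
 * expires.
 */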
bool gmac_mii_busywait(unsigned int timeout)
{
        uint32_t tmp = 0;

        while (timeout > 10) {
                tmp = readl(GMAC_MII_CTRL_ADDR);
                if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
                        udelay(10);
                        timeout -= 10;
                } else {
                        break;
                }
        }
        return tmp & (1 << GMAC_MII_BUSY_SHIFT);
}

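/*
 * Read a PHY register over MDIO.  Returns the 16-bit register value, or -1
 * if the MII interface stays busy before or after issuing the read command.
 */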
int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
        uint32_t tmp = 0;
        u16 value = 0;

        /* Busy wait timeout is 1ms */
        if (gmac_mii_busywait(1000)) {
                error("%s: Prepare MII read: MII/MDIO busy\n", __func__);
                return -1;
        }

        /* Read operation */
        tmp = GMAC_MII_DATA_READ_CMD;
        tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
                (reg << GMAC_MII_PHY_REG_SHIFT);
        debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
        writel(tmp, GMAC_MII_DATA_ADDR);

        if (gmac_mii_busywait(1000)) {
                error("%s: MII read failure: MII/MDIO busy\n", __func__);
                return -1;
        }

        value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
        debug("MII read data 0x%x\n", value);
        return value;
}

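/*
 * Write a 16-bit value to a PHY register over MDIO.  Returns 0 on success,
 * or -1 if the MII interface stays busy before or after the write command.
 */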
int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
                      u16 value)
{
        uint32_t tmp = 0;

        /* Busy wait timeout is 1ms */
        if (gmac_mii_busywait(1000)) {
                error("%s: Prepare MII write: MII/MDIO busy\n", __func__);
                return -1;
        }

        /* Write operation */
        tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
        tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
                (reg << GMAC_MII_PHY_REG_SHIFT));
        debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
              tmp, phyaddr, reg, value);
        writel(tmp, GMAC_MII_DATA_ADDR);

        if (gmac_mii_busywait(1000)) {
                error("%s: MII write failure: MII/MDIO busy\n", __func__);
                return -1;
        }

        return 0;
}

void gmac_init_reset(void)
{
        debug("%s enter\n", __func__);

        /* set command config reg CC_SR */
        reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
        udelay(GMAC_RESET_DELAY);
}

void gmac_clear_reset(void)
{
        debug("%s enter\n", __func__);

        /* clear command config reg CC_SR */
        reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
        udelay(GMAC_RESET_DELAY);
}

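/*
 * Enable or disable the MAC TX/RX paths: clear rx_ena/tx_ena while the MAC
 * is held in software reset, then (if enabling) set them again once the MAC
 * is out of reset.
 */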
static void gmac_enable_local(bool en)
{
        uint32_t cmdcfg;

        debug("%s enter\n", __func__);

        /* read command config reg */
        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

        /* put mac in reset */
        gmac_init_reset();

        cmdcfg |= CC_SR;

        /* first deassert rx_ena and tx_ena while in reset */
        cmdcfg &= ~(CC_RE | CC_TE);
        /* write command config reg */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

        /* bring mac out of reset */
        gmac_clear_reset();

        /* if not enabling, exit now */
        if (!en)
                return;

        /* enable the mac transmit and receive paths now */
        udelay(2);
        cmdcfg &= ~CC_SR;
        cmdcfg |= (CC_RE | CC_TE);

        /* assert rx_ena and tx_ena when out of reset to enable the mac */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

        return;
}

int gmac_enable(void)
{
        gmac_enable_local(1);

        /* clear interrupts */
        writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
        return 0;
}

int gmac_disable(void)
{
        gmac_enable_local(0);
        return 0;
}

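/*
 * Program the MAC for 10/100/1000 Mbps and half/full duplex by updating the
 * speed and half-duplex fields of the UniMAC command config register.
 */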
int gmac_set_speed(int speed, int duplex)
{
        uint32_t cmdcfg;
        uint32_t hd_ena;
        uint32_t speed_cfg;

        hd_ena = duplex ? 0 : CC_HD;
        if (speed == 1000) {
                speed_cfg = 2;
        } else if (speed == 100) {
                speed_cfg = 1;
        } else if (speed == 10) {
                speed_cfg = 0;
        } else {
                error("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
                return -1;
        }

        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        cmdcfg &= ~(CC_ES_MASK | CC_HD);
        cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);

        printf("Change GMAC speed to %d Mbps\n", speed);
        debug("GMAC speed cfg 0x%x\n", cmdcfg);
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

        return 0;
}

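/*
 * Program the station MAC address into the UniMAC MSB/LSB address registers
 * (first four bytes in the MSB register, last two in the LSB register).
 */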
int gmac_set_mac_addr(unsigned char *mac)
{
        /* set our local address */
        debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
              mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
        writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);

        return 0;
}

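/*
 * One-time GMAC/AMAC bring-up: take the AMAC core out of reset, set up the
 * UniMAC command config register, initialize the DMA rings, put the switch
 * into bypass mode, route MDIO to the internal GPHY and set the MDC clock
 * divider, then configure interrupts, flow control and the max frame length.
 */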
int gmac_mac_init(struct eth_device *dev)
{
        struct eth_info *eth = (struct eth_info *)(dev->priv);
        struct eth_dma *dma = &(eth->dma);

        uint32_t tmp;
        uint32_t cmdcfg;
        int chipid;

        debug("%s enter\n", __func__);

        /* Always use GMAC0 */
        printf("Using GMAC%d\n", 0);

        /* Reset AMAC0 core */
        writel(0, AMAC0_IDM_RESET_ADDR);
        tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
        /* Set clock */
        tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
        tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
        /* Set Tx clock */
        tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
        writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

        /* reset gmac */
        /*
         * As AMAC is just reset, NO need?
         * set eth_data into loopback mode to ensure no rx traffic
         * gmac_loopback(eth_data, TRUE);
         * ET_TRACE(("%s gmac loopback\n", __func__));
         * udelay(1);
         */

        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
                    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
                    CC_PAD_EN | CC_PF);
        cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
        /* put mac in reset */
        gmac_init_reset();
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
        gmac_clear_reset();

        /* enable clear MIB on read */
        reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
        /* PHY: set smi_master to drive mdc_clk */
        reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

        /* clear persistent sw intstatus */
        writel(0, GMAC0_INT_STATUS_ADDR);

        if (dma_init(dma) < 0) {
                error("%s: GMAC dma_init failed\n", __func__);
                goto err_exit;
        }

        chipid = CHIPID;
        printf("%s: Chip ID: 0x%x\n", __func__, chipid);

        /* set switch bypass mode */
        tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
        tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

        /* Switch mode */
        /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

        writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

        tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
        tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
        writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

        /* Set MDIO to internal GPHY */
        tmp = readl(GMAC_MII_CTRL_ADDR);
        /* Select internal MDC/MDIO bus */
        tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
        /* select MDC/MDIO connecting to on-chip internal PHYs */
        tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
        /*
         * give bit[6:0](MDCDIV) the required divisor to set
         * the MDC clock frequency, 66MHz/0x1A = 2.5MHz
         */
        tmp |= 0x1A;

        writel(tmp, GMAC_MII_CTRL_ADDR);

        if (gmac_mii_busywait(1000)) {
                error("%s: Configure MDIO: MII/MDIO busy\n", __func__);
                goto err_exit;
        }

        /* Configure GMAC0 */
        /* enable one rx interrupt per received frame */
        writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

        /* read command config reg */
        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        /* enable 802.3x tx flow control (honor received PAUSE frames) */
        cmdcfg &= ~CC_RPI;
        /* enable promiscuous mode */
        cmdcfg |= CC_PROM;
        /* Disable loopback mode */
        cmdcfg &= ~CC_ML;
        /* set the speed */
        cmdcfg &= ~(CC_ES_MASK | CC_HD);
        /* Set to 1Gbps and full duplex by default */
        cmdcfg |= (2 << CC_ES_SHIFT);

        /* put mac in reset */
        gmac_init_reset();
        /* write register */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
        /* bring mac out of reset */
        gmac_clear_reset();

        /* set max frame lengths; account for possible vlan tag */
        writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

        return 0;

err_exit:
        dma_deinit(dma);
        return -1;
}

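/*
 * Allocate the TX/RX descriptor rings and packet buffers and hook the GMAC
 * callbacks into the generic bcm-sf2 ethernet driver structures.
 */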
int gmac_add(struct eth_device *dev)
{
        struct eth_info *eth = (struct eth_info *)(dev->priv);
        struct eth_dma *dma = &(eth->dma);
        void *tmp;

        /*
         * Descriptors must be 16-byte aligned, but malloc only guarantees
         * 8-byte alignment, so over-allocate and round up.
         */
        tmp = malloc(sizeof(dma64dd_t) * TX_BUF_NUM + 8);
        if (tmp == NULL) {
                printf("%s: Failed to allocate TX desc Buffer\n", __func__);
                return -1;
        }

        dma->tx_desc = (void *)tmp;
        dma->tx_desc_aligned = (void *)(((uint32_t)tmp + 0xf) & (~0xf));
        debug("TX Descriptor Buffer: %p; length: 0x%x\n",
              dma->tx_desc_aligned, sizeof(dma64dd_t) * TX_BUF_NUM);

        tmp = malloc(TX_BUF_SIZE * TX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate TX Data Buffer\n", __func__);
                free(dma->tx_desc);
                return -1;
        }
        dma->tx_buf = (uint8_t *)tmp;
        debug("TX Data Buffer: %p; length: 0x%x\n",
              dma->tx_buf, TX_BUF_SIZE * TX_BUF_NUM);

        /* Descriptors must be 16-byte aligned (see above) */
        tmp = malloc(sizeof(dma64dd_t) * RX_BUF_NUM + 8);
        if (tmp == NULL) {
                printf("%s: Failed to allocate RX Descriptor\n", __func__);
                free(dma->tx_desc);
                free(dma->tx_buf);
                return -1;
        }
        dma->rx_desc = tmp;
        dma->rx_desc_aligned = (void *)(((uint32_t)tmp + 0xf) & (~0xf));
        debug("RX Descriptor Buffer: %p, length: 0x%x\n",
              dma->rx_desc_aligned, sizeof(dma64dd_t) * RX_BUF_NUM);

        tmp = malloc(RX_BUF_SIZE * RX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate RX Data Buffer\n", __func__);
                free(dma->tx_desc);
                free(dma->tx_buf);
                free(dma->rx_desc);
                return -1;
        }
        dma->rx_buf = tmp;
        debug("RX Data Buffer: %p; length: 0x%x\n",
              dma->rx_buf, RX_BUF_SIZE * RX_BUF_NUM);

        g_dmactrlflags = 0;

        eth->phy_interface = PHY_INTERFACE_MODE_GMII;

        dma->tx_packet = gmac_tx_packet;
        dma->check_tx_done = gmac_check_tx_done;

        dma->check_rx_done = gmac_check_rx_done;

        dma->enable_dma = gmac_enable_dma;
        dma->disable_dma = gmac_disable_dma;

        eth->miiphy_read = gmac_miiphy_read;
        eth->miiphy_write = gmac_miiphy_write;

        eth->mac_init = gmac_mac_init;
        eth->disable_mac = gmac_disable;
        eth->enable_mac = gmac_enable;
        eth->set_mac_addr = gmac_set_mac_addr;
        eth->set_mac_speed = gmac_set_speed;

        return 0;
}