/* uboot/drivers/net/mvneta.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
   4 *
   5 * U-Boot version:
   6 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
   7 *
   8 * Based on the Linux version which is:
   9 * Copyright (C) 2012 Marvell
  10 *
  11 * Rami Rosen <rosenr@marvell.com>
  12 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  13 */
  14
  15#include <common.h>
  16#include <dm.h>
  17#include <net.h>
  18#include <netdev.h>
  19#include <config.h>
  20#include <malloc.h>
  21#include <asm/io.h>
  22#include <linux/errno.h>
  23#include <phy.h>
  24#include <miiphy.h>
  25#include <watchdog.h>
  26#include <asm/arch/cpu.h>
  27#include <asm/arch/soc.h>
  28#include <linux/compat.h>
  29#include <linux/mbus.h>
  30#include <asm-generic/gpio.h>
  31
  32DECLARE_GLOBAL_DATA_PTR;
  33
  34#if !defined(CONFIG_PHYLIB)
  35# error Marvell mvneta requires PHYLIB
  36#endif
  37
  38#define CONFIG_NR_CPUS          1
  39#define ETH_HLEN                14      /* Total octets in header */
  40
  41/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
  42#define WRAP                    (2 + ETH_HLEN + 4 + 32)
  43#define MTU                     1500
  44#define RX_BUFFER_SIZE          (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
  45
  46#define MVNETA_SMI_TIMEOUT                      10000
  47
  48/* Registers */
  49#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
  50#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(1)
  51#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
  52#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
  53#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
  54#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
  55#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
  56#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
  57#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
  58#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
  59#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
  60#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
  61#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
  62#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
  63#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
  64#define MVNETA_PORT_RX_RESET                    0x1cc0
  65#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
  66#define MVNETA_PHY_ADDR                         0x2000
  67#define      MVNETA_PHY_ADDR_MASK               0x1f
  68#define MVNETA_SMI                              0x2004
  69#define      MVNETA_PHY_REG_MASK                0x1f
  70/* SMI register fields */
  71#define     MVNETA_SMI_DATA_OFFS                0       /* Data */
  72#define     MVNETA_SMI_DATA_MASK                (0xffff << MVNETA_SMI_DATA_OFFS)
  73#define     MVNETA_SMI_DEV_ADDR_OFFS            16      /* PHY device address */
  74#define     MVNETA_SMI_REG_ADDR_OFFS            21      /* PHY device reg addr*/
  75#define     MVNETA_SMI_OPCODE_OFFS              26      /* Write/Read opcode */
  76#define     MVNETA_SMI_OPCODE_READ              (1 << MVNETA_SMI_OPCODE_OFFS)
  77#define     MVNETA_SMI_READ_VALID               (1 << 27)       /* Read Valid */
  78#define     MVNETA_SMI_BUSY                     (1 << 28)       /* Busy */
  79#define MVNETA_MBUS_RETRY                       0x2010
  80#define MVNETA_UNIT_INTR_CAUSE                  0x2080
  81#define MVNETA_UNIT_CONTROL                     0x20B0
  82#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
  83#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
  84#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
  85#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
  86#define MVNETA_WIN_SIZE_MASK                    (0xffff0000)
  87#define MVNETA_BASE_ADDR_ENABLE                 0x2290
  88#define      MVNETA_BASE_ADDR_ENABLE_BIT        0x1
  89#define MVNETA_PORT_ACCESS_PROTECT              0x2294
  90#define      MVNETA_PORT_ACCESS_PROTECT_WIN0_RW 0x3
  91#define MVNETA_PORT_CONFIG                      0x2400
  92#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
  93#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
  94#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
  95#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
  96#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
  97#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
  98#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
  99#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
 100#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
 101                                                 MVNETA_DEF_RXQ_ARP(q)   | \
 102                                                 MVNETA_DEF_RXQ_TCP(q)   | \
 103                                                 MVNETA_DEF_RXQ_UDP(q)   | \
 104                                                 MVNETA_DEF_RXQ_BPDU(q)  | \
 105                                                 MVNETA_TX_UNSET_ERR_SUM | \
 106                                                 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
 107#define MVNETA_PORT_CONFIG_EXTEND                0x2404
 108#define MVNETA_MAC_ADDR_LOW                      0x2414
 109#define MVNETA_MAC_ADDR_HIGH                     0x2418
 110#define MVNETA_SDMA_CONFIG                       0x241c
 111#define      MVNETA_SDMA_BRST_SIZE_16            4
 112#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
 113#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
 114#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
 115#define      MVNETA_DESC_SWAP                    BIT(6)
 116#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
 117#define MVNETA_PORT_STATUS                       0x2444
 118#define      MVNETA_TX_IN_PRGRS                  BIT(1)
 119#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 120#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 121#define MVNETA_SERDES_CFG                        0x24A0
 122#define      MVNETA_SGMII_SERDES_PROTO           0x0cc7
 123#define      MVNETA_QSGMII_SERDES_PROTO          0x0667
 124#define MVNETA_TYPE_PRIO                         0x24bc
 125#define      MVNETA_FORCE_UNI                    BIT(21)
 126#define MVNETA_TXQ_CMD_1                         0x24e4
 127#define MVNETA_TXQ_CMD                           0x2448
 128#define      MVNETA_TXQ_DISABLE_SHIFT            8
 129#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
 130#define MVNETA_ACC_MODE                          0x2500
 131#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 132#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 133#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 134#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
 135
 136/* Exception Interrupt Port/Queue Cause register */
 137
 138#define MVNETA_INTR_NEW_CAUSE                    0x25a0
 139#define MVNETA_INTR_NEW_MASK                     0x25a4
 140
 141/* bits  0..7  = TXQ SENT, one bit per queue.
 142 * bits  8..15 = RXQ OCCUP, one bit per queue.
 143 * bits 16..23 = RXQ FREE, one bit per queue.
 144 * bit  29 = OLD_REG_SUM, see old reg ?
 145 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 146 * bit  31 = MISC_SUM,   one bit for 4 ports
 147 */
 148#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
 149#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 150#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 151#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
 152
 153#define MVNETA_INTR_OLD_CAUSE                    0x25a8
 154#define MVNETA_INTR_OLD_MASK                     0x25ac
 155
 156/* Data Path Port/Queue Cause Register */
 157#define MVNETA_INTR_MISC_CAUSE                   0x25b0
 158#define MVNETA_INTR_MISC_MASK                    0x25b4
 159#define MVNETA_INTR_ENABLE                       0x25b8
 160
 161#define MVNETA_RXQ_CMD                           0x2680
 162#define      MVNETA_RXQ_DISABLE_SHIFT            8
 163#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
 164#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
 165#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
 166#define MVNETA_GMAC_CTRL_0                       0x2c00
 167#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 168#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 169#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 170#define MVNETA_GMAC_CTRL_2                       0x2c08
 171#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 172#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 173#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 174#define MVNETA_GMAC_STATUS                       0x2c10
 175#define      MVNETA_GMAC_LINK_UP                 BIT(0)
 176#define      MVNETA_GMAC_SPEED_1000              BIT(1)
 177#define      MVNETA_GMAC_SPEED_100               BIT(2)
 178#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
 179#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
 180#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 181#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 182#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
 183#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 184#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 185#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 186#define      MVNETA_GMAC_FORCE_LINK_UP           (BIT(0) | BIT(1))
 187#define      MVNETA_GMAC_IB_BYPASS_AN_EN         BIT(3)
 188#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 189#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 190#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 191#define      MVNETA_GMAC_SET_FC_EN               BIT(8)
 192#define      MVNETA_GMAC_ADVERT_FC_EN            BIT(9)
 193#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 194#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 195#define      MVNETA_GMAC_SAMPLE_TX_CFG_EN        BIT(15)
 196#define MVNETA_MIB_COUNTERS_BASE                 0x3080
 197#define      MVNETA_MIB_LATE_COLLISION           0x7c
 198#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 199#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
 200#define MVNETA_DA_FILT_UCAST_BASE                0x3600
 201#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
 202#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
 203#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
 204#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
 205#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
 206#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
 207#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
 208#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
 209#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
 210#define MVNETA_PORT_TX_RESET                     0x3cf0
 211#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
 212#define MVNETA_TX_MTU                            0x3e0c
 213#define MVNETA_TX_TOKEN_SIZE                     0x3e14
 214#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
 215#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 216#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 217
 218/* Descriptor ring Macros */
 219#define MVNETA_QUEUE_NEXT_DESC(q, index)        \
 220        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
 221
 222/* Various constants */
 223
 224/* Coalescing */
 225#define MVNETA_TXDONE_COAL_PKTS         16
 226#define MVNETA_RX_COAL_PKTS             32
 227#define MVNETA_RX_COAL_USEC             100
 228
 229/* The two bytes Marvell header. Either contains a special value used
 230 * by Marvell switches when a specific hardware mode is enabled (not
 231 * supported by this driver) or is filled automatically by zeroes on
 232 * the RX side. Those two bytes being at the front of the Ethernet
 233 * header, they allow to have the IP header aligned on a 4 bytes
 234 * boundary automatically: the hardware skips those two bytes on its
 235 * own.
 236 */
 237#define MVNETA_MH_SIZE                  2
 238
 239#define MVNETA_VLAN_TAG_LEN             4
 240
 241#define MVNETA_CPU_D_CACHE_LINE_SIZE    32
 242#define MVNETA_TX_CSUM_MAX_SIZE         9800
 243#define MVNETA_ACC_MODE_EXT             1
 244
 245/* Timeout constants */
 246#define MVNETA_TX_DISABLE_TIMEOUT_MSEC  1000
 247#define MVNETA_RX_DISABLE_TIMEOUT_MSEC  1000
 248#define MVNETA_TX_FIFO_EMPTY_TIMEOUT    10000
 249
 250#define MVNETA_TX_MTU_MAX               0x3ffff
 251
 252/* Max number of Rx descriptors */
 253#define MVNETA_MAX_RXD 16
 254
 255/* Max number of Tx descriptors */
 256#define MVNETA_MAX_TXD 16
 257
 258/* descriptor aligned size */
 259#define MVNETA_DESC_ALIGNED_SIZE        32
 260
/* Per-port driver private state; one instance per NETA Ethernet port */
struct mvneta_port {
	void __iomem *base;		/* mapped base of this port's register block */
	struct mvneta_rx_queue *rxqs;	/* array of rxq_number RX queue states */
	struct mvneta_tx_queue *txqs;	/* array of txq_number TX queue states */

	/* Per-entry markers for the "other" multicast filter table;
	 * cleared/set wholesale in mvneta_set_other_mcast_table().
	 */
	u8 mcast_count[256];
	u16 tx_ring_size;		/* number of TX descriptors per ring */
	u16 rx_ring_size;		/* number of RX descriptors per ring */

	phy_interface_t phy_interface;	/* RGMII/SGMII/QSGMII etc. */
	unsigned int link;		/* cached link state */
	unsigned int duplex;		/* cached duplex setting */
	unsigned int speed;		/* cached link speed */

	int init;			/* non-zero once one-time init has run */
	int phyaddr;			/* PHY address; > PHY_MAX_ADDR marks a fixed link */
	struct phy_device *phydev;	/* attached PHY, if any */
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio; /* optional PHY reset GPIO */
#endif
	struct mii_dev *bus;		/* MDIO bus used to reach the PHY */
};
 283
 284/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 285 * layout of the transmit and reception DMA descriptors, and their
 286 * layout is therefore defined by the hardware design
 287 */
 288
 289#define MVNETA_TX_L3_OFF_SHIFT  0
 290#define MVNETA_TX_IP_HLEN_SHIFT 8
 291#define MVNETA_TX_L4_UDP        BIT(16)
 292#define MVNETA_TX_L3_IP6        BIT(17)
 293#define MVNETA_TXD_IP_CSUM      BIT(18)
 294#define MVNETA_TXD_Z_PAD        BIT(19)
 295#define MVNETA_TXD_L_DESC       BIT(20)
 296#define MVNETA_TXD_F_DESC       BIT(21)
 297#define MVNETA_TXD_FLZ_DESC     (MVNETA_TXD_Z_PAD  | \
 298                                 MVNETA_TXD_L_DESC | \
 299                                 MVNETA_TXD_F_DESC)
 300#define MVNETA_TX_L4_CSUM_FULL  BIT(30)
 301#define MVNETA_TX_L4_CSUM_NOT   BIT(31)
 302
 303#define MVNETA_RXD_ERR_CRC              0x0
 304#define MVNETA_RXD_ERR_SUMMARY          BIT(16)
 305#define MVNETA_RXD_ERR_OVERRUN          BIT(17)
 306#define MVNETA_RXD_ERR_LEN              BIT(18)
 307#define MVNETA_RXD_ERR_RESOURCE         (BIT(17) | BIT(18))
 308#define MVNETA_RXD_ERR_CODE_MASK        (BIT(17) | BIT(18))
 309#define MVNETA_RXD_L3_IP4               BIT(25)
 310#define MVNETA_RXD_FIRST_LAST_DESC      (BIT(26) | BIT(27))
 311#define MVNETA_RXD_L4_CSUM_OK           BIT(30)
 312
/* TX DMA descriptor — layout is defined by the hardware; do not reorder
 * or resize fields. (Field name "reserverd1" is a historical typo, kept
 * for source compatibility.)
 */
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use)             */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer  */
	u32  reserved2;		/* hw_cmd - (for future use, PMT)       */
	u32  reserved3[4];	/* Reserved - (for future use)          */
};
 321
/* RX DMA descriptor — layout is defined by the hardware; do not reorder
 * or resize fields.
 */
struct mvneta_rx_desc {
	u32  status;		/* Info about received packet           */
	u16  reserved1;		/* pnc_info - (for future use, PnC)     */
	u16  data_size;		/* Size of received packet in bytes     */

	u32  buf_phys_addr;	/* Physical address of the buffer       */
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)   */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use         */
	u16  reserved4;		/* csum_l4 - (for future use, PnC)      */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)  */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
 337
/* Software bookkeeping state for one transmit queue */
struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor (size - 1, wrap point) */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};
 363
/* Software bookkeeping state for one receive queue */
struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor (size - 1, wrap point) */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};
 383
/* U-Boot doesn't use the queues, so set the number to 1 */
static int rxq_number = 1;	/* number of RX queues in use */
static int txq_number = 1;	/* number of TX queues in use */
static int rxq_def;		/* default RX queue index (0) */
 388
/* Locations of the DMA descriptor rings and the RX data buffer area */
struct buffer_location {
	struct mvneta_tx_desc *tx_descs;	/* TX descriptor ring */
	struct mvneta_rx_desc *rx_descs;	/* RX descriptor ring */
	u32 rx_buffers;		/* RX data buffer area, kept as a 32-bit address */
};
 394
/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;
 400
 401/*
 402 * Page table entries are set to 1MB, or multiples of 1MB
 403 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
 404 */
 405#define BD_SPACE        (1 << 20)
 406
/*
 * Dummy implementation that can be overwritten by a board
 * specific function to e.g. enable board-level network hardware.
 *
 * @bus: MDIO bus the board hook may operate on
 * Return: 0 on success (default implementation always succeeds)
 */
__weak int board_network_enable(struct mii_dev *bus)
{
	return 0;
}
 415
 416/* Utility/helper methods */
 417
/* Write helper: store @data into the port register at byte @offset */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}
 423
/* Read helper: return the port register value at byte @offset */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}
 429
 430/* Clear all MIB counters */
 431static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 432{
 433        int i;
 434
 435        /* Perform dummy reads from MIB counters */
 436        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
 437                mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
 438}
 439
 440/* Rx descriptors helper methods */
 441
 442/* Checks whether the RX descriptor having this status is both the first
 443 * and the last descriptor for the RX packet. Each RX packet is currently
 444 * received through a single RX descriptor, so not having each RX
 445 * descriptor with its first and last bits set is an error
 446 */
 447static int mvneta_rxq_desc_is_first_last(u32 status)
 448{
 449        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 450                MVNETA_RXD_FIRST_LAST_DESC;
 451}
 452
 453/* Add number of descriptors ready to receive new packets */
 454static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
 455                                          struct mvneta_rx_queue *rxq,
 456                                          int ndescs)
 457{
 458        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
 459         * be added at once
 460         */
 461        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
 462                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 463                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
 464                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 465                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
 466        }
 467
 468        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 469                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 470}
 471
 472/* Get number of RX descriptors occupied by received packets */
 473static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
 474                                        struct mvneta_rx_queue *rxq)
 475{
 476        u32 val;
 477
 478        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
 479        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
 480}
 481
 482/* Update num of rx desc called upon return from rx path or
 483 * from mvneta_rxq_drop_pkts().
 484 */
 485static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
 486                                       struct mvneta_rx_queue *rxq,
 487                                       int rx_done, int rx_filled)
 488{
 489        u32 val;
 490
 491        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
 492                val = rx_done |
 493                  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
 494                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 495                return;
 496        }
 497
 498        /* Only 255 descriptors can be added at once */
 499        while ((rx_done > 0) || (rx_filled > 0)) {
 500                if (rx_done <= 0xff) {
 501                        val = rx_done;
 502                        rx_done = 0;
 503                } else {
 504                        val = 0xff;
 505                        rx_done -= 0xff;
 506                }
 507                if (rx_filled <= 0xff) {
 508                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 509                        rx_filled = 0;
 510                } else {
 511                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 512                        rx_filled -= 0xff;
 513                }
 514                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 515        }
 516}
 517
 518/* Get pointer to next RX descriptor to be processed by SW */
 519static struct mvneta_rx_desc *
 520mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
 521{
 522        int rx_desc = rxq->next_desc_to_proc;
 523
 524        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
 525        return rxq->descs + rx_desc;
 526}
 527
 528/* Tx descriptors helper methods */
 529
 530/* Update HW with number of TX descriptors to be sent */
 531static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 532                                     struct mvneta_tx_queue *txq,
 533                                     int pend_desc)
 534{
 535        u32 val;
 536
 537        /* Only 255 descriptors can be added at once ; Assume caller
 538         * process TX descriptors in quanta less than 256
 539         */
 540        val = pend_desc;
 541        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 542}
 543
 544/* Get pointer to next TX descriptor to be processed (send) by HW */
 545static struct mvneta_tx_desc *
 546mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
 547{
 548        int tx_desc = txq->next_desc_to_proc;
 549
 550        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
 551        return txq->descs + tx_desc;
 552}
 553
 554/* Set rxq buf size */
 555static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
 556                                    struct mvneta_rx_queue *rxq,
 557                                    int buf_size)
 558{
 559        u32 val;
 560
 561        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
 562
 563        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
 564        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
 565
 566        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
 567}
 568
 569static int mvneta_port_is_fixed_link(struct mvneta_port *pp)
 570{
 571        /* phy_addr is set to invalid value for fixed link */
 572        return pp->phyaddr > PHY_MAX_ADDR;
 573}
 574
 575
 576/* Start the Ethernet port RX and TX activity */
 577static void mvneta_port_up(struct mvneta_port *pp)
 578{
 579        int queue;
 580        u32 q_map;
 581
 582        /* Enable all initialized TXs. */
 583        mvneta_mib_counters_clear(pp);
 584        q_map = 0;
 585        for (queue = 0; queue < txq_number; queue++) {
 586                struct mvneta_tx_queue *txq = &pp->txqs[queue];
 587                if (txq->descs != NULL)
 588                        q_map |= (1 << queue);
 589        }
 590        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
 591
 592        /* Enable all initialized RXQs. */
 593        q_map = 0;
 594        for (queue = 0; queue < rxq_number; queue++) {
 595                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
 596                if (rxq->descs != NULL)
 597                        q_map |= (1 << queue);
 598        }
 599        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
 600}
 601
 602/* Stop the Ethernet port activity */
 603static void mvneta_port_down(struct mvneta_port *pp)
 604{
 605        u32 val;
 606        int count;
 607
 608        /* Stop Rx port activity. Check port Rx activity. */
 609        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
 610
 611        /* Issue stop command for active channels only */
 612        if (val != 0)
 613                mvreg_write(pp, MVNETA_RXQ_CMD,
 614                            val << MVNETA_RXQ_DISABLE_SHIFT);
 615
 616        /* Wait for all Rx activity to terminate. */
 617        count = 0;
 618        do {
 619                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
 620                        netdev_warn(pp->dev,
 621                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
 622                                    val);
 623                        break;
 624                }
 625                mdelay(1);
 626
 627                val = mvreg_read(pp, MVNETA_RXQ_CMD);
 628        } while (val & 0xff);
 629
 630        /* Stop Tx port activity. Check port Tx activity. Issue stop
 631         * command for active channels only
 632         */
 633        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
 634
 635        if (val != 0)
 636                mvreg_write(pp, MVNETA_TXQ_CMD,
 637                            (val << MVNETA_TXQ_DISABLE_SHIFT));
 638
 639        /* Wait for all Tx activity to terminate. */
 640        count = 0;
 641        do {
 642                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
 643                        netdev_warn(pp->dev,
 644                                    "TIMEOUT for TX stopped status=0x%08x\n",
 645                                    val);
 646                        break;
 647                }
 648                mdelay(1);
 649
 650                /* Check TX Command reg that all Txqs are stopped */
 651                val = mvreg_read(pp, MVNETA_TXQ_CMD);
 652
 653        } while (val & 0xff);
 654
 655        /* Double check to verify that TX FIFO is empty */
 656        count = 0;
 657        do {
 658                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
 659                        netdev_warn(pp->dev,
 660                                    "TX FIFO empty timeout status=0x08%x\n",
 661                                    val);
 662                        break;
 663                }
 664                mdelay(1);
 665
 666                val = mvreg_read(pp, MVNETA_PORT_STATUS);
 667        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
 668                 (val & MVNETA_TX_IN_PRGRS));
 669
 670        udelay(200);
 671}
 672
 673/* Enable the port by setting the port enable bit of the MAC control register */
 674static void mvneta_port_enable(struct mvneta_port *pp)
 675{
 676        u32 val;
 677
 678        /* Enable port */
 679        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 680        val |= MVNETA_GMAC0_PORT_ENABLE;
 681        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 682}
 683
 684/* Disable the port and wait for about 200 usec before retuning */
 685static void mvneta_port_disable(struct mvneta_port *pp)
 686{
 687        u32 val;
 688
 689        /* Reset the Enable bit in the Serial Control Register */
 690        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 691        val &= ~MVNETA_GMAC0_PORT_ENABLE;
 692        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 693
 694        udelay(200);
 695}
 696
 697/* Multicast tables methods */
 698
 699/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
 700static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
 701{
 702        int offset;
 703        u32 val;
 704
 705        if (queue == -1) {
 706                val = 0;
 707        } else {
 708                val = 0x1 | (queue << 1);
 709                val |= (val << 24) | (val << 16) | (val << 8);
 710        }
 711
 712        for (offset = 0; offset <= 0xc; offset += 4)
 713                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
 714}
 715
 716/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
 717static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
 718{
 719        int offset;
 720        u32 val;
 721
 722        if (queue == -1) {
 723                val = 0;
 724        } else {
 725                val = 0x1 | (queue << 1);
 726                val |= (val << 24) | (val << 16) | (val << 8);
 727        }
 728
 729        for (offset = 0; offset <= 0xfc; offset += 4)
 730                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
 731}
 732
 733/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
 734static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
 735{
 736        int offset;
 737        u32 val;
 738
 739        if (queue == -1) {
 740                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
 741                val = 0;
 742        } else {
 743                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
 744                val = 0x1 | (queue << 1);
 745                val |= (val << 24) | (val << 16) | (val << 8);
 746        }
 747
 748        for (offset = 0; offset <= 0xfc; offset += 4)
 749                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
 750}
 751
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts (U-Boot runs the port fully polled) */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs (released again below) */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		/* Zero the per-TXQ token-bucket rate limiting */
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	/* Take the RX/TX DMAs back out of reset */
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	/* 64 = minimum legal Ethernet frame size */
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware if not in fixed-link mode */
	if (!mvneta_port_is_fixed_link(pp)) {
		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
		val |= MVNETA_PHY_POLLING_ENABLE;
		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
	}

	/* queue == -1 means "reject all" for every MAC filter table */
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}
 837
 838/* Set unicast address */
 839static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
 840                                  int queue)
 841{
 842        unsigned int unicast_reg;
 843        unsigned int tbl_offset;
 844        unsigned int reg_offset;
 845
 846        /* Locate the Unicast table entry */
 847        last_nibble = (0xf & last_nibble);
 848
 849        /* offset from unicast tbl base */
 850        tbl_offset = (last_nibble / 4) * 4;
 851
 852        /* offset within the above reg  */
 853        reg_offset = last_nibble % 4;
 854
 855        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
 856
 857        if (queue == -1) {
 858                /* Clear accepts frame bit at specified unicast DA tbl entry */
 859                unicast_reg &= ~(0xff << (8 * reg_offset));
 860        } else {
 861                unicast_reg &= ~(0xff << (8 * reg_offset));
 862                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
 863        }
 864
 865        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
 866}
 867
 868/* Set mac address */
 869static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
 870                                int queue)
 871{
 872        unsigned int mac_h;
 873        unsigned int mac_l;
 874
 875        if (queue != -1) {
 876                mac_l = (addr[4] << 8) | (addr[5]);
 877                mac_h = (addr[0] << 24) | (addr[1] << 16) |
 878                        (addr[2] << 8) | (addr[3] << 0);
 879
 880                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
 881                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
 882        }
 883
 884        /* Accept frames of this address */
 885        mvneta_set_ucast_addr(pp, addr[5], queue);
 886}
 887
 888static int mvneta_write_hwaddr(struct udevice *dev)
 889{
 890        mvneta_mac_addr_set(dev_get_priv(dev),
 891                ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr,
 892                rxq_def);
 893
 894        return 0;
 895}
 896
 897/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
 898static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
 899                                u32 phys_addr, u32 cookie)
 900{
 901        rx_desc->buf_cookie = cookie;
 902        rx_desc->buf_phys_addr = phys_addr;
 903}
 904
 905/* Decrement sent descriptors counter */
 906static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
 907                                     struct mvneta_tx_queue *txq,
 908                                     int sent_desc)
 909{
 910        u32 val;
 911
 912        /* Only 255 TX descriptors can be updated at once */
 913        while (sent_desc > 0xff) {
 914                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
 915                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 916                sent_desc = sent_desc - 0xff;
 917        }
 918
 919        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
 920        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 921}
 922
 923/* Get number of TX descriptors already sent by HW */
 924static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
 925                                        struct mvneta_tx_queue *txq)
 926{
 927        u32 val;
 928        int sent_desc;
 929
 930        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
 931        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
 932                MVNETA_TXQ_SENT_DESC_SHIFT;
 933
 934        return sent_desc;
 935}
 936
 937/* Display more error info */
 938static void mvneta_rx_error(struct mvneta_port *pp,
 939                            struct mvneta_rx_desc *rx_desc)
 940{
 941        u32 status = rx_desc->status;
 942
 943        if (!mvneta_rxq_desc_is_first_last(status)) {
 944                netdev_err(pp->dev,
 945                           "bad rx status %08x (buffer oversize), size=%d\n",
 946                           status, rx_desc->data_size);
 947                return;
 948        }
 949
 950        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
 951        case MVNETA_RXD_ERR_CRC:
 952                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
 953                           status, rx_desc->data_size);
 954                break;
 955        case MVNETA_RXD_ERR_OVERRUN:
 956                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
 957                           status, rx_desc->data_size);
 958                break;
 959        case MVNETA_RXD_ERR_LEN:
 960                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
 961                           status, rx_desc->data_size);
 962                break;
 963        case MVNETA_RXD_ERR_RESOURCE:
 964                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
 965                           status, rx_desc->data_size);
 966                break;
 967        }
 968}
 969
 970static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
 971                                                     int rxq)
 972{
 973        return &pp->rxqs[rxq];
 974}
 975
 976
 977/* Drop packets received by the RXQ and free buffers */
 978static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 979                                 struct mvneta_rx_queue *rxq)
 980{
 981        int rx_done;
 982
 983        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
 984        if (rx_done)
 985                mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
 986}
 987
 988/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 989static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 990                           int num)
 991{
 992        int i;
 993
 994        for (i = 0; i < num; i++) {
 995                u32 addr;
 996
 997                /* U-Boot special: Fill in the rx buffer addresses */
 998                addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
 999                mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
1000        }
1001
1002        /* Add this number of RX descriptors as non occupied (ready to
1003         * get packets)
1004         */
1005        mvneta_rxq_non_occup_desc_add(pp, rxq, i);
1006
1007        return 0;
1008}
1009
1010/* Rx/Tx queue initialization/cleanup methods */
1011
1012/* Create a specified RX queue */
1013static int mvneta_rxq_init(struct mvneta_port *pp,
1014                           struct mvneta_rx_queue *rxq)
1015
1016{
1017        rxq->size = pp->rx_ring_size;
1018
1019        /* Allocate memory for RX descriptors */
1020        rxq->descs_phys = (dma_addr_t)rxq->descs;
1021        if (rxq->descs == NULL)
1022                return -ENOMEM;
1023
1024        WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));
1025
1026        rxq->last_desc = rxq->size - 1;
1027
1028        /* Set Rx descriptors queue starting address */
1029        mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
1030        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
1031
1032        /* Fill RXQ with buffers from RX pool */
1033        mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
1034        mvneta_rxq_fill(pp, rxq, rxq->size);
1035
1036        return 0;
1037}
1038
1039/* Cleanup Rx queue */
1040static void mvneta_rxq_deinit(struct mvneta_port *pp,
1041                              struct mvneta_rx_queue *rxq)
1042{
1043        mvneta_rxq_drop_pkts(pp, rxq);
1044
1045        rxq->descs             = NULL;
1046        rxq->last_desc         = 0;
1047        rxq->next_desc_to_proc = 0;
1048        rxq->descs_phys        = 0;
1049}
1050
1051/* Create and initialize a tx queue */
1052static int mvneta_txq_init(struct mvneta_port *pp,
1053                           struct mvneta_tx_queue *txq)
1054{
1055        txq->size = pp->tx_ring_size;
1056
1057        /* Allocate memory for TX descriptors */
1058        txq->descs_phys = (dma_addr_t)txq->descs;
1059        if (txq->descs == NULL)
1060                return -ENOMEM;
1061
1062        WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));
1063
1064        txq->last_desc = txq->size - 1;
1065
1066        /* Set maximum bandwidth for enabled TXQs */
1067        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
1068        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
1069
1070        /* Set Tx descriptors queue starting address */
1071        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
1072        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
1073
1074        return 0;
1075}
1076
1077/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
1078static void mvneta_txq_deinit(struct mvneta_port *pp,
1079                              struct mvneta_tx_queue *txq)
1080{
1081        txq->descs             = NULL;
1082        txq->last_desc         = 0;
1083        txq->next_desc_to_proc = 0;
1084        txq->descs_phys        = 0;
1085
1086        /* Set minimum bandwidth for disabled TXQs */
1087        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
1088        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
1089
1090        /* Set Tx descriptors queue starting address and size */
1091        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
1092        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
1093}
1094
1095/* Cleanup all Tx queues */
1096static void mvneta_cleanup_txqs(struct mvneta_port *pp)
1097{
1098        int queue;
1099
1100        for (queue = 0; queue < txq_number; queue++)
1101                mvneta_txq_deinit(pp, &pp->txqs[queue]);
1102}
1103
1104/* Cleanup all Rx queues */
1105static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
1106{
1107        int queue;
1108
1109        for (queue = 0; queue < rxq_number; queue++)
1110                mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
1111}
1112
1113
1114/* Init all Rx queues */
1115static int mvneta_setup_rxqs(struct mvneta_port *pp)
1116{
1117        int queue;
1118
1119        for (queue = 0; queue < rxq_number; queue++) {
1120                int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
1121                if (err) {
1122                        netdev_err(pp->dev, "%s: can't create rxq=%d\n",
1123                                   __func__, queue);
1124                        mvneta_cleanup_rxqs(pp);
1125                        return err;
1126                }
1127        }
1128
1129        return 0;
1130}
1131
1132/* Init all tx queues */
1133static int mvneta_setup_txqs(struct mvneta_port *pp)
1134{
1135        int queue;
1136
1137        for (queue = 0; queue < txq_number; queue++) {
1138                int err = mvneta_txq_init(pp, &pp->txqs[queue]);
1139                if (err) {
1140                        netdev_err(pp->dev, "%s: can't create txq=%d\n",
1141                                   __func__, queue);
1142                        mvneta_cleanup_txqs(pp);
1143                        return err;
1144                }
1145        }
1146
1147        return 0;
1148}
1149
/* Kick off Rx/Tx activity on an already-configured port */
static void mvneta_start_dev(struct mvneta_port *pp)
{
	mvneta_port_enable(pp);
}
1155
/* Sync the MAC's speed/duplex/link settings with the PHY's current state */
static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	int status_change = 0;

	/* Fixed-link ports are forced up elsewhere; nothing to sync */
	if (mvneta_port_is_fixed_link(pp)) {
		debug("Using fixed link, skip link adjust\n");
		return;
	}

	if (phydev->link) {
		/* Re-program MAC speed/duplex only if the PHY changed */
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			/* Disable in-band AN and force speed/duplex bits */
			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			/* Cache what we just programmed */
			pp->duplex = phydev->duplex;
			pp->speed  = phydev->speed;
		}
	}

	/* Detect link up/down transitions */
	if (phydev->link != pp->link) {
		if (!phydev->link) {
			/* Invalidate cached speed/duplex on link loss */
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			/*
			 * NOTE(review): both FORCE_LINK_PASS and
			 * FORCE_LINK_DOWN are set here, matching the Linux
			 * driver this was ported from — confirm against the
			 * datasheet before "fixing" this combination.
			 */
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}
1216
/* Bring the port up: create all queues, sync the link, start the DMA */
static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (!ret)
		ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);
	mvneta_start_dev(pp);

	return 0;
}
1236
1237/* Initialize hw */
1238static int mvneta_init2(struct mvneta_port *pp)
1239{
1240        int queue;
1241
1242        /* Disable port */
1243        mvneta_port_disable(pp);
1244
1245        /* Set port default values */
1246        mvneta_defaults_set(pp);
1247
1248        pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
1249                           GFP_KERNEL);
1250        if (!pp->txqs)
1251                return -ENOMEM;
1252
1253        /* U-Boot special: use preallocated area */
1254        pp->txqs[0].descs = buffer_loc.tx_descs;
1255
1256        /* Initialize TX descriptor rings */
1257        for (queue = 0; queue < txq_number; queue++) {
1258                struct mvneta_tx_queue *txq = &pp->txqs[queue];
1259                txq->id = queue;
1260                txq->size = pp->tx_ring_size;
1261        }
1262
1263        pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
1264                           GFP_KERNEL);
1265        if (!pp->rxqs) {
1266                kfree(pp->txqs);
1267                return -ENOMEM;
1268        }
1269
1270        /* U-Boot special: use preallocated area */
1271        pp->rxqs[0].descs = buffer_loc.rx_descs;
1272
1273        /* Create Rx descriptor rings */
1274        for (queue = 0; queue < rxq_number; queue++) {
1275                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1276                rxq->id = queue;
1277                rxq->size = pp->rx_ring_size;
1278        }
1279
1280        return 0;
1281}
1282
/* platform glue : initialize decoding windows */

/*
 * Unlike the A380, Armada3700 has two layers of decode windows for GBE:
 * First layer is:  GbE Address window that resides inside the GBE unit,
 * Second layer is: Fabric address window which is located in the NIC400
 *                  (South Fabric).
 * To simplify the address decode configuration for Armada3700, we bypass the
 * first layer of GBE decode window by setting the first window to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leave the
	 * work to MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by clearing its disable bit */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full Access (read or write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}
1309
1310static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
1311{
1312        const struct mbus_dram_target_info *dram;
1313        u32 win_enable;
1314        u32 win_protect;
1315        int i;
1316
1317        dram = mvebu_mbus_dram_info();
1318        for (i = 0; i < 6; i++) {
1319                mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1320                mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1321
1322                if (i < 4)
1323                        mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1324        }
1325
1326        win_enable = 0x3f;
1327        win_protect = 0;
1328
1329        for (i = 0; i < dram->num_cs; i++) {
1330                const struct mbus_dram_window *cs = dram->cs + i;
1331                mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
1332                            (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
1333
1334                mvreg_write(pp, MVNETA_WIN_SIZE(i),
1335                            (cs->size - 1) & 0xffff0000);
1336
1337                win_enable &= ~(1 << i);
1338                win_protect |= 3 << (2 * i);
1339        }
1340
1341        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1342}
1343
1344/* Power up the port */
1345static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
1346{
1347        u32 ctrl;
1348
1349        /* MAC Cause register should be cleared */
1350        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
1351
1352        ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
1353
1354        /* Even though it might look weird, when we're configured in
1355         * SGMII or QSGMII mode, the RGMII bit needs to be set.
1356         */
1357        switch (phy_mode) {
1358        case PHY_INTERFACE_MODE_QSGMII:
1359                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
1360                ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
1361                break;
1362        case PHY_INTERFACE_MODE_SGMII:
1363                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
1364                ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
1365                break;
1366        case PHY_INTERFACE_MODE_RGMII:
1367        case PHY_INTERFACE_MODE_RGMII_ID:
1368                ctrl |= MVNETA_GMAC2_PORT_RGMII;
1369                break;
1370        default:
1371                return -EINVAL;
1372        }
1373
1374        /* Cancel Port Reset */
1375        ctrl &= ~MVNETA_GMAC2_PORT_RESET;
1376        mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
1377
1378        while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
1379                MVNETA_GMAC2_PORT_RESET) != 0)
1380                continue;
1381
1382        return 0;
1383}
1384
1385/* Device initialization routine */
1386static int mvneta_init(struct udevice *dev)
1387{
1388        struct eth_pdata *pdata = dev_get_platdata(dev);
1389        struct mvneta_port *pp = dev_get_priv(dev);
1390        int err;
1391
1392        pp->tx_ring_size = MVNETA_MAX_TXD;
1393        pp->rx_ring_size = MVNETA_MAX_RXD;
1394
1395        err = mvneta_init2(pp);
1396        if (err < 0) {
1397                dev_err(&pdev->dev, "can't init eth hal\n");
1398                return err;
1399        }
1400
1401        mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def);
1402
1403        err = mvneta_port_power_up(pp, pp->phy_interface);
1404        if (err < 0) {
1405                dev_err(&pdev->dev, "can't power up port\n");
1406                return err;
1407        }
1408
1409        /* Call open() now as it needs to be done before runing send() */
1410        mvneta_open(dev);
1411
1412        return 0;
1413}
1414
1415/* U-Boot only functions follow here */
1416
1417/* SMI / MDIO functions */
1418
1419static int smi_wait_ready(struct mvneta_port *pp)
1420{
1421        u32 timeout = MVNETA_SMI_TIMEOUT;
1422        u32 smi_reg;
1423
1424        /* wait till the SMI is not busy */
1425        do {
1426                /* read smi register */
1427                smi_reg = mvreg_read(pp, MVNETA_SMI);
1428                if (timeout-- == 0) {
1429                        printf("Error: SMI busy timeout\n");
1430                        return -EFAULT;
1431                }
1432        } while (smi_reg & MVNETA_SMI_BUSY);
1433
1434        return 0;
1435}
1436
1437/*
1438 * mvneta_mdio_read - miiphy_read callback function.
1439 *
1440 * Returns 16bit phy register value, or 0xffff on error
1441 */
1442static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
1443{
1444        struct mvneta_port *pp = bus->priv;
1445        u32 smi_reg;
1446        u32 timeout;
1447
1448        /* check parameters */
1449        if (addr > MVNETA_PHY_ADDR_MASK) {
1450                printf("Error: Invalid PHY address %d\n", addr);
1451                return -EFAULT;
1452        }
1453
1454        if (reg > MVNETA_PHY_REG_MASK) {
1455                printf("Err: Invalid register offset %d\n", reg);
1456                return -EFAULT;
1457        }
1458
1459        /* wait till the SMI is not busy */
1460        if (smi_wait_ready(pp) < 0)
1461                return -EFAULT;
1462
1463        /* fill the phy address and regiser offset and read opcode */
1464        smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS)
1465                | (reg << MVNETA_SMI_REG_ADDR_OFFS)
1466                | MVNETA_SMI_OPCODE_READ;
1467
1468        /* write the smi register */
1469        mvreg_write(pp, MVNETA_SMI, smi_reg);
1470
1471        /* wait till read value is ready */
1472        timeout = MVNETA_SMI_TIMEOUT;
1473
1474        do {
1475                /* read smi register */
1476                smi_reg = mvreg_read(pp, MVNETA_SMI);
1477                if (timeout-- == 0) {
1478                        printf("Err: SMI read ready timeout\n");
1479                        return -EFAULT;
1480                }
1481        } while (!(smi_reg & MVNETA_SMI_READ_VALID));
1482
1483        /* Wait for the data to update in the SMI register */
1484        for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++)
1485                ;
1486
1487        return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK;
1488}
1489
1490/*
1491 * mvneta_mdio_write - miiphy_write callback function.
1492 *
1493 * Returns 0 if write succeed, -EINVAL on bad parameters
1494 * -ETIME on timeout
1495 */
1496static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
1497                             u16 value)
1498{
1499        struct mvneta_port *pp = bus->priv;
1500        u32 smi_reg;
1501
1502        /* check parameters */
1503        if (addr > MVNETA_PHY_ADDR_MASK) {
1504                printf("Error: Invalid PHY address %d\n", addr);
1505                return -EFAULT;
1506        }
1507
1508        if (reg > MVNETA_PHY_REG_MASK) {
1509                printf("Err: Invalid register offset %d\n", reg);
1510                return -EFAULT;
1511        }
1512
1513        /* wait till the SMI is not busy */
1514        if (smi_wait_ready(pp) < 0)
1515                return -EFAULT;
1516
1517        /* fill the phy addr and reg offset and write opcode and data */
1518        smi_reg = value << MVNETA_SMI_DATA_OFFS;
1519        smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS)
1520                | (reg << MVNETA_SMI_REG_ADDR_OFFS);
1521        smi_reg &= ~MVNETA_SMI_OPCODE_READ;
1522
1523        /* write the smi register */
1524        mvreg_write(pp, MVNETA_SMI, smi_reg);
1525
1526        return 0;
1527}
1528
/*
 * DM eth start callback: powers up the port, performs one-time full
 * initialization (fixed-link or PHY-based), then enables Rx/Tx.
 */
static int mvneta_start(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev;

	mvneta_port_power_up(pp, pp->phy_interface);

	/* Full initialization only on the first call (or while link down) */
	if (!pp->init || pp->link == 0) {
		if (mvneta_port_is_fixed_link(pp)) {
			u32 val;

			pp->init = 1;
			pp->link = 1;
			mvneta_init(dev);

			/* Force the link up; bypass in-band autoneg */
			val = MVNETA_GMAC_FORCE_LINK_UP |
			      MVNETA_GMAC_IB_BYPASS_AN_EN |
			      MVNETA_GMAC_SET_FC_EN |
			      MVNETA_GMAC_ADVERT_FC_EN |
			      MVNETA_GMAC_SAMPLE_TX_CFG_EN;

			if (pp->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			/* Speed comes from the device tree fixed-link node */
			if (pp->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (pp->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
		} else {
			/* Set phy address of the port */
			mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr);

			phydev = phy_connect(pp->bus, pp->phyaddr, dev,
					     pp->phy_interface);
			if (!phydev) {
				printf("phy_connect failed\n");
				return -ENODEV;
			}

			/* Negotiate the link before bringing the port up */
			pp->phydev = phydev;
			phy_config(phydev);
			phy_startup(phydev);
			if (!phydev->link) {
				printf("%s: No link.\n", phydev->dev->name);
				return -1;
			}

			/* Full init on first call */
			mvneta_init(dev);
			pp->init = 1;
			return 0;
		}
	}

	/* Upon all following calls, this is enough */
	mvneta_port_up(pp);
	mvneta_port_enable(pp);

	return 0;
}
1591
/*
 * DM eth send callback: queues one packet on TXQ 0 and busy-waits until
 * the hardware reports it sent. Returns 0 on success, -1 on timeout.
 */
static int mvneta_send(struct udevice *dev, void *packet, int length)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct mvneta_tx_queue *txq = &pp->txqs[0];
	struct mvneta_tx_desc *tx_desc;
	int sent_desc;
	u32 timeout = 0;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	/* Hand the buffer to the hardware; it DMAs directly from 'packet' */
	tx_desc->buf_phys_addr = (u32)(uintptr_t)packet;
	tx_desc->data_size = length;
	/* Make sure the packet data reaches DRAM before the DMA starts */
	flush_dcache_range((ulong)packet,
			   (ulong)packet + ALIGN(length, PKTALIGN));

	/* First and Last descriptor */
	tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
	mvneta_txq_pend_desc_add(pp, txq, 1);

	/* Wait for packet to be sent (queue might help with speed here) */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	while (!sent_desc) {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return -1;
		}
		sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	}

	/* txDone has increased - hw sent packet */
	mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return 0;
}
1627
/*
 * DM eth recv callback: returns one received packet per call.
 * Returns the payload length (Marvell header and CRC stripped), 0 when no
 * packet is pending, or -EIO on a bad descriptor.
 */
static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int rx_done;
	struct mvneta_rx_queue *rxq;
	int rx_bytes = 0;

	/* get rx queue */
	rxq = mvneta_rxq_handle_get(pp, rxq_def);
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	/* Process at most one descriptor per call */
	if (rx_done) {
		struct mvneta_rx_desc *rx_desc;
		unsigned char *data;
		u32 rx_status;

		/*
		 * No cache invalidation needed here, since the desc's are
		 * located in a uncached memory region
		 */
		rx_desc = mvneta_rxq_next_desc_get(rxq);

		rx_status = rx_desc->status;
		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			return -EIO;
		}

		/* 2 bytes for marvell header. 4 bytes for crc */
		rx_bytes = rx_desc->data_size - 6;

		/* give packet to stack - skip on first 2 bytes */
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2;
		/*
		 * No cache invalidation needed here, since the rx_buffer's are
		 * located in a uncached memory region
		 */
		*packetp = data;

		/*
		 * Only mark one descriptor as free
		 * since only one was processed
		 */
		mvneta_rxq_desc_num_update(pp, rxq, 1, 1);
	}

	return rx_bytes;
}
1678
1679static int mvneta_probe(struct udevice *dev)
1680{
1681        struct eth_pdata *pdata = dev_get_platdata(dev);
1682        struct mvneta_port *pp = dev_get_priv(dev);
1683        void *blob = (void *)gd->fdt_blob;
1684        int node = dev_of_offset(dev);
1685        struct mii_dev *bus;
1686        unsigned long addr;
1687        void *bd_space;
1688        int ret;
1689        int fl_node;
1690
1691        /*
1692         * Allocate buffer area for descs and rx_buffers. This is only
1693         * done once for all interfaces. As only one interface can
1694         * be active. Make this area DMA safe by disabling the D-cache
1695         */
1696        if (!buffer_loc.tx_descs) {
1697                u32 size;
1698
1699                /* Align buffer area for descs and rx_buffers to 1MiB */
1700                bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
1701                flush_dcache_range((ulong)bd_space, (ulong)bd_space + BD_SPACE);
1702                mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
1703                                                DCACHE_OFF);
1704                buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
1705                size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
1706                                ARCH_DMA_MINALIGN);
1707                memset(buffer_loc.tx_descs, 0, size);
1708                buffer_loc.rx_descs = (struct mvneta_rx_desc *)
1709                        ((phys_addr_t)bd_space + size);
1710                size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
1711                                ARCH_DMA_MINALIGN);
1712                buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
1713        }
1714
1715        pp->base = (void __iomem *)pdata->iobase;
1716
1717        /* Configure MBUS address windows */
1718        if (device_is_compatible(dev, "marvell,armada-3700-neta"))
1719                mvneta_bypass_mbus_windows(pp);
1720        else
1721                mvneta_conf_mbus_windows(pp);
1722
1723        /* PHY interface is already decoded in mvneta_ofdata_to_platdata() */
1724        pp->phy_interface = pdata->phy_interface;
1725
1726        /* fetch 'fixed-link' property from 'neta' node */
1727        fl_node = fdt_subnode_offset(blob, node, "fixed-link");
1728        if (fl_node != -FDT_ERR_NOTFOUND) {
1729                /* set phy_addr to invalid value for fixed link */
1730                pp->phyaddr = PHY_MAX_ADDR + 1;
1731                pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
1732                pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
1733        } else {
1734                /* Now read phyaddr from DT */
1735                addr = fdtdec_get_int(blob, node, "phy", 0);
1736                addr = fdt_node_offset_by_phandle(blob, addr);
1737                pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
1738        }
1739
1740        bus = mdio_alloc();
1741        if (!bus) {
1742                printf("Failed to allocate MDIO bus\n");
1743                return -ENOMEM;
1744        }
1745
1746        bus->read = mvneta_mdio_read;
1747        bus->write = mvneta_mdio_write;
1748        snprintf(bus->name, sizeof(bus->name), dev->name);
1749        bus->priv = (void *)pp;
1750        pp->bus = bus;
1751
1752        ret = mdio_register(bus);
1753        if (ret)
1754                return ret;
1755
1756#ifdef CONFIG_DM_GPIO
1757        gpio_request_by_name(dev, "phy-reset-gpios", 0,
1758                             &pp->phy_reset_gpio, GPIOD_IS_OUT);
1759
1760        if (dm_gpio_is_valid(&pp->phy_reset_gpio)) {
1761                dm_gpio_set_value(&pp->phy_reset_gpio, 1);
1762                mdelay(10);
1763                dm_gpio_set_value(&pp->phy_reset_gpio, 0);
1764        }
1765#endif
1766
1767        return board_network_enable(bus);
1768}
1769
/*
 * mvneta_stop() - halt the interface
 *
 * Quiesces the MAC (port down) before disabling the port entirely;
 * the ordering mirrors the hardware shutdown sequence.
 */
static void mvneta_stop(struct udevice *dev)
{
	struct mvneta_port *port = dev_get_priv(dev);

	mvneta_port_down(port);
	mvneta_port_disable(port);
}
1777
/* Driver-model ethernet callbacks wired up for the NETA port */
static const struct eth_ops mvneta_ops = {
	.start		= mvneta_start,
	.send		= mvneta_send,
	.recv		= mvneta_recv,
	.stop		= mvneta_stop,
	.write_hwaddr	= mvneta_write_hwaddr,
};
1785
1786static int mvneta_ofdata_to_platdata(struct udevice *dev)
1787{
1788        struct eth_pdata *pdata = dev_get_platdata(dev);
1789        const char *phy_mode;
1790
1791        pdata->iobase = devfdt_get_addr(dev);
1792
1793        /* Get phy-mode / phy_interface from DT */
1794        pdata->phy_interface = -1;
1795        phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
1796                               NULL);
1797        if (phy_mode)
1798                pdata->phy_interface = phy_get_interface_by_name(phy_mode);
1799        if (pdata->phy_interface == -1) {
1800                debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1801                return -EINVAL;
1802        }
1803
1804        return 0;
1805}
1806
/* Devicetree compatible strings handled by this driver */
static const struct udevice_id mvneta_ids[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
1813
/* Driver-model registration: binds the mvneta driver into UCLASS_ETH */
U_BOOT_DRIVER(mvneta) = {
	.name	= "mvneta",
	.id	= UCLASS_ETH,
	.of_match = mvneta_ids,
	.ofdata_to_platdata = mvneta_ofdata_to_platdata,
	.probe	= mvneta_probe,
	.ops	= &mvneta_ops,
	.priv_auto_alloc_size = sizeof(struct mvneta_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
1824