uboot/drivers/net/mvpp2.c
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>

DECLARE_GLOBAL_DATA_PTR;

/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)           \
        printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)          \
        printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)          \
        printf(fmt, ##args)
#define netdev_dbg(dev, fmt, args...)           \
        printf(fmt, ##args)

#define ETH_ALEN        6               /* Octets in one ethernet addr  */

#define __verify_pcpu_ptr(ptr)                                          \
do {                                                                    \
        const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;    \
        (void)__vpp_verify;                                             \
} while (0)

#define VERIFY_PERCPU_PTR(__p)                                          \
({                                                                      \
        __verify_pcpu_ptr(__p);                                         \
        (typeof(*(__p)) __kernel __force *)(__p);                       \
})

#define per_cpu_ptr(ptr, cpu)   ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()      0
#define num_present_cpus()      1
#define for_each_present_cpu(cpu)                       \
        for ((cpu) = 0; (cpu) < 1; (cpu)++)

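/*
 * Illustrative note (editor's sketch, not from the original source): with
 * the shims above, the per-CPU constructs imported from the Linux driver
 * collapse to plain single-instance accesses. For example
 *
 *	struct mvpp2_txq_pcpu *txq_pcpu =
 *		per_cpu_ptr(txq->pcpu, smp_processor_id());
 *
 * evaluates to txq->pcpu itself, since smp_processor_id() is always 0 and
 * per_cpu_ptr() returns its pointer argument unchanged.
 */
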
#define NET_SKB_PAD     max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS          1
#define ETH_HLEN                ETHER_HDR_SIZE  /* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP                    (2 + ETH_HLEN + 4 + 32)
#define MTU                     1500
#define RX_BUFFER_SIZE          (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))

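/*
 * Worked example (editor's note): WRAP = 2 + 14 + 4 + 32 = 52 bytes, so
 * with MTU 1500 the unaligned buffer size is 1552 bytes. RX_BUFFER_SIZE
 * then rounds this up to ARCH_DMA_MINALIGN, e.g. 1600 bytes on a platform
 * with 64-byte DMA alignment.
 */
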
#define MVPP2_SMI_TIMEOUT                       10000

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)       (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)       (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG               0x60
#define MVPP2_RX_FIFO_INIT_REG                  0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)                 (0x140 + 4 * (port))
#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)    (((s) & 0xfff) << 16)
#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK   BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)           (0x180 + 4 * (pool))
#define     MVPP2_POOL_BUF_SIZE_OFFSET          5
#define MVPP2_RXQ_CONFIG_REG(rxq)               (0x800 + 4 * (rxq))
#define     MVPP2_SNOOP_PKT_SIZE_MASK           0x1ff
#define     MVPP2_SNOOP_BUF_HDR_MASK            BIT(9)
#define     MVPP2_RXQ_POOL_SHORT_OFFS           20
#define     MVPP2_RXQ_POOL_SHORT_MASK           0x700000
#define     MVPP2_RXQ_POOL_LONG_OFFS            24
#define     MVPP2_RXQ_POOL_LONG_MASK            0x7000000
#define     MVPP2_RXQ_PACKET_OFFSET_OFFS        28
#define     MVPP2_RXQ_PACKET_OFFSET_MASK        0x70000000
#define     MVPP2_RXQ_DISABLE_MASK              BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG               0x1000
#define     MVPP2_PRS_PORT_LU_MAX               0xf
#define     MVPP2_PRS_PORT_LU_MASK(port)        (0xff << ((port) * 4))
#define     MVPP2_PRS_PORT_LU_VAL(port, val)    ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)           (0x1004 + ((port) & 4))
#define     MVPP2_PRS_INIT_OFF_MASK(port)       (0x3f << (((port) % 4) * 8))
#define     MVPP2_PRS_INIT_OFF_VAL(port, val)   ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)            (0x100c + ((port) & 4))
#define     MVPP2_PRS_MAX_LOOP_MASK(port)       (0xff << (((port) % 4) * 8))
#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)   ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG                  0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)            (0x1104 + (idx) * 4)
#define     MVPP2_PRS_TCAM_INV_MASK             BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG                  0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)            (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG                 0x1230
#define     MVPP2_PRS_TCAM_EN_MASK              BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG                      0x1800
#define     MVPP2_CLS_MODE_ACTIVE_MASK          BIT(0)
#define MVPP2_CLS_PORT_WAY_REG                  0x1810
#define     MVPP2_CLS_PORT_WAY_MASK(port)       (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG                 0x1814
#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS        6
#define MVPP2_CLS_LKP_TBL_REG                   0x1818
#define     MVPP2_CLS_LKP_TBL_RXQ_MASK          0xff
#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK    BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG                0x1820
#define MVPP2_CLS_FLOW_TBL0_REG                 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG                 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG                 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)    (0x1980 + ((port) * 4))
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS     3
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK     0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)          (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG               0x19d0
#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)    (1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG                       0x2040
#define MVPP2_RXQ_DESC_ADDR_REG                 0x2044
#define MVPP2_RXQ_DESC_SIZE_REG                 0x2048
#define     MVPP2_RXQ_DESC_SIZE_MASK            0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)        (0x3000 + 4 * (rxq))
#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET      0
#define     MVPP2_RXQ_NUM_NEW_OFFSET            16
#define MVPP2_RXQ_STATUS_REG(rxq)               (0x3400 + 4 * (rxq))
#define     MVPP2_RXQ_OCCUPIED_MASK             0x3fff
#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET       16
#define     MVPP2_RXQ_NON_OCCUPIED_MASK         0x3fff0000
#define MVPP2_RXQ_THRESH_REG                    0x204c
#define     MVPP2_OCCUPIED_THRESH_OFFSET        0
#define     MVPP2_OCCUPIED_THRESH_MASK          0x3fff
#define MVPP2_RXQ_INDEX_REG                     0x2050
#define MVPP2_TXQ_NUM_REG                       0x2080
#define MVPP2_TXQ_DESC_ADDR_REG                 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG                 0x2088
#define     MVPP2_TXQ_DESC_SIZE_MASK            0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG               0x2090
#define MVPP2_TXQ_THRESH_REG                    0x2094
#define     MVPP2_TRANSMITTED_THRESH_OFFSET     16
#define     MVPP2_TRANSMITTED_THRESH_MASK       0x3fff0000
#define MVPP2_TXQ_INDEX_REG                     0x2098
#define MVPP2_TXQ_PREF_BUF_REG                  0x209c
#define     MVPP2_PREF_BUF_PTR(desc)            ((desc) & 0xfff)
#define     MVPP2_PREF_BUF_SIZE_4               (BIT(12) | BIT(13))
#define     MVPP2_PREF_BUF_SIZE_16              (BIT(12) | BIT(14))
#define     MVPP2_PREF_BUF_THRESH(val)          ((val) << 17)
#define     MVPP2_TXQ_DRAIN_EN_MASK             BIT(31)
#define MVPP2_TXQ_PENDING_REG                   0x20a0
#define     MVPP2_TXQ_PENDING_MASK              0x3fff
#define MVPP2_TXQ_INT_STATUS_REG                0x20a4
#define MVPP2_TXQ_SENT_REG(txq)                 (0x3c00 + 4 * (txq))
#define     MVPP2_TRANSMITTED_COUNT_OFFSET      16
#define     MVPP2_TRANSMITTED_COUNT_MASK        0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG                  0x20b0
#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET         16
#define MVPP2_TXQ_RSVD_RSLT_REG                 0x20b4
#define     MVPP2_TXQ_RSVD_RSLT_MASK            0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG                  0x20b8
#define     MVPP2_TXQ_RSVD_CLR_OFFSET           16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)       (0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)       (0x2140 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK       0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)          (0x2180 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_PENDING_MASK         0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)           (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)                       (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)                       (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)                      (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE                  0x4060

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)         (0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)            (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port)              (0x5420 + 4 * (port))
#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)    ((mask) & 0xffff)
#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)   (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)         (0x5480 + 4 * (port))
#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK    BIT(24)
#define     MVPP2_CAUSE_FCS_ERR_MASK            BIT(25)
#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK   BIT(26)
#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK   BIT(29)
#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK   BIT(30)
#define     MVPP2_CAUSE_MISC_SUM_MASK           BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)          (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG            0x54bc
#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK     0xffff
#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK     0x3fc00000
#define     MVPP2_PON_CAUSE_MISC_SUM_MASK               BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG                0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)            (0x6000 + ((pool) * 4))
#define     MVPP2_BM_POOL_BASE_ADDR_MASK        0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)            (0x6040 + ((pool) * 4))
#define     MVPP2_BM_POOL_SIZE_MASK             0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)        (0x6080 + ((pool) * 4))
#define     MVPP2_BM_POOL_GET_READ_PTR_MASK     0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)        (0x60c0 + ((pool) * 4))
#define     MVPP2_BM_POOL_PTRS_NUM_MASK         0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)        (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)        (0x6140 + ((pool) * 4))
#define     MVPP2_BM_BPPI_PTR_NUM_MASK          0x7ff
#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK    BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)            (0x6200 + ((pool) * 4))
#define     MVPP2_BM_START_MASK                 BIT(0)
#define     MVPP2_BM_STOP_MASK                  BIT(1)
#define     MVPP2_BM_STATE_MASK                 BIT(4)
#define     MVPP2_BM_LOW_THRESH_OFFS            8
#define     MVPP2_BM_LOW_THRESH_MASK            0x7f00
#define     MVPP2_BM_LOW_THRESH_VALUE(val)      ((val) << \
                                                MVPP2_BM_LOW_THRESH_OFFS)
#define     MVPP2_BM_HIGH_THRESH_OFFS           16
#define     MVPP2_BM_HIGH_THRESH_MASK           0x7f0000
#define     MVPP2_BM_HIGH_THRESH_VALUE(val)     ((val) << \
                                                MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)           (0x6240 + ((pool) * 4))
#define     MVPP2_BM_RELEASED_DELAY_MASK        BIT(0)
#define     MVPP2_BM_ALLOC_FAILED_MASK          BIT(1)
#define     MVPP2_BM_BPPE_EMPTY_MASK            BIT(2)
#define     MVPP2_BM_BPPE_FULL_MASK             BIT(3)
#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK      BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)            (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)            (0x6400 + ((pool) * 4))
#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK       BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG                 0x6440
#define MVPP2_BM_PHY_RLS_REG(pool)              (0x6480 + ((pool) * 4))
#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK       BIT(0)
#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK       BIT(1)
#define     MVPP2_BM_PHY_RLS_GRNTD_MASK         BIT(2)
#define MVPP2_BM_VIRT_RLS_REG                   0x64c0
#define MVPP2_BM_MC_RLS_REG                     0x64c4
#define     MVPP2_BM_MC_ID_MASK                 0xfff
#define     MVPP2_BM_FORCE_RELEASE_MASK         BIT(12)

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG          0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG               0x8004
#define     MVPP2_TXP_SCHED_ENQ_MASK            0xff
#define     MVPP2_TXP_SCHED_DISQ_OFFSET         8
#define MVPP2_TXP_SCHED_CMD_1_REG               0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG              0x8018
#define MVPP2_TXP_SCHED_MTU_REG                 0x801c
#define     MVPP2_TXP_MTU_MAX                   0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG              0x8020
#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK    0x7ffff
#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK    0x3ff00000
#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)     ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG          0x8024
#define     MVPP2_TXP_TOKEN_SIZE_MAX            0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)           (0x8040 + ((q) << 2))
#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK    0x7ffff
#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK    0x3ff00000
#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)     ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)       (0x8060 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_SIZE_MAX            0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)       (0x8080 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_CNTR_MAX            0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG                      0x8800
#define MVPP2_TX_PORT_FLUSH_REG                 0x8810
#define     MVPP2_TX_PORT_FLUSH_MASK(port)      (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE                   0x24
#define MVPP2_SRC_ADDR_HIGH                     0x28
#define MVPP2_PHY_AN_CFG0_REG                   0x34
#define     MVPP2_PHY_AN_STOP_SMI0_MASK         BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port)           (0x1000 + ((port) >> 1) * \
                                                0x400 + (port) * 0x400)
#define     MVPP2_MIB_LATE_COLLISION            0x7c
#define MVPP2_ISR_SUM_MASK_REG                  0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG      0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT           0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG                   0x0
#define      MVPP2_GMAC_PORT_EN_MASK            BIT(0)
#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS        2
#define      MVPP2_GMAC_MAX_RX_SIZE_MASK        0x7ffc
#define      MVPP2_GMAC_MIB_CNTR_EN_MASK        BIT(15)
#define MVPP2_GMAC_CTRL_1_REG                   0x4
#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK    BIT(1)
#define      MVPP2_GMAC_GMII_LB_EN_MASK         BIT(5)
#define      MVPP2_GMAC_PCS_LB_EN_BIT           6
#define      MVPP2_GMAC_PCS_LB_EN_MASK          BIT(6)
#define      MVPP2_GMAC_SA_LOW_OFFS             7
#define MVPP2_GMAC_CTRL_2_REG                   0x8
#define      MVPP2_GMAC_INBAND_AN_MASK          BIT(0)
#define      MVPP2_GMAC_PCS_ENABLE_MASK         BIT(3)
#define      MVPP2_GMAC_PORT_RGMII_MASK         BIT(4)
#define      MVPP2_GMAC_PORT_RESET_MASK         BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG               0xc
#define      MVPP2_GMAC_FORCE_LINK_DOWN         BIT(0)
#define      MVPP2_GMAC_FORCE_LINK_PASS         BIT(1)
#define      MVPP2_GMAC_CONFIG_MII_SPEED        BIT(5)
#define      MVPP2_GMAC_CONFIG_GMII_SPEED       BIT(6)
#define      MVPP2_GMAC_AN_SPEED_EN             BIT(7)
#define      MVPP2_GMAC_FC_ADV_EN               BIT(9)
#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
#define      MVPP2_GMAC_AN_DUPLEX_EN            BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG          0x1c
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS     6
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)  (((v) << 6) & \
                                        MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK      0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)

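/*
 * Illustrative example (editor's note): with the 16-entry rings used in
 * this driver (MVPP2_MAX_RXD/MVPP2_MAX_TXD below, i.e. last_desc == 15),
 * MVPP2_QUEUE_NEXT_DESC(q, 14) yields 15 and MVPP2_QUEUE_NEXT_DESC(q, 15)
 * wraps back to 0.
 */
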
/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP2_SMI                               0x0054
#define     MVPP2_PHY_REG_MASK                  0x1f
/* SMI register fields */
#define     MVPP2_SMI_DATA_OFFS                 0       /* Data */
#define     MVPP2_SMI_DATA_MASK                 (0xffff << MVPP2_SMI_DATA_OFFS)
#define     MVPP2_SMI_DEV_ADDR_OFFS             16      /* PHY device address */
#define     MVPP2_SMI_REG_ADDR_OFFS             21      /* PHY device reg addr*/
#define     MVPP2_SMI_OPCODE_OFFS               26      /* Write/Read opcode */
#define     MVPP2_SMI_OPCODE_READ               (1 << MVPP2_SMI_OPCODE_OFFS)
#define     MVPP2_SMI_READ_VALID                (1 << 27)       /* Read Valid */
#define     MVPP2_SMI_BUSY                      (1 << 28)       /* Busy */

#define     MVPP2_PHY_ADDR_MASK                 0x1f
#define     MVPP2_PHY_REG_MASK                  0x1f

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH   15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS  1000000UL
#define MVPP2_RX_COAL_PKTS              32
#define MVPP2_RX_COAL_USEC              100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE                   2
#define MVPP2_ETH_TYPE_LEN              2
#define MVPP2_PPPOE_HDR_SIZE            8
#define MVPP2_VLAN_TAG_LEN              4

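/*
 * Editor's note on the arithmetic: the 2-byte Marvell header plus the
 * 14-byte Ethernet header add up to 16 bytes, a multiple of 4, which is
 * what places the IP header on a 4-byte boundary as described above.
 */
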
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE              0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE     32
#define MVPP2_TX_CSUM_MAX_SIZE          9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC   1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC   1000

#define MVPP2_TX_MTU_MAX                0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT                 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS                 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ                   8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ                   8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ               1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ               1
#define CONFIG_MV_ETH_RXQ               8       /* increment by 8 */

/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM             (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD                   16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD                   16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK            64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE             256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE         32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN             (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE    0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE    0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT      0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
        0

#define MVPP2_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)     ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)   ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
        ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

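/*
 * Worked example (editor's note; MVPP2_SKB_SHINFO_SIZE is 0 in U-Boot):
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536,
 * MVPP2_RX_BUF_SIZE(1536) = 1536 + 32 = 1568, and
 * MVPP2_RX_MAX_PKT_SIZE(1568) = 1568 - 32 = 1536 again, i.e. the
 * BUF_SIZE/MAX_PKT_SIZE pair are inverses of each other.
 */
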
#define MVPP2_BIT_TO_BYTE(bit)          ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE          16

/* Port flags */
#define MVPP2_F_LOOPBACK                BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
        MVPP2_TAG_TYPE_NONE = 0,
        MVPP2_TAG_TYPE_MH   = 1,
        MVPP2_TAG_TYPE_DSA  = 2,
        MVPP2_TAG_TYPE_EDSA = 3,
        MVPP2_TAG_TYPE_VLAN = 4,
        MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE        256
#define MVPP2_PRS_TCAM_WORDS            6
#define MVPP2_PRS_SRAM_WORDS            4
#define MVPP2_PRS_FLOW_ID_SIZE          64
#define MVPP2_PRS_FLOW_ID_MASK          0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID    1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT   BIT(5)
#define MVPP2_PRS_IPV4_HEAD             0x40
#define MVPP2_PRS_IPV4_HEAD_MASK        0xf0
#define MVPP2_PRS_IPV4_MC               0xe0
#define MVPP2_PRS_IPV4_MC_MASK          0xf0
#define MVPP2_PRS_IPV4_BC_MASK          0xff
#define MVPP2_PRS_IPV4_IHL              0x5
#define MVPP2_PRS_IPV4_IHL_MASK         0xf
#define MVPP2_PRS_IPV6_MC               0xff
#define MVPP2_PRS_IPV6_MC_MASK          0xff
#define MVPP2_PRS_IPV6_HOP_MASK         0xff
#define MVPP2_PRS_TCAM_PROTO_MASK       0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L     0x3f
#define MVPP2_PRS_DBL_VLANS_MAX         100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS                       8
#define MVPP2_PRS_PORT_MASK                     0xff
#define MVPP2_PRS_LU_MASK                       0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)          \
                                    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)       \
                                              (((offs) * 2) - ((offs) % 2)  + 2)
#define MVPP2_PRS_TCAM_AI_BYTE                  16
#define MVPP2_PRS_TCAM_PORT_BYTE                17
#define MVPP2_PRS_TCAM_LU_BYTE                  20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)            ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD                 5
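
/*
 * Illustrative mapping (editor's note): MVPP2_PRS_TCAM_DATA_BYTE() and
 * MVPP2_PRS_TCAM_DATA_BYTE_EN() interleave data and enable bytes so that
 * each 32-bit TCAM word carries two header-data bytes in its low half and
 * their enable (mask) bytes two positions higher:
 *
 *	offs:         0  1  2  3  4  5 ...
 *	DATA_BYTE:    0  1  4  5  8  9 ...
 *	DATA_BYTE_EN: 2  3  6  7 10 11 ...
 */
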
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL               0
#define MVPP2_PE_FIRST_FREE_TID         1
#define MVPP2_PE_LAST_FREE_TID          (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN       (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6             (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW      (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW     (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED            (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED          (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED             (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED           (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED      (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED    (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED       (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED     (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT             (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT            (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL               (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE              (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL             (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS        (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS                  0
#define MVPP2_PRS_SRAM_RI_WORD                  0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS             32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD             1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS             32
#define MVPP2_PRS_SRAM_SHIFT_OFFS               64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT           72
#define MVPP2_PRS_SRAM_UDF_OFFS                 73
#define MVPP2_PRS_SRAM_UDF_BITS                 8
#define MVPP2_PRS_SRAM_UDF_MASK                 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT             81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS            82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK            0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3              1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4              4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS        85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK        0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD         1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD     2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD     3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS          87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS          2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK          0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD           0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD       2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD       3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS         89
#define MVPP2_PRS_SRAM_AI_OFFS                  90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS             98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS             8
#define MVPP2_PRS_SRAM_AI_MASK                  0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS             106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK             0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT              110
#define MVPP2_PRS_SRAM_LU_GEN_BIT               111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK                0x1
#define MVPP2_PRS_RI_DSA_MASK                   0x2
#define MVPP2_PRS_RI_VLAN_MASK                  0xc
#define MVPP2_PRS_RI_VLAN_NONE                  ~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE                BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE                BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE                (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK              0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC           BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK               0x600
#define MVPP2_PRS_RI_L2_UCAST                   ~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST                   BIT(9)
#define MVPP2_PRS_RI_L2_BCAST                   BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK                 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK              0x7000
#define MVPP2_PRS_RI_L3_UN                      ~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4                     BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT                 BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER               (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6                     BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT                 (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP                     (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK               0x18000
#define MVPP2_PRS_RI_L3_UCAST                   ~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST                   BIT(15)
#define MVPP2_PRS_RI_L3_BCAST                   (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK               0x20000
#define MVPP2_PRS_RI_UDF3_MASK                  0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL            BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK              0x1c00000
#define MVPP2_PRS_RI_L4_TCP                     BIT(22)
#define MVPP2_PRS_RI_L4_UDP                     BIT(23)
#define MVPP2_PRS_RI_L4_OTHER                   (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK                  0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE              BIT(29)
#define MVPP2_PRS_RI_DROP_MASK                  0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT               BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT            BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT               BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT            BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT        BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT         BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI                0
#define MVPP2_PRS_DBL_VLAN_AI_BIT               BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED                true
#define MVPP2_PRS_UNTAGGED              false
#define MVPP2_PRS_EDSA                  true
#define MVPP2_PRS_DSA                   false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
        MVPP2_PRS_UDF_MAC_DEF,
        MVPP2_PRS_UDF_MAC_RANGE,
        MVPP2_PRS_UDF_L2_DEF,
        MVPP2_PRS_UDF_L2_DEF_COPY,
        MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
        MVPP2_PRS_LU_MH,
        MVPP2_PRS_LU_MAC,
        MVPP2_PRS_LU_DSA,
        MVPP2_PRS_LU_VLAN,
        MVPP2_PRS_LU_L2,
        MVPP2_PRS_LU_PPPOE,
        MVPP2_PRS_LU_IP4,
        MVPP2_PRS_LU_IP6,
        MVPP2_PRS_LU_FLOWS,
        MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
        MVPP2_PRS_L3_UNI_CAST,
        MVPP2_PRS_L3_MULTI_CAST,
        MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE        512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS  3
#define MVPP2_CLS_LKP_TBL_SIZE          64

/* BM constants */
#define MVPP2_BM_POOLS_NUM              1
#define MVPP2_BM_LONG_BUF_NUM           16
#define MVPP2_BM_SHORT_BUF_NUM          16
#define MVPP2_BM_POOL_SIZE_MAX          (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN         128
#define MVPP2_BM_SWF_LONG_POOL(port)    0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS       8
#define MVPP2_BM_COOKIE_CPU_OFFS        24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE         MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
        MVPP2_BM_FREE,
        MVPP2_BM_SWF_LONG,
        MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
        /* Shared registers' base addresses */
        void __iomem *base;
        void __iomem *lms_base;

        /* List of pointers to port structures */
        struct mvpp2_port **port_list;

        /* Aggregated TXQs */
        struct mvpp2_tx_queue *aggr_txqs;

        /* BM pools */
        struct mvpp2_bm_pool *bm_pools;

        /* PRS shadow table */
        struct mvpp2_prs_shadow *prs_shadow;
        /* PRS auxiliary table for double vlan entries control */
        bool *prs_double_vlans;

        /* Tclk value */
        u32 tclk;

        struct mii_dev *bus;
};

struct mvpp2_pcpu_stats {
        u64     rx_packets;
        u64     rx_bytes;
        u64     tx_packets;
        u64     tx_bytes;
};

struct mvpp2_port {
        u8 id;

        int irq;

        struct mvpp2 *priv;

        /* Per-port registers' base address */
        void __iomem *base;

        struct mvpp2_rx_queue **rxqs;
        struct mvpp2_tx_queue **txqs;

        int pkt_size;

        u32 pending_cause_rx;

        /* Per-CPU port control */
        struct mvpp2_port_pcpu __percpu *pcpu;

        /* Flags */
        unsigned long flags;

        u16 tx_ring_size;
        u16 rx_ring_size;
        struct mvpp2_pcpu_stats __percpu *stats;

        struct phy_device *phy_dev;
        phy_interface_t phy_interface;
        int phy_node;
        int phyaddr;
        int init;
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;

        struct mvpp2_bm_pool *pool_long;
        struct mvpp2_bm_pool *pool_short;

        /* Index of first port's physical RXQ */
        u8 first_rxq;

        u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT          0
#define MVPP2_TXD_IP_HLEN_SHIFT         8
#define MVPP2_TXD_L4_CSUM_FRAG          BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT           BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE       BIT(15)
#define MVPP2_TXD_PADDING_DISABLE       BIT(23)
#define MVPP2_TXD_L4_UDP                BIT(24)
#define MVPP2_TXD_L3_IP6                BIT(26)
#define MVPP2_TXD_L_DESC                BIT(28)
#define MVPP2_TXD_F_DESC                BIT(29)

#define MVPP2_RXD_ERR_SUMMARY           BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK         (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC               0x0
#define MVPP2_RXD_ERR_OVERRUN           BIT(13)
#define MVPP2_RXD_ERR_RESOURCE          (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS       16
#define MVPP2_RXD_BM_POOL_ID_MASK       (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC              BIT(21)
#define MVPP2_RXD_L4_CSUM_OK            BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR        BIT(24)
#define MVPP2_RXD_L4_TCP                BIT(25)
#define MVPP2_RXD_L4_UDP                BIT(26)
#define MVPP2_RXD_L3_IP4                BIT(28)
#define MVPP2_RXD_L3_IP6                BIT(30)
#define MVPP2_RXD_BUF_HDR               BIT(31)

struct mvpp2_tx_desc {
        u32 command;            /* Options used by HW for packet transmitting.*/
        u8  packet_offset;      /* the offset from the buffer beginning */
        u8  phys_txq;           /* destination queue ID                 */
        u16 data_size;          /* data size of transmitted packet in bytes */
        u32 buf_phys_addr;      /* physical addr of transmitted buffer  */
        u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
        u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
        u32 reserved2;          /* reserved (for future use)            */
};

struct mvpp2_rx_desc {
        u32 status;             /* info about received packet           */
        u16 reserved1;          /* parser_info (for future use, PnC)    */
        u16 data_size;          /* size of received packet in bytes     */
        u32 buf_phys_addr;      /* physical address of the buffer       */
        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
        u16 reserved2;          /* gem_port_id (for future use, PON)    */
        u16 reserved3;          /* csum_l4 (for future use, PnC)        */
        u8  reserved4;          /* bm_qset (for future use, BM)         */
        u8  reserved5;
        u16 reserved6;          /* classify_info (for future use, PnC)  */
        u32 reserved7;          /* flow_id (for future use, PnC) */
        u32 reserved8;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
        int cpu;

        /* Number of Tx DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used Tx DMA descriptor in the
         * descriptor ring
         */
        int count;

        /* Number of Tx DMA descriptors reserved for each CPU */
        int reserved_num;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;
};

struct mvpp2_tx_queue {
        /* Physical number of this Tx queue */
        u8 id;

        /* Logical number of this Tx queue */
        u8 log_id;

        /* Number of Tx DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used Tx DMA descriptor in the descriptor ring */
        int count;

        /* Per-CPU control of physical Tx queues */
        struct mvpp2_txq_pcpu __percpu *pcpu;

        u32 done_pkts_coal;

        /* Virtual address of the Tx DMA descriptors array */
        struct mvpp2_tx_desc *descs;

        /* DMA address of the Tx DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last Tx DMA descriptor */
        int last_desc;

        /* Index of the next Tx DMA descriptor to process */
        int next_desc_to_proc;
};

struct mvpp2_rx_queue {
        /* RX queue number, in the range 0-31 for physical RXQs */
        u8 id;

        /* Num of rx descriptors in the rx descriptor ring */
        int size;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX DMA descriptors array */
        struct mvpp2_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;

        /* ID of port to which physical RXQ is mapped */
        int port;

        /* Port's logic RXQ number to which physical RXQ is mapped */
        int logic_rxq;
};

union mvpp2_prs_tcam_entry {
        u32 word[MVPP2_PRS_TCAM_WORDS];
        u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
        u32 word[MVPP2_PRS_SRAM_WORDS];
        u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
        u32 index;
        union mvpp2_prs_tcam_entry tcam;
        union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
        bool valid;
        bool finish;

        /* Lookup ID */
        int lu;

        /* User defined offset */
        int udf;

        /* Result info */
        u32 ri;
        u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
        u32 index;
        u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
        u32 lkpid;
        u32 way;
        u32 data;
};

struct mvpp2_bm_pool {
        /* Pool number in the range 0-7 */
        int id;
        enum mvpp2_bm_type type;

        /* Buffer Pointers Pool External (BPPE) size */
        int size;
        /* Number of buffers for this pool */
        int buf_num;
        /* Pool buffer size */
        int buf_size;
        /* Packet size */
        int pkt_size;

        /* BPPE virtual base address */
        u32 *virt_addr;
        /* BPPE physical base address */
        dma_addr_t phys_addr;

        /* Ports using BM pool */
        u32 port_map;

        /* Occupied buffers indicator */
        int in_use_thresh;
};

struct mvpp2_buff_hdr {
        u32 next_buff_phys_addr;
        u32 next_buff_virt_addr;
        u16 byte_count;
        u16 info;
        u8  reserved1;          /* bm_qset (for future use, BM)         */
};

/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK     0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info)    ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS      12
#define MVPP2_B_HDR_INFO_LAST_MASK      BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
           ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
        struct mvpp2_tx_desc *aggr_tx_descs;
        struct mvpp2_tx_desc *tx_descs;
        struct mvpp2_rx_desc *rx_descs;
        u32 *bm_pool[MVPP2_BM_POOLS_NUM];
        u32 *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
        int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver uses only a few buffer descriptors, so 1MB of BD space
 * is sufficient.
 */
#define BD_SPACE        (1 << 20)

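/*
 * Rough sizing (editor's sketch): the descriptor rings and RX buffers
 * carved out of this area are small - e.g. 256 aggregated TX descriptors
 * plus 16 TX and 16 RX descriptors at 32 bytes each (~9 KiB), and 16 RX
 * buffers of RX_BUFFER_SIZE (~25 KiB with a 1500-byte MTU) - so 1 MiB of
 * BD space leaves ample headroom.
 */
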
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
        writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
        return readl(priv->base + offset);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
        txq_pcpu->txq_get_index++;
        if (txq_pcpu->txq_get_index == txq_pcpu->size)
                txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
        return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
        return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

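/*
 * Worked example (editor's note): with MVPP2_MAX_TCONT = 16 and
 * MVPP2_MAX_TXQ = 8, port 0 maps to physical egress port 16 and its
 * first TXQ to physical TXQ (16 + 0) * 8 + 0 = 128.
 */
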
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
        int i;

        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
                return -EINVAL;

        /* Clear entry invalidation bit */
        pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

        /* Write tcam index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
                mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

        /* Write sram index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
                mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

        return 0;
}

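/*
 * Illustrative usage (editor's sketch, hypothetical values): a software
 * entry is built up field by field and then committed with a single
 * indirect write, e.g.:
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	memset(&pe, 0, sizeof(pe));
 *	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
 *	pe.index = MVPP2_PE_MH_DEFAULT;
 *	mvpp2_prs_hw_write(priv, &pe);
 */
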
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
        int i;

        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
                return -EINVAL;

        /* Write tcam index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

        pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
                              MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
        if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
                return MVPP2_PRS_TCAM_ENTRY_INVALID;

        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
                pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

        /* Write sram index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
                pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

        return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
        /* Write index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
                    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
        priv->prs_shadow[index].valid = true;
        priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
                                    unsigned int ri, unsigned int ri_mask)
{
        priv->prs_shadow[index].ri_mask = ri_mask;
        priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

        pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
        pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
                                    unsigned int port, bool add)
{
        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

        if (add)
                pe->tcam.byte[enable_off] &= ~(1 << port);
        else
                pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
                                        unsigned int ports)
{
        unsigned char port_mask = MVPP2_PRS_PORT_MASK;
        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

        pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
        pe->tcam.byte[enable_off] &= ~port_mask;
        pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

        return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

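/*
 * Illustrative example (editor's note): the per-port enable bits are
 * stored inverted, i.e. a set bit masks a port *out*. After
 * mvpp2_prs_tcam_port_map_set(pe, BIT(0)) the enable byte holds 0xfe
 * (only port 0 matches), and mvpp2_prs_tcam_port_map_get() returns
 * ~0xfe & 0xff = 0x01 again.
 */
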
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
                                         unsigned int offs, unsigned char byte,
                                         unsigned char enable)
{
        pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
        pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
                                         unsigned int offs, unsigned char *byte,
                                         unsigned char *enable)
{
        *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
        *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
                                  unsigned short ethertype)
{
        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
                                    int val)
{
        pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
                                      int val)
{
        pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
                                     unsigned int bits, unsigned int mask)
{
        unsigned int i;

        for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
                int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

                if (!(mask & BIT(i)))
                        continue;

                if (bits & BIT(i))
                        mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
                else
                        mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
        }
}

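/*
 * Illustrative example (editor's note): each result-info bit has a
 * matching control bit that marks it as valid. For instance
 * mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_L3_IP4,
 * MVPP2_PRS_RI_L3_PROTO_MASK) sets RI bit 12, clears RI bits 13 and 14,
 * and sets the corresponding RI_CTRL bits 12-14.
 */
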
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
                                     unsigned int bits, unsigned int mask)
{
        unsigned int i;
        int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

        for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {

                if (!(mask & BIT(i)))
                        continue;

                if (bits & BIT(i))
                        mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
                else
                        mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
        }
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
        u8 bits;
        int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
        int ai_en_off = ai_off + 1;
        int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

        bits = (pe->sram.byte[ai_off] >> ai_shift) |
               (pe->sram.byte[ai_en_off] << (8 - ai_shift));

        return bits;
}

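/*
 * Worked example (editor's note): MVPP2_PRS_SRAM_AI_OFFS is 90, so the
 * 8 AI bits straddle sram bytes 11 and 12 (90 / 8 = 11, 90 % 8 = 2);
 * the helper above reassembles them as
 * (byte[11] >> 2) | (byte[12] << 6).
 */
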
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
                                       unsigned int lu)
{
        int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

        mvpp2_prs_sram_bits_clear(pe, sram_next_off,
                                  MVPP2_PRS_SRAM_NEXT_LU_MASK);
        mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
                                     unsigned int op)
{
        /* Set sign */
        if (shift < 0) {
                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
                shift = 0 - shift;
        } else {
                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
        }

        /* Set value */
        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
                                                           (unsigned char)shift;

        /* Reset and set operation */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
                                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

        /* Set base offset as current */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

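/*
 * Illustrative example (editor's note): the shift is stored as sign and
 * magnitude. mvpp2_prs_sram_shift_set(pe, -2, op) sets the sign bit and
 * stores the magnitude 2, while a positive shift such as MVPP2_MH_SIZE
 * clears the sign bit and stores the value directly.
 */
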
1312/* In the sram sw entry set sign and value of the user defined offset
1313 * generated to the classifier
1314 */
1315static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1316                                      unsigned int type, int offset,
1317                                      unsigned int op)
1318{
1319        /* Set sign */
1320        if (offset < 0) {
1321                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1322                offset = 0 - offset;
1323        } else {
1324                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1325        }
1326
1327        /* Set value */
1328        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1329                                  MVPP2_PRS_SRAM_UDF_MASK);
1330        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1331        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1332                                        MVPP2_PRS_SRAM_UDF_BITS)] &=
1333              ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1334        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1335                                        MVPP2_PRS_SRAM_UDF_BITS)] |=
1336                                (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1337
1338        /* Set offset type */
1339        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1340                                  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1341        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1342
1343        /* Set offset operation */
1344        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1345                                  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1346        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1347
1348        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1349                                        MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1350                                             ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1351                                    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1352
1353        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1354                                        MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1355                             (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1356
1357        /* Set base offset as current */
1358        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1359}
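    /*
     * Worked example (illustrative; assumes the UDF field starts at a
     * bit offset whose remainder modulo 8 is 1): mvpp2_prs_sram_bits_set()
     * stores the low 8 - 1 = 7 bits of the offset in place, and the two
     * direct byte accesses above splice the remaining high bit into the
     * next sram byte:
     *
     *	byte[n] &= ~(0xff >> 7);	clear the spill-over bit
     *	byte[n] |= offset >> 7;		store the offset's top bit
     *
     * The same clear-then-set pattern is repeated for the UDF operation
     * select field.
     */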
1360
1361/* Find parser flow entry */
1362static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1363{
1364        struct mvpp2_prs_entry *pe;
1365        int tid;
1366
1367        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1368        if (!pe)
1369                return NULL;
1370        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1371
1372        /* Go through all entries with MVPP2_PRS_LU_FLOWS */
1373        for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1374                u8 bits;
1375
1376                if (!priv->prs_shadow[tid].valid ||
1377                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1378                        continue;
1379
1380                pe->index = tid;
1381                mvpp2_prs_hw_read(priv, pe);
1382                bits = mvpp2_prs_sram_ai_get(pe);
1383
1384                /* Sram stores the classification lookup ID in AI bits [5:0] */
1385                if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1386                        return pe;
1387        }
1388        kfree(pe);
1389
1390        return NULL;
1391}
1392
1393/* Return first free tcam index, seeking from start to end */
1394static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1395                                     unsigned char end)
1396{
1397        int tid;
1398
1399        if (start > end)
1400                swap(start, end);
1401
1402        if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1403                end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1404
1405        for (tid = start; tid <= end; tid++) {
1406                if (!priv->prs_shadow[tid].valid)
1407                        return tid;
1408        }
1409
1410        return -EINVAL;
1411}
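    /*
     * Note for callers: the range may be given in either order (see
     * mvpp2_prs_def_flow(), which passes last..first); after the swap
     * the scan is always ascending, so the lowest free index in the
     * range is returned. Typical use:
     *
     *	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
     *					MVPP2_PE_LAST_FREE_TID);
     *	if (tid < 0)
     *		return tid;	no free entry left: -EINVAL
     */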
1412
1413/* Enable/disable dropping all mac da's */
1414static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1415{
1416        struct mvpp2_prs_entry pe;
1417
1418        if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1419                /* Entry exists - update port only */
1420                pe.index = MVPP2_PE_DROP_ALL;
1421                mvpp2_prs_hw_read(priv, &pe);
1422        } else {
1423                /* Entry doesn't exist - create new */
1424                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1425                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1426                pe.index = MVPP2_PE_DROP_ALL;
1427
1428                /* Non-promiscuous mode for all ports - DROP unknown packets */
1429                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1430                                         MVPP2_PRS_RI_DROP_MASK);
1431
1432                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1433                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1434
1435                /* Update shadow table */
1436                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1437
1438                /* Mask all ports */
1439                mvpp2_prs_tcam_port_map_set(&pe, 0);
1440        }
1441
1442        /* Update port mask */
1443        mvpp2_prs_tcam_port_set(&pe, port, add);
1444
1445        mvpp2_prs_hw_write(priv, &pe);
1446}
1447
1448/* Set port to promiscuous mode */
1449static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1450{
1451        struct mvpp2_prs_entry pe;
1452
1453        /* Promiscuous mode - Accept unknown packets */
1454
1455        if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1456                /* Entry exists - update port only */
1457                pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1458                mvpp2_prs_hw_read(priv, &pe);
1459        } else {
1460                /* Entry doesn't exist - create new */
1461                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1462                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1463                pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1464
1465                /* Continue - set next lookup */
1466                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1467
1468                /* Set result info bits */
1469                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1470                                         MVPP2_PRS_RI_L2_CAST_MASK);
1471
1472                /* Shift to ethertype */
1473                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1474                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1475
1476                /* Mask all ports */
1477                mvpp2_prs_tcam_port_map_set(&pe, 0);
1478
1479                /* Update shadow table */
1480                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1481        }
1482
1483        /* Update port mask */
1484        mvpp2_prs_tcam_port_set(&pe, port, add);
1485
1486        mvpp2_prs_hw_write(priv, &pe);
1487}
1488
1489/* Accept multicast */
1490static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1491                                    bool add)
1492{
1493        struct mvpp2_prs_entry pe;
1494        unsigned char da_mc;
1495
1496        /* Ethernet multicast address first byte is
1497         * 0x01 for IPv4 and 0x33 for IPv6
1498         */
1499        da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1500
1501        if (priv->prs_shadow[index].valid) {
1502                /* Entry exists - update port only */
1503                pe.index = index;
1504                mvpp2_prs_hw_read(priv, &pe);
1505        } else {
1506                /* Entry doesn't exist - create new */
1507                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1508                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1509                pe.index = index;
1510
1511                /* Continue - set next lookup */
1512                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1513
1514                /* Set result info bits */
1515                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1516                                         MVPP2_PRS_RI_L2_CAST_MASK);
1517
1518                /* Update tcam entry data first byte */
1519                mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1520
1521                /* Shift to ethertype */
1522                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1523                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1524
1525                /* Mask all ports */
1526                mvpp2_prs_tcam_port_map_set(&pe, 0);
1527
1528                /* Update shadow table */
1529                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1530        }
1531
1532        /* Update port mask */
1533        mvpp2_prs_tcam_port_set(&pe, port, add);
1534
1535        mvpp2_prs_hw_write(priv, &pe);
1536}
1537
1538/* Parser per-port initialization */
1539static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1540                                   int lu_max, int offset)
1541{
1542        u32 val;
1543
1544        /* Set lookup ID */
1545        val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1546        val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1547        val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1548        mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1549
1550        /* Set maximum number of loops for packet received from port */
1551        val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1552        val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1553        val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1554        mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1555
1556        /* Set initial offset for packet header extraction for the first
1557         * searching loop
1558         */
1559        val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1560        val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1561        val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1562        mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1563}
1564
1565/* Default flow entries initialization for all ports */
1566static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1567{
1568        struct mvpp2_prs_entry pe;
1569        int port;
1570
1571        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1572                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1573                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1574                pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1575
1576                /* Mask all ports */
1577                mvpp2_prs_tcam_port_map_set(&pe, 0);
1578
1579                /* Set flow ID */
1580                mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1581                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1582
1583                /* Update shadow table and hw entry */
1584                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1585                mvpp2_prs_hw_write(priv, &pe);
1586        }
1587}
1588
1589/* Set default entry for Marvell Header field */
1590static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1591{
1592        struct mvpp2_prs_entry pe;
1593
1594        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1595
1596        pe.index = MVPP2_PE_MH_DEFAULT;
1597        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1598        mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1599                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1600        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1601
1602        /* Unmask all ports */
1603        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1604
1605        /* Update shadow table and hw entry */
1606        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1607        mvpp2_prs_hw_write(priv, &pe);
1608}
1609
1610/* Set default entries (placeholders) for promiscuous, non-promiscuous and
1611 * multicast MAC addresses
1612 */
1613static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1614{
1615        struct mvpp2_prs_entry pe;
1616
1617        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1618
1619        /* Non-promiscuous mode for all ports - DROP unknown packets */
1620        pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1621        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1622
1623        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1624                                 MVPP2_PRS_RI_DROP_MASK);
1625        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1626        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1627
1628        /* Unmask all ports */
1629        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1630
1631        /* Update shadow table and hw entry */
1632        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1633        mvpp2_prs_hw_write(priv, &pe);
1634
1635        /* placeholders only - no ports */
1636        mvpp2_prs_mac_drop_all_set(priv, 0, false);
1637        mvpp2_prs_mac_promisc_set(priv, 0, false);
1638        mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1639        mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1640}
1641
1642/* Match basic ethertypes */
1643static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1644{
1645        struct mvpp2_prs_entry pe;
1646        int tid;
1647
1648        /* Ethertype: PPPoE */
1649        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1650                                        MVPP2_PE_LAST_FREE_TID);
1651        if (tid < 0)
1652                return tid;
1653
1654        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1655        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1656        pe.index = tid;
1657
1658        mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1659
1660        mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1661                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1662        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1663        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1664                                 MVPP2_PRS_RI_PPPOE_MASK);
1665
1666        /* Update shadow table and hw entry */
1667        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1668        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1669        priv->prs_shadow[pe.index].finish = false;
1670        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1671                                MVPP2_PRS_RI_PPPOE_MASK);
1672        mvpp2_prs_hw_write(priv, &pe);
1673
1674        /* Ethertype: ARP */
1675        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1676                                        MVPP2_PE_LAST_FREE_TID);
1677        if (tid < 0)
1678                return tid;
1679
1680        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1681        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1682        pe.index = tid;
1683
1684        mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
1685
1686        /* Generate flow in the next iteration */
1687        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1688        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1689        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1690                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1691        /* Set L3 offset */
1692        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1693                                  MVPP2_ETH_TYPE_LEN,
1694                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1695
1696        /* Update shadow table and hw entry */
1697        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1698        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1699        priv->prs_shadow[pe.index].finish = true;
1700        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1701                                MVPP2_PRS_RI_L3_PROTO_MASK);
1702        mvpp2_prs_hw_write(priv, &pe);
1703
1704        /* Ethertype: LBTD */
1705        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1706                                        MVPP2_PE_LAST_FREE_TID);
1707        if (tid < 0)
1708                return tid;
1709
1710        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1711        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1712        pe.index = tid;
1713
1714        mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1715
1716        /* Generate flow in the next iteration */
1717        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1718        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1719        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1720                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1721                                 MVPP2_PRS_RI_CPU_CODE_MASK |
1722                                 MVPP2_PRS_RI_UDF3_MASK);
1723        /* Set L3 offset */
1724        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1725                                  MVPP2_ETH_TYPE_LEN,
1726                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1727
1728        /* Update shadow table and hw entry */
1729        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1730        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1731        priv->prs_shadow[pe.index].finish = true;
1732        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1733                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1734                                MVPP2_PRS_RI_CPU_CODE_MASK |
1735                                MVPP2_PRS_RI_UDF3_MASK);
1736        mvpp2_prs_hw_write(priv, &pe);
1737
1738        /* Ethertype: IPv4 without options */
1739        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1740                                        MVPP2_PE_LAST_FREE_TID);
1741        if (tid < 0)
1742                return tid;
1743
1744        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1745        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1746        pe.index = tid;
1747
1748        mvpp2_prs_match_etype(&pe, 0, PROT_IP);
1749        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1750                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1751                                     MVPP2_PRS_IPV4_HEAD_MASK |
1752                                     MVPP2_PRS_IPV4_IHL_MASK);
1753
1754        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1755        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1756                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1757        /* Skip eth_type + 4 bytes of IP header */
1758        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1759                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1760        /* Set L3 offset */
1761        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1762                                  MVPP2_ETH_TYPE_LEN,
1763                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1764
1765        /* Update shadow table and hw entry */
1766        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1767        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1768        priv->prs_shadow[pe.index].finish = false;
1769        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1770                                MVPP2_PRS_RI_L3_PROTO_MASK);
1771        mvpp2_prs_hw_write(priv, &pe);
1772
1773        /* Ethertype: IPv4 with options */
1774        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1775                                        MVPP2_PE_LAST_FREE_TID);
1776        if (tid < 0)
1777                return tid;
1778
1779        pe.index = tid;
1780
1781        /* Clear tcam data before updating */
1782        pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
1783        pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
1784
1785        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1786                                     MVPP2_PRS_IPV4_HEAD,
1787                                     MVPP2_PRS_IPV4_HEAD_MASK);
1788
1789        /* Clear ri before updating */
1790        pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1791        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1792        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1793                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1794
1795        /* Update shadow table and hw entry */
1796        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1797        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1798        priv->prs_shadow[pe.index].finish = false;
1799        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
1800                                MVPP2_PRS_RI_L3_PROTO_MASK);
1801        mvpp2_prs_hw_write(priv, &pe);
1802
1803        /* Ethertype: IPv6 without options */
1804        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1805                                        MVPP2_PE_LAST_FREE_TID);
1806        if (tid < 0)
1807                return tid;
1808
1809        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1810        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1811        pe.index = tid;
1812
1813        mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
1814
1815        /* Skip DIP of IPV6 header */
1816        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1817                                 MVPP2_MAX_L3_ADDR_SIZE,
1818                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1819        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1820        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1821                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1822        /* Set L3 offset */
1823        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1824                                  MVPP2_ETH_TYPE_LEN,
1825                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1826
1827        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1828        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1829        priv->prs_shadow[pe.index].finish = false;
1830        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1831                                MVPP2_PRS_RI_L3_PROTO_MASK);
1832        mvpp2_prs_hw_write(priv, &pe);
1833
1834        /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1835        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1836        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1837        pe.index = MVPP2_PE_ETH_TYPE_UN;
1838
1839        /* Unmask all ports */
1840        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1841
1842        /* Generate flow in the next iteration */
1843        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1844        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1845        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1846                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1847        /* Set L3 offset even if it's an unknown L3 protocol */
1848        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1849                                  MVPP2_ETH_TYPE_LEN,
1850                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1851
1852        /* Update shadow table and hw entry */
1853        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1854        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1855        priv->prs_shadow[pe.index].finish = true;
1856        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1857                                MVPP2_PRS_RI_L3_PROTO_MASK);
1858        mvpp2_prs_hw_write(priv, &pe);
1859
1860        return 0;
1861}
1862
1863/* Parser default initialization */
1864static int mvpp2_prs_default_init(struct udevice *dev,
1865                                  struct mvpp2 *priv)
1866{
1867        int err, index, i;
1868
1869        /* Enable tcam table */
1870        mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
1871
1872        /* Clear all tcam and sram entries */
1873        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
1874                mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1875                for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1876                        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
1877
1878                mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
1879                for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1880                        mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
1881        }
1882
1883        /* Invalidate all tcam entries */
1884        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
1885                mvpp2_prs_hw_inv(priv, index);
1886
1887        priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
1888                                        sizeof(struct mvpp2_prs_shadow),
1889                                        GFP_KERNEL);
1890        if (!priv->prs_shadow)
1891                return -ENOMEM;
1892
1893        /* Always start from lookup = 0 */
1894        for (index = 0; index < MVPP2_MAX_PORTS; index++)
1895                mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
1896                                       MVPP2_PRS_PORT_LU_MAX, 0);
1897
1898        mvpp2_prs_def_flow_init(priv);
1899
1900        mvpp2_prs_mh_init(priv);
1901
1902        mvpp2_prs_mac_init(priv);
1903
1904        err = mvpp2_prs_etype_init(priv);
1905        if (err)
1906                return err;
1907
1908        return 0;
1909}
1910
1911/* Compare MAC DA with tcam entry data */
1912static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
1913                                       const u8 *da, unsigned char *mask)
1914{
1915        unsigned char tcam_byte, tcam_mask;
1916        int index;
1917
1918        for (index = 0; index < ETH_ALEN; index++) {
1919                mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
1920                if (tcam_mask != mask[index])
1921                        return false;
1922
1923                if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
1924                        return false;
1925        }
1926
1927        return true;
1928}
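    /*
     * Illustrative example: both the mask itself and the masked data
     * must match. With da = 01:00:5e:00:00:01 and
     * mask = ff:ff:ff:00:00:00, a tcam entry programmed to match
     * 01:00:5e:xx:xx:xx compares equal, since per byte the function
     * first requires tcam_mask == mask[index] and then checks
     *
     *	(tcam_mask & tcam_byte) == (da[index] & mask[index])
     */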
1929
1930/* Find tcam entry with matched pair <MAC DA, port> */
1931static struct mvpp2_prs_entry *
1932mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
1933                            unsigned char *mask, int udf_type)
1934{
1935        struct mvpp2_prs_entry *pe;
1936        int tid;
1937
1938        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1939        if (!pe)
1940                return NULL;
1941        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
1942
1943        /* Go through all entries with MVPP2_PRS_LU_MAC */
1944        for (tid = MVPP2_PE_FIRST_FREE_TID;
1945             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1946                unsigned int entry_pmap;
1947
1948                if (!priv->prs_shadow[tid].valid ||
1949                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
1950                    (priv->prs_shadow[tid].udf != udf_type))
1951                        continue;
1952
1953                pe->index = tid;
1954                mvpp2_prs_hw_read(priv, pe);
1955                entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
1956
1957                if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
1958                    entry_pmap == pmap)
1959                        return pe;
1960        }
1961        kfree(pe);
1962
1963        return NULL;
1964}
1965
1966/* Update parser's mac da entry */
1967static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
1968                                   const u8 *da, bool add)
1969{
1970        struct mvpp2_prs_entry *pe;
1971        unsigned int pmap, len, ri;
1972        unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1973        int tid;
1974
1975        /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
1976        pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
1977                                         MVPP2_PRS_UDF_MAC_DEF);
1978
1979        /* No such entry */
1980        if (!pe) {
1981                if (!add)
1982                        return 0;
1983
1984                /* Create new TCAM entry */
1985                /* Find first range mac entry */
1986                for (tid = MVPP2_PE_FIRST_FREE_TID;
1987                     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
1988                        if (priv->prs_shadow[tid].valid &&
1989                            (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
1990                            (priv->prs_shadow[tid].udf ==
1991                                                       MVPP2_PRS_UDF_MAC_RANGE))
1992                                break;
1993
1994                /* Go through all entries from first to last */
1995                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1996                                                tid - 1);
1997                if (tid < 0)
1998                        return tid;
1999
2000                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2001                if (!pe)
2002                        return -ENOMEM;
2003                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2004                pe->index = tid;
2005
2006                /* Mask all ports */
2007                mvpp2_prs_tcam_port_map_set(pe, 0);
2008        }
2009
2010        /* Update port mask */
2011        mvpp2_prs_tcam_port_set(pe, port, add);
2012
2013        /* Invalidate the entry if no ports are left enabled */
2014        pmap = mvpp2_prs_tcam_port_map_get(pe);
2015        if (pmap == 0) {
2016                if (add) {
2017                        kfree(pe);
2018                        return -1;
2019                }
2020                mvpp2_prs_hw_inv(priv, pe->index);
2021                priv->prs_shadow[pe->index].valid = false;
2022                kfree(pe);
2023                return 0;
2024        }
2025
2026        /* Continue - set next lookup */
2027        mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2028
2029        /* Set match on DA */
2030        len = ETH_ALEN;
2031        while (len--)
2032                mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2033
2034        /* Set result info bits */
2035        ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2036
2037        mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2038                                 MVPP2_PRS_RI_MAC_ME_MASK);
2039        mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2040                                MVPP2_PRS_RI_MAC_ME_MASK);
2041
2042        /* Shift to ethertype */
2043        mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2044                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2045
2046        /* Update shadow table and hw entry */
2047        priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2048        mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2049        mvpp2_prs_hw_write(priv, pe);
2050
2051        kfree(pe);
2052
2053        return 0;
2054}
2055
2056static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2057{
2058        int err;
2059
2060        /* Remove old parser entry */
2061        err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2062                                      false);
2063        if (err)
2064                return err;
2065
2066        /* Add new parser entry */
2067        err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2068        if (err)
2069                return err;
2070
2071        /* Set addr in the device */
2072        memcpy(port->dev_addr, da, ETH_ALEN);
2073
2074        return 0;
2075}
2076
2077/* Set prs flow for the port */
2078static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2079{
2080        struct mvpp2_prs_entry *pe;
2081        int tid;
2082
2083        pe = mvpp2_prs_flow_find(port->priv, port->id);
2084
2085        /* No such entry exists */
2086        if (!pe) {
2087                /* Go through all entries from last to first */
2088                tid = mvpp2_prs_tcam_first_free(port->priv,
2089                                                MVPP2_PE_LAST_FREE_TID,
2090                                                MVPP2_PE_FIRST_FREE_TID);
2091                if (tid < 0)
2092                        return tid;
2093
2094                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2095                if (!pe)
2096                        return -ENOMEM;
2097
2098                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2099                pe->index = tid;
2100
2101                /* Set flow ID */
2102                mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2103                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2104
2105                /* Update shadow table */
2106                mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2107        }
2108
2109        mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2110        mvpp2_prs_hw_write(port->priv, pe);
2111        kfree(pe);
2112
2113        return 0;
2114}
2115
2116/* Classifier configuration routines */
2117
2118/* Update classification flow table registers */
2119static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2120                                 struct mvpp2_cls_flow_entry *fe)
2121{
2122        mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2123        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
2124        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
2125        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
2126}
2127
2128/* Update classification lookup table register */
2129static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2130                                   struct mvpp2_cls_lookup_entry *le)
2131{
2132        u32 val;
2133
2134        val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2135        mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2136        mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2137}
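    /*
     * The lookup table is addressed by (way, lkpid) packed into a
     * single index register write; e.g. for way 1 and lkpid 2
     * (illustrative values):
     *
     *	val = (1 << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | 2;
     *
     * after which the data write to MVPP2_CLS_LKP_TBL_REG lands in
     * that slot.
     */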
2138
2139/* Classifier default initialization */
2140static void mvpp2_cls_init(struct mvpp2 *priv)
2141{
2142        struct mvpp2_cls_lookup_entry le;
2143        struct mvpp2_cls_flow_entry fe;
2144        int index;
2145
2146        /* Enable classifier */
2147        mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2148
2149        /* Clear classifier flow table */
2150        memset(&fe.data, 0, sizeof(fe.data));
2151        for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2152                fe.index = index;
2153                mvpp2_cls_flow_write(priv, &fe);
2154        }
2155
2156        /* Clear classifier lookup table */
2157        le.data = 0;
2158        for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2159                le.lkpid = index;
2160                le.way = 0;
2161                mvpp2_cls_lookup_write(priv, &le);
2162
2163                le.way = 1;
2164                mvpp2_cls_lookup_write(priv, &le);
2165        }
2166}
2167
2168static void mvpp2_cls_port_config(struct mvpp2_port *port)
2169{
2170        struct mvpp2_cls_lookup_entry le;
2171        u32 val;
2172
2173        /* Set way for the port */
2174        val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2175        val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2176        mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2177
2178        /* Pick the entry to be accessed in lookup ID decoding table
2179         * according to the way and lkpid.
2180         */
2181        le.lkpid = port->id;
2182        le.way = 0;
2183        le.data = 0;
2184
2185        /* Set initial CPU queue for receiving packets */
2186        le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2187        le.data |= port->first_rxq;
2188
2189        /* Disable classification engines */
2190        le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2191
2192        /* Update lookup ID table entry */
2193        mvpp2_cls_lookup_write(port->priv, &le);
2194}
2195
2196/* Set CPU queue number for oversize packets */
2197static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2198{
2199        u32 val;
2200
2201        mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2202                    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2203
2204        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2205                    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2206
2207        val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2208        val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2209        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2210}
2211
2212/* Buffer Manager configuration routines */
2213
2214/* Create pool */
2215static int mvpp2_bm_pool_create(struct udevice *dev,
2216                                struct mvpp2 *priv,
2217                                struct mvpp2_bm_pool *bm_pool, int size)
2218{
2219        u32 val;
2220
2221        bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2222        bm_pool->phys_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2223        if (!bm_pool->virt_addr)
2224                return -ENOMEM;
2225
2226        if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
2227                dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2228                        bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2229                return -ENOMEM;
2230        }
2231
2232        mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2233                    bm_pool->phys_addr);
2234        mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2235
2236        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2237        val |= MVPP2_BM_START_MASK;
2238        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2239
2240        bm_pool->type = MVPP2_BM_FREE;
2241        bm_pool->size = size;
2242        bm_pool->pkt_size = 0;
2243        bm_pool->buf_num = 0;
2244
2245        return 0;
2246}
2247
2248/* Set pool buffer size */
2249static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2250                                      struct mvpp2_bm_pool *bm_pool,
2251                                      int buf_size)
2252{
2253        u32 val;
2254
2255        bm_pool->buf_size = buf_size;
2256
2257        val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2258        mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2259}
2260
2261/* Free all buffers from the pool */
2262static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2263                               struct mvpp2_bm_pool *bm_pool)
2264{
2265        bm_pool->buf_num = 0;
2266}
2267
2268/* Cleanup pool */
2269static int mvpp2_bm_pool_destroy(struct udevice *dev,
2270                                 struct mvpp2 *priv,
2271                                 struct mvpp2_bm_pool *bm_pool)
2272{
2273        u32 val;
2274
2275        mvpp2_bm_bufs_free(dev, priv, bm_pool);
2276        if (bm_pool->buf_num) {
2277                dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2278                return 0;
2279        }
2280
2281        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2282        val |= MVPP2_BM_STOP_MASK;
2283        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2284
2285        return 0;
2286}
2287
2288static int mvpp2_bm_pools_init(struct udevice *dev,
2289                               struct mvpp2 *priv)
2290{
2291        int i, err, size;
2292        struct mvpp2_bm_pool *bm_pool;
2293
2294        /* Create all pools with maximum size */
2295        size = MVPP2_BM_POOL_SIZE_MAX;
2296        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2297                bm_pool = &priv->bm_pools[i];
2298                bm_pool->id = i;
2299                err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2300                if (err)
2301                        goto err_unroll_pools;
2302                mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
2303        }
2304        return 0;
2305
2306err_unroll_pools:
2307        dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2308        for (i = i - 1; i >= 0; i--)
2309                mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2310        return err;
2311}
2312
2313static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2314{
2315        int i, err;
2316
2317        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2318                /* Mask BM all interrupts */
2319                mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2320                /* Clear BM cause register */
2321                mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2322        }
2323
2324        /* Allocate and initialize BM pools */
2325        priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2326                                     sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2327        if (!priv->bm_pools)
2328                return -ENOMEM;
2329
2330        err = mvpp2_bm_pools_init(dev, priv);
2331        if (err < 0)
2332                return err;
2333        return 0;
2334}
2335
2336/* Attach long pool to rxq */
2337static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2338                                    int lrxq, int long_pool)
2339{
2340        u32 val;
2341        int prxq;
2342
2343        /* Get queue physical ID */
2344        prxq = port->rxqs[lrxq]->id;
2345
2346        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2347        val &= ~MVPP2_RXQ_POOL_LONG_MASK;
2348        val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
2349                    MVPP2_RXQ_POOL_LONG_MASK);
2350
2351        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2352}
2353
2354/* Set pool number in a BM cookie */
2355static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2356{
2357        u32 bm;
2358
2359        bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2360        bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2361
2362        return bm;
2363}
2364
2365/* Get pool number from a BM cookie */
2366static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
2367{
2368        return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2369}
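    /*
     * Sketch of the cookie layout handled by the two helpers above:
     * the pool number occupies 8 bits at MVPP2_BM_COOKIE_POOL_OFFS
     * (hence the 0xFF masks), so set and get are symmetric:
     *
     *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
     *	int pool = mvpp2_bm_cookie_pool_get(bm);	pool == 3
     */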
2370
2371/* Release buffer to BM */
2372static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2373                                     u32 buf_phys_addr, u32 buf_virt_addr)
2374{
2375        mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
2376        mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
2377}
2378
2379/* Refill BM pool */
2380static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2381                              u32 phys_addr, u32 cookie)
2382{
2383        int pool = mvpp2_bm_cookie_pool_get(bm);
2384
2385        mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
2386}
2387
2388/* Allocate buffers for the pool */
2389static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2390                             struct mvpp2_bm_pool *bm_pool, int buf_num)
2391{
2392        int i;
2393        u32 bm;
2394
2395        if (buf_num < 0 ||
2396            (buf_num + bm_pool->buf_num > bm_pool->size)) {
2397                netdev_err(port->dev,
2398                           "cannot allocate %d buffers for pool %d\n",
2399                           buf_num, bm_pool->id);
2400                return 0;
2401        }
2402
2403        bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
2404        for (i = 0; i < buf_num; i++) {
2405                mvpp2_pool_refill(port, bm, (u32)buffer_loc.rx_buffer[i],
2406                                  (u32)buffer_loc.rx_buffer[i]);
2407        }
2408
2409        /* Update BM driver with number of buffers added to pool */
2410        bm_pool->buf_num += i;
2411        bm_pool->in_use_thresh = bm_pool->buf_num / 4;
2412
2413        return i;
2414}
2415
2416/* Notify the driver that BM pool is being used as a specific type and return the
2417 * pool pointer on success
2418 */
2419static struct mvpp2_bm_pool *
2420mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2421                  int pkt_size)
2422{
2423        struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2424        int num;
2425
2426        if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2427                netdev_err(port->dev, "mixing pool types is forbidden\n");
2428                return NULL;
2429        }
2430
2431        if (new_pool->type == MVPP2_BM_FREE)
2432                new_pool->type = type;
2433
2434        /* Allocate buffers in case BM pool is used as long pool, but packet
2435         * size doesn't match MTU or BM pool hasn't been used yet
2436         */
2437        if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2438            (new_pool->pkt_size == 0)) {
2439                int pkts_num;
2440
2441                /* Set default buffer number or free all the buffers in case
2442                 * the pool is not empty
2443                 */
2444                pkts_num = new_pool->buf_num;
2445                if (pkts_num == 0)
2446                        pkts_num = type == MVPP2_BM_SWF_LONG ?
2447                                   MVPP2_BM_LONG_BUF_NUM :
2448                                   MVPP2_BM_SHORT_BUF_NUM;
2449                else
2450                        mvpp2_bm_bufs_free(NULL,
2451                                           port->priv, new_pool);
2452
2453                new_pool->pkt_size = pkt_size;
2454
2455                /* Allocate buffers for this pool */
2456                num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2457                if (num != pkts_num) {
2458                        netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2459                                new_pool->id, num, pkts_num);
2460                        return NULL;
2461                }
2462        }
2463
2464        mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
2465                                  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
2466
2467        return new_pool;
2468}
2469
2470/* Initialize pools for swf */
2471static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2472{
2473        int rxq;
2474
2475        if (!port->pool_long) {
2476                port->pool_long =
2477                       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2478                                         MVPP2_BM_SWF_LONG,
2479                                         port->pkt_size);
2480                if (!port->pool_long)
2481                        return -ENOMEM;
2482
2483                port->pool_long->port_map |= (1 << port->id);
2484
2485                for (rxq = 0; rxq < rxq_number; rxq++)
2486                        mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2487        }
2488
2489        return 0;
2490}
2491
2492/* Port configuration routines */
2493
2494static void mvpp2_port_mii_set(struct mvpp2_port *port)
2495{
2496        u32 val;
2497
2498        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2499
2500        switch (port->phy_interface) {
2501        case PHY_INTERFACE_MODE_SGMII:
2502                val |= MVPP2_GMAC_INBAND_AN_MASK;
2503                break;
2504        case PHY_INTERFACE_MODE_RGMII:
2505                val |= MVPP2_GMAC_PORT_RGMII_MASK;      /* fall through */
2506        default:
2507                val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2508        }
2509
2510        writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2511}
2512
2513static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2514{
2515        u32 val;
2516
2517        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2518        val |= MVPP2_GMAC_FC_ADV_EN;
2519        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2520}
2521
2522static void mvpp2_port_enable(struct mvpp2_port *port)
2523{
2524        u32 val;
2525
2526        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2527        val |= MVPP2_GMAC_PORT_EN_MASK;
2528        val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2529        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2530}
2531
2532static void mvpp2_port_disable(struct mvpp2_port *port)
2533{
2534        u32 val;
2535
2536        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2537        val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2538        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2539}
2540
2541/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2542static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2543{
2544        u32 val;
2545
2546        val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2547                    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2548        writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2549}
2550
2551/* Configure loopback port */
2552static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2553{
2554        u32 val;
2555
2556        val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2557
2558        if (port->speed == 1000)
2559                val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2560        else
2561                val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2562
2563        if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2564                val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2565        else
2566                val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2567
2568        writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2569}
2570
2571static void mvpp2_port_reset(struct mvpp2_port *port)
2572{
2573        u32 val;
2574
2575        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2576                    ~MVPP2_GMAC_PORT_RESET_MASK;
2577        writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2578
2579        while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2580               MVPP2_GMAC_PORT_RESET_MASK)
2581                continue;
2582}
2583
2584/* Change maximum receive size of the port */
2585static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2586{
2587        u32 val;
2588
2589        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2590        val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2591        val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2592                    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2593        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2594}
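    /*
     * The division by two above suggests the max RX size field counts
     * 2-byte units (illustrative example): with pkt_size =
     * MVPP2_MH_SIZE + 1518, the value programmed into the register is
     * 1518 / 2 = 759.
     */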
2595
2596/* Set defaults to the MVPP2 port */
2597static void mvpp2_defaults_set(struct mvpp2_port *port)
2598{
2599        int tx_port_num, val, queue, ptxq, lrxq;
2600
2601        /* Configure port to loopback if needed */
2602        if (port->flags & MVPP2_F_LOOPBACK)
2603                mvpp2_port_loopback_set(port);
2604
2605        /* Update TX FIFO MIN Threshold */
2606        val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2607        val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2608        /* Min. TX threshold must be less than minimal packet length */
2609        val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2610        writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2611
2612        /* Disable Legacy WRR, Disable EJP, Release from reset */
2613        tx_port_num = mvpp2_egress_port(port);
2614        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2615                    tx_port_num);
2616        mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2617
2618        /* Close bandwidth for all queues */
2619        for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
2620                ptxq = mvpp2_txq_phys(port->id, queue);
2621                mvpp2_write(port->priv,
2622                            MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
2623        }
2624
2625        /* Set refill period to 1 usec, refill tokens
2626         * and bucket size to maximum
2627         */
2628        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
2629        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2630        val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2631        val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2632        val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2633        mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2634        val = MVPP2_TXP_TOKEN_SIZE_MAX;
2635        mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2636
2637        /* Set MaximumLowLatencyPacketSize value to 256 */
2638        mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2639                    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2640                    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2641
2642        /* Enable Rx cache snoop */
2643        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2644                queue = port->rxqs[lrxq]->id;
2645                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2646                val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2647                           MVPP2_SNOOP_BUF_HDR_MASK;
2648                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2649        }
2650}
2651
2652/* Enable/disable receiving packets */
2653static void mvpp2_ingress_enable(struct mvpp2_port *port)
2654{
2655        u32 val;
2656        int lrxq, queue;
2657
2658        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2659                queue = port->rxqs[lrxq]->id;
2660                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2661                val &= ~MVPP2_RXQ_DISABLE_MASK;
2662                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2663        }
2664}
2665
2666static void mvpp2_ingress_disable(struct mvpp2_port *port)
2667{
2668        u32 val;
2669        int lrxq, queue;
2670
2671        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2672                queue = port->rxqs[lrxq]->id;
2673                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2674                val |= MVPP2_RXQ_DISABLE_MASK;
2675                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2676        }
2677}
2678
2679/* Enable transmit via physical egress queue
2680 * - HW starts taking descriptors from DRAM
2681 */
2682static void mvpp2_egress_enable(struct mvpp2_port *port)
2683{
2684        u32 qmap;
2685        int queue;
2686        int tx_port_num = mvpp2_egress_port(port);
2687
2688        /* Enable all initialized TXs. */
2689        qmap = 0;
2690        for (queue = 0; queue < txq_number; queue++) {
2691                struct mvpp2_tx_queue *txq = port->txqs[queue];
2692
2693                if (txq->descs != NULL)
2694                        qmap |= (1 << queue);
2695        }
2696
2697        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2698        mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2699}
2700
2701/* Disable transmit via physical egress queue
2702 * - HW doesn't take descriptors from DRAM
2703 */
2704static void mvpp2_egress_disable(struct mvpp2_port *port)
2705{
2706        u32 reg_data;
2707        int delay;
2708        int tx_port_num = mvpp2_egress_port(port);
2709
2710        /* Issue stop command for active channels only */
2711        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2712        reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2713                    MVPP2_TXP_SCHED_ENQ_MASK;
2714        if (reg_data != 0)
2715                mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2716                            (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2717
2718        /* Wait for all Tx activity to terminate. */
2719        delay = 0;
2720        do {
2721                if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2722                        netdev_warn(port->dev,
2723                                    "Tx stop timed out, status=0x%08x\n",
2724                                    reg_data);
2725                        break;
2726                }
2727                mdelay(1);
2728                delay++;
2729
2730                /* Check the port TX Command register to verify
2731                 * that all Tx queues are stopped
2732                 */
2733                reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2734        } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2735}
2736
2737/* Rx descriptors helper methods */
2738
2739/* Get number of Rx descriptors occupied by received packets */
2740static inline int
2741mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2742{
2743        u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2744
2745        return val & MVPP2_RXQ_OCCUPIED_MASK;
2746}
2747
2748/* Update Rx queue status with the number of occupied and available
2749 * Rx descriptor slots.
2750 */
2751static inline void
2752mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2753                        int used_count, int free_count)
2754{
2755        /* Decrement the number of used descriptors and
2756         * increment the number of free descriptors.
2757         */
2758        u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2759
2760        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2761}
2762
2763/* Get pointer to next RX descriptor to be processed by SW */
2764static inline struct mvpp2_rx_desc *
2765mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2766{
2767        int rx_desc = rxq->next_desc_to_proc;
2768
2769        rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2770        prefetch(rxq->descs + rxq->next_desc_to_proc);
2771        return rxq->descs + rx_desc;
2772}
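/*
 * MVPP2_QUEUE_NEXT_DESC() wraps the index back to 0 after last_desc,
 * so the descriptor array behaves as a ring buffer; prefetching the
 * following descriptor is only a best-effort cache hint.
 */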
2773
2774/* Set rx queue offset */
2775static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2776                                 int prxq, int offset)
2777{
2778        u32 val;
2779
2780        /* Convert offset from bytes to units of 32 bytes */
2781        offset = offset >> 5;
2782
2783        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2784        val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2785
2786        /* Offset is in units of 32 bytes */
2787        val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2788                    MVPP2_RXQ_PACKET_OFFSET_MASK);
2789
2790        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2791}
2792
2793/* Obtain BM cookie information from descriptor */
2794static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
2795{
2796        int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2797                   MVPP2_RXD_BM_POOL_ID_OFFS;
2798        int cpu = smp_processor_id();
2799
2800        return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
2801               ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
2802}
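/*
 * The BM cookie packs the buffer pool id and the issuing CPU (always 0
 * in U-Boot) into one 32-bit word, one byte each at
 * MVPP2_BM_COOKIE_POOL_OFFS and MVPP2_BM_COOKIE_CPU_OFFS; the RX path
 * later extracts the pool id again via mvpp2_bm_cookie_pool_get().
 */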
2803
2804/* Tx descriptors helper methods */
2805
2806/* Get number of Tx descriptors waiting to be transmitted by HW */
2807static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
2808                                       struct mvpp2_tx_queue *txq)
2809{
2810        u32 val;
2811
2812        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
2813        val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
2814
2815        return val & MVPP2_TXQ_PENDING_MASK;
2816}
2817
2818/* Get pointer to next Tx descriptor to be processed (sent) by HW */
2819static struct mvpp2_tx_desc *
2820mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2821{
2822        int tx_desc = txq->next_desc_to_proc;
2823
2824        txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2825        return txq->descs + tx_desc;
2826}
2827
2828/* Update HW with number of aggregated Tx descriptors to be sent */
2829static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2830{
2831        /* aggregated access - relevant TXQ number is written in TX desc */
2832        mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2833}
2834
2835/* Get number of sent descriptors and decrement counter.
2836 * The number of sent descriptors is returned.
2837 * Per-CPU access
2838 */
2839static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2840                                           struct mvpp2_tx_queue *txq)
2841{
2842        u32 val;
2843
2844        /* Reading status reg resets transmitted descriptor counter */
2845        val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
2846
2847        return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2848                MVPP2_TRANSMITTED_COUNT_OFFSET;
2849}
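/*
 * Since MVPP2_TXQ_SENT_REG is clear-on-read, a single read both
 * fetches and resets the transmitted-descriptor count; the "clear"
 * helper below relies on exactly this side effect and discards the
 * value.
 */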
2850
2851static void mvpp2_txq_sent_counter_clear(void *arg)
2852{
2853        struct mvpp2_port *port = arg;
2854        int queue;
2855
2856        for (queue = 0; queue < txq_number; queue++) {
2857                int id = port->txqs[queue]->id;
2858
2859                mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
2860        }
2861}
2862
2863/* Set max sizes for Tx queues */
2864static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2865{
2866        u32     val, size, mtu;
2867        int     txq, tx_port_num;
2868
2869        mtu = port->pkt_size * 8;
2870        if (mtu > MVPP2_TXP_MTU_MAX)
2871                mtu = MVPP2_TXP_MTU_MAX;
2872
2873        /* Workaround for wrong token-bucket update: set MTU value = 3 * real MTU value */
2874        mtu = 3 * mtu;
2875
2876        /* Indirect access to registers */
2877        tx_port_num = mvpp2_egress_port(port);
2878        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2879
2880        /* Set MTU */
2881        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2882        val &= ~MVPP2_TXP_MTU_MAX;
2883        val |= mtu;
2884        mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2885
2886        /* TXP token size and all TXQs token size must be larger than MTU */
2887        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2888        size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2889        if (size < mtu) {
2890                size = mtu;
2891                val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2892                val |= size;
2893                mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2894        }
2895
2896        for (txq = 0; txq < txq_number; txq++) {
2897                val = mvpp2_read(port->priv,
2898                                 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2899                size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2900
2901                if (size < mtu) {
2902                        size = mtu;
2903                        val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2904                        val |= size;
2905                        mvpp2_write(port->priv,
2906                                    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2907                                    val);
2908                }
2909        }
2910}
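/*
 * The scheduler token buckets appear to be sized in bits, hence the
 * pkt_size * 8 conversion above. Note that the TXP and per-TXQ token
 * sizes are only ever raised to the new MTU, never lowered, so a
 * larger previously programmed value is kept.
 */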
2911
2912/* Free Tx queue buffers; in U-Boot this just advances the per-CPU get index */
2913static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2914                                struct mvpp2_tx_queue *txq,
2915                                struct mvpp2_txq_pcpu *txq_pcpu, int num)
2916{
2917        int i;
2918
2919        for (i = 0; i < num; i++)
2920                mvpp2_txq_inc_get(txq_pcpu);
2921}
2922
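/* Map an interrupt cause bitmap to its queue (highest set bit) */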
2923static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2924                                                        u32 cause)
2925{
2926        int queue = fls(cause) - 1;
2927
2928        return port->rxqs[queue];
2929}
2930
2931static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2932                                                        u32 cause)
2933{
2934        int queue = fls(cause) - 1;
2935
2936        return port->txqs[queue];
2937}
2938
2939/* Rx/Tx queue initialization/cleanup methods */
2940
2941/* Allocate and initialize descriptors for aggr TXQ */
2942static int mvpp2_aggr_txq_init(struct udevice *dev,
2943                               struct mvpp2_tx_queue *aggr_txq,
2944                               int desc_num, int cpu,
2945                               struct mvpp2 *priv)
2946{
2947        /* Allocate memory for TX descriptors */
2948        aggr_txq->descs = buffer_loc.aggr_tx_descs;
2949        aggr_txq->descs_phys = (dma_addr_t)buffer_loc.aggr_tx_descs;
2950        if (!aggr_txq->descs)
2951                return -ENOMEM;
2952
2953        /* Make sure descriptor address is cache line size aligned  */
2954        BUG_ON(aggr_txq->descs !=
2955               PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
2956
2957        aggr_txq->last_desc = aggr_txq->size - 1;
2958
2959        /* Workaround: the aggregated TXQ is not reset, resume from the HW index */
2960        aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2961                                                 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
2962
2963        /* Set Tx descriptors queue starting address */
2964        /* indirect access */
2965        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
2966                    aggr_txq->descs_phys);
2967        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
2968
2969        return 0;
2970}
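/*
 * U-Boot note: the descriptors come from the static, uncached
 * buffer_loc area set up in mvpp2_base_probe() instead of a DMA
 * allocator, which is why the virtual address doubles as descs_phys
 * above.
 */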
2971
2972/* Create a specified Rx queue */
2973static int mvpp2_rxq_init(struct mvpp2_port *port,
2974                          struct mvpp2_rx_queue *rxq)
2976{
2977        rxq->size = port->rx_ring_size;
2978
2979        /* Allocate memory for RX descriptors */
2980        rxq->descs = buffer_loc.rx_descs;
2981        rxq->descs_phys = (dma_addr_t)buffer_loc.rx_descs;
2982        if (!rxq->descs)
2983                return -ENOMEM;
2984
2985        BUG_ON(rxq->descs !=
2986               PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
2987
2988        rxq->last_desc = rxq->size - 1;
2989
2990        /* Zero occupied and non-occupied counters - direct access */
2991        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2992
2993        /* Set Rx descriptors queue starting address - indirect access */
2994        mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2995        mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
2996        mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2997        mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
2998
2999        /* Set Offset */
3000        mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
3001
3002        /* Add number of descriptors ready for receiving packets */
3003        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
3004
3005        return 0;
3006}
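/*
 * Per-queue RXQ registers are accessed indirectly: the queue id is
 * first latched into MVPP2_RXQ_NUM_REG, and the following DESC_ADDR /
 * DESC_SIZE / INDEX accesses then apply to that queue.
 */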
3007
3008/* Push packets received by the RXQ to BM pool */
3009static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3010                                struct mvpp2_rx_queue *rxq)
3011{
3012        int rx_received, i;
3013
3014        rx_received = mvpp2_rxq_received(port, rxq->id);
3015        if (!rx_received)
3016                return;
3017
3018        for (i = 0; i < rx_received; i++) {
3019                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3020                u32 bm = mvpp2_bm_cookie_build(rx_desc);
3021
3022                mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
3023                                  rx_desc->buf_cookie);
3024        }
3025        mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3026}
3027
3028/* Cleanup Rx queue */
3029static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3030                             struct mvpp2_rx_queue *rxq)
3031{
3032        mvpp2_rxq_drop_pkts(port, rxq);
3033
3034        rxq->descs             = NULL;
3035        rxq->last_desc         = 0;
3036        rxq->next_desc_to_proc = 0;
3037        rxq->descs_phys        = 0;
3038
3039        /* Clear the Rx descriptor queue starting address and size,
3040         * and the free descriptor count
3041         */
3042        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3043        mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3044        mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
3045        mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
3046}
3047
3048/* Create and initialize a Tx queue */
3049static int mvpp2_txq_init(struct mvpp2_port *port,
3050                          struct mvpp2_tx_queue *txq)
3051{
3052        u32 val;
3053        int cpu, desc, desc_per_txq, tx_port_num;
3054        struct mvpp2_txq_pcpu *txq_pcpu;
3055
3056        txq->size = port->tx_ring_size;
3057
3058        /* Allocate memory for Tx descriptors */
3059        txq->descs = buffer_loc.tx_descs;
3060        txq->descs_phys = (dma_addr_t)buffer_loc.tx_descs;
3061        if (!txq->descs)
3062                return -ENOMEM;
3063
3064        /* Make sure descriptor address is cache line size aligned  */
3065        BUG_ON(txq->descs !=
3066               PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3067
3068        txq->last_desc = txq->size - 1;
3069
3070        /* Set Tx descriptors queue starting address - indirect access */
3071        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3072        mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
3073        mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
3074                                             MVPP2_TXQ_DESC_SIZE_MASK);
3075        mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
3076        mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
3077                    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3078        val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3079        val &= ~MVPP2_TXQ_PENDING_MASK;
3080        mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
3081
3082        /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3083         * for each existing TXQ.
3084         * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT;
3085         * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
3086         */
3087        desc_per_txq = 16;
3088        desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3089               (txq->log_id * desc_per_txq);
3090
3091        mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
3092                    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3093                    MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
3094
3095        /* WRR / EJP configuration - indirect access */
3096        tx_port_num = mvpp2_egress_port(port);
3097        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3098
3099        val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3100        val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3101        val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3102        val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3103        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3104
3105        val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3106        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3107                    val);
3108
3109        for_each_present_cpu(cpu) {
3110                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3111                txq_pcpu->size = txq->size;
3112        }
3113
3114        return 0;
3115}
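/*
 * Worked example for the prefetch-buffer partitioning in
 * mvpp2_txq_init(): with 16 descriptors reserved per TXQ, port 1 /
 * logical queue 2 gets desc = 1 * MVPP2_MAX_TXQ * 16 + 2 * 16, i.e.
 * the third 16-descriptor slot inside port 1's region, programmed
 * with a half-full refill threshold of 8.
 */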
3116
3117/* Free allocated TXQ resources */
3118static void mvpp2_txq_deinit(struct mvpp2_port *port,
3119                             struct mvpp2_tx_queue *txq)
3120{
3121        txq->descs             = NULL;
3122        txq->last_desc         = 0;
3123        txq->next_desc_to_proc = 0;
3124        txq->descs_phys        = 0;
3125
3126        /* Set minimum bandwidth for disabled TXQs */
3127        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
3128
3129        /* Set Tx descriptors queue starting address and size */
3130        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3131        mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
3132        mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
3133}
3134
3135/* Drain and clean up a Tx queue */
3136static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3137{
3138        struct mvpp2_txq_pcpu *txq_pcpu;
3139        int delay, pending, cpu;
3140        u32 val;
3141
3142        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3143        val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3144        val |= MVPP2_TXQ_DRAIN_EN_MASK;
3145        mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3146
3147        /* New transmissions have been stopped, so wait for all pending
3148         * packets to be transmitted.
3149         */
3150        delay = 0;
3151        do {
3152                if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3153                        netdev_warn(port->dev,
3154                                    "port %d: cleaning queue %d timed out\n",
3155                                    port->id, txq->log_id);
3156                        break;
3157                }
3158                mdelay(1);
3159                delay++;
3160
3161                pending = mvpp2_txq_pend_desc_num_get(port, txq);
3162        } while (pending);
3163
3164        val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3165        mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3166
3167        for_each_present_cpu(cpu) {
3168                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3169
3170                /* Release all packets */
3171                mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3172
3173                /* Reset queue */
3174                txq_pcpu->count = 0;
3175                txq_pcpu->txq_put_index = 0;
3176                txq_pcpu->txq_get_index = 0;
3177        }
3178}
3179
3180/* Cleanup all Tx queues */
3181static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3182{
3183        struct mvpp2_tx_queue *txq;
3184        int queue;
3185        u32 val;
3186
3187        val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3188
3189        /* Reset Tx ports and delete Tx queues */
3190        val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3191        mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3192
3193        for (queue = 0; queue < txq_number; queue++) {
3194                txq = port->txqs[queue];
3195                mvpp2_txq_clean(port, txq);
3196                mvpp2_txq_deinit(port, txq);
3197        }
3198
3199        mvpp2_txq_sent_counter_clear(port);
3200
3201        val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3202        mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3203}
3204
3205/* Cleanup all Rx queues */
3206static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3207{
3208        int queue;
3209
3210        for (queue = 0; queue < rxq_number; queue++)
3211                mvpp2_rxq_deinit(port, port->rxqs[queue]);
3212}
3213
3214/* Init all Rx queues for port */
3215static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3216{
3217        int queue, err;
3218
3219        for (queue = 0; queue < rxq_number; queue++) {
3220                err = mvpp2_rxq_init(port, port->rxqs[queue]);
3221                if (err)
3222                        goto err_cleanup;
3223        }
3224        return 0;
3225
3226err_cleanup:
3227        mvpp2_cleanup_rxqs(port);
3228        return err;
3229}
3230
3231/* Init all Tx queues for port */
3232static int mvpp2_setup_txqs(struct mvpp2_port *port)
3233{
3234        struct mvpp2_tx_queue *txq;
3235        int queue, err;
3236
3237        for (queue = 0; queue < txq_number; queue++) {
3238                txq = port->txqs[queue];
3239                err = mvpp2_txq_init(port, txq);
3240                if (err)
3241                        goto err_cleanup;
3242        }
3243
3244        mvpp2_txq_sent_counter_clear(port);
3245        return 0;
3246
3247err_cleanup:
3248        mvpp2_cleanup_txqs(port);
3249        return err;
3250}
3251
3252/* Adjust link */
3253static void mvpp2_link_event(struct mvpp2_port *port)
3254{
3255        struct phy_device *phydev = port->phy_dev;
3256        int status_change = 0;
3257        u32 val;
3258
3259        if (phydev->link) {
3260                if ((port->speed != phydev->speed) ||
3261                    (port->duplex != phydev->duplex)) {
3262                        u32 val;
3263
3264                        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3265                        val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
3266                                 MVPP2_GMAC_CONFIG_GMII_SPEED |
3267                                 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3268                                 MVPP2_GMAC_AN_SPEED_EN |
3269                                 MVPP2_GMAC_AN_DUPLEX_EN);
3270
3271                        if (phydev->duplex)
3272                                val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
3273
3274                        if (phydev->speed == SPEED_1000)
3275                                val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
3276                        else if (phydev->speed == SPEED_100)
3277                                val |= MVPP2_GMAC_CONFIG_MII_SPEED;
3278
3279                        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3280
3281                        port->duplex = phydev->duplex;
3282                        port->speed  = phydev->speed;
3283                }
3284        }
3285
3286        if (phydev->link != port->link) {
3287                if (!phydev->link) {
3288                        port->duplex = -1;
3289                        port->speed = 0;
3290                }
3291
3292                port->link = phydev->link;
3293                status_change = 1;
3294        }
3295
3296        if (status_change) {
3297                if (phydev->link) {
3298                        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3299                        val |= (MVPP2_GMAC_FORCE_LINK_PASS |
3300                                MVPP2_GMAC_FORCE_LINK_DOWN);
3301                        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3302                        mvpp2_egress_enable(port);
3303                        mvpp2_ingress_enable(port);
3304                } else {
3305                        mvpp2_ingress_disable(port);
3306                        mvpp2_egress_disable(port);
3307                }
3308        }
3309}
3310
3311/* Main RX/TX processing routines */
3312
3313/* Display more error info */
3314static void mvpp2_rx_error(struct mvpp2_port *port,
3315                           struct mvpp2_rx_desc *rx_desc)
3316{
3317        u32 status = rx_desc->status;
3318
3319        switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3320        case MVPP2_RXD_ERR_CRC:
3321                netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
3322                           status, rx_desc->data_size);
3323                break;
3324        case MVPP2_RXD_ERR_OVERRUN:
3325                netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
3326                           status, rx_desc->data_size);
3327                break;
3328        case MVPP2_RXD_ERR_RESOURCE:
3329                netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
3330                           status, rx_desc->data_size);
3331                break;
3332        }
3333}
3334
3335/* Return the just-used buffer to the BM pool (no skb handling in U-Boot) */
3336static int mvpp2_rx_refill(struct mvpp2_port *port,
3337                           struct mvpp2_bm_pool *bm_pool,
3338                           u32 bm, u32 phys_addr)
3339{
3340        mvpp2_pool_refill(port, bm, phys_addr, phys_addr);
3341        return 0;
3342}
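/*
 * U-Boot note: no new buffer is allocated on refill; the buffer that
 * was just received is immediately returned to its BM pool, with the
 * physical address reused as the cookie (the buffers live in a flat,
 * uncached region).
 */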
3343
3344/* Set hw internals when starting port */
3345static void mvpp2_start_dev(struct mvpp2_port *port)
3346{
3347        mvpp2_gmac_max_rx_size_set(port);
3348        mvpp2_txp_max_tx_size_set(port);
3349
3350        mvpp2_port_enable(port);
3351}
3352
3353/* Set hw internals when stopping port */
3354static void mvpp2_stop_dev(struct mvpp2_port *port)
3355{
3356        /* Stop new packets from arriving to RXQs */
3357        mvpp2_ingress_disable(port);
3358
3359        mvpp2_egress_disable(port);
3360        mvpp2_port_disable(port);
3361}
3362
3363static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
3364{
3365        struct phy_device *phy_dev;
3366
3367        if (!port->init || port->link == 0) {
3368                phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
3369                                      port->phy_interface);
3371                if (!phy_dev) {
3372                        netdev_err(port->dev, "cannot connect to phy\n");
3373                        return -ENODEV;
3374                }
3375                phy_dev->supported &= PHY_GBIT_FEATURES;
3376                phy_dev->advertising = phy_dev->supported;
3377
3378                port->phy_dev = phy_dev;
3379                port->link    = 0;
3380                port->duplex  = 0;
3381                port->speed   = 0;
3382
3383                phy_config(phy_dev);
3384                phy_startup(phy_dev);
3385                if (!phy_dev->link) {
3386                        printf("%s: No link\n", phy_dev->dev->name);
3387                        return -1;
3388                }
3389
3390                port->init = 1;
3391        } else {
3392                mvpp2_egress_enable(port);
3393                mvpp2_ingress_enable(port);
3394        }
3395
3396        return 0;
3397}
3398
3399static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
3400{
3401        unsigned char mac_bcast[ETH_ALEN] = {
3402                        0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3403        int err;
3404
3405        err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
3406        if (err) {
3407                netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3408                return err;
3409        }
3410        err = mvpp2_prs_mac_da_accept(port->priv, port->id,
3411                                      port->dev_addr, true);
3412        if (err) {
3413                netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
3414                return err;
3415        }
3416        err = mvpp2_prs_def_flow(port);
3417        if (err) {
3418                netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3419                return err;
3420        }
3421
3422        /* Allocate the Rx/Tx queues */
3423        err = mvpp2_setup_rxqs(port);
3424        if (err) {
3425                netdev_err(port->dev, "cannot allocate Rx queues\n");
3426                return err;
3427        }
3428
3429        err = mvpp2_setup_txqs(port);
3430        if (err) {
3431                netdev_err(port->dev, "cannot allocate Tx queues\n");
3432                return err;
3433        }
3434
3435        err = mvpp2_phy_connect(dev, port);
3436        if (err < 0)
3437                return err;
3438
3439        mvpp2_link_event(port);
3440
3441        mvpp2_start_dev(port);
3442
3443        return 0;
3444}
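/*
 * Bring-up order used above: parser MAC entries (broadcast plus own
 * address) and the default flow first, then RX/TX queue setup, PHY
 * connect, an initial link evaluation, and finally the MAC/port
 * enable in mvpp2_start_dev().
 */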
3445
3446/* No Device ops here in U-Boot */
3447
3448/* Driver initialization */
3449
3450static void mvpp2_port_power_up(struct mvpp2_port *port)
3451{
3452        mvpp2_port_mii_set(port);
3453        mvpp2_port_periodic_xon_disable(port);
3454        mvpp2_port_fc_adv_enable(port);
3455        mvpp2_port_reset(port);
3456}
3457
3458/* Initialize port HW */
3459static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
3460{
3461        struct mvpp2 *priv = port->priv;
3462        struct mvpp2_txq_pcpu *txq_pcpu;
3463        int queue, cpu, err;
3464
3465        if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
3466                return -EINVAL;
3467
3468        /* Disable port */
3469        mvpp2_egress_disable(port);
3470        mvpp2_port_disable(port);
3471
3472        port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
3473                                  GFP_KERNEL);
3474        if (!port->txqs)
3475                return -ENOMEM;
3476
3477        /* Associate physical Tx queues to this port and initialize.
3478         * The mapping is predefined.
3479         */
3480        for (queue = 0; queue < txq_number; queue++) {
3481                int queue_phy_id = mvpp2_txq_phys(port->id, queue);
3482                struct mvpp2_tx_queue *txq;
3483
3484                txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
3485                if (!txq)
3486                        return -ENOMEM;
3487
3488                txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
3489                                         GFP_KERNEL);
3490                if (!txq->pcpu)
3491                        return -ENOMEM;
3492
3493                txq->id = queue_phy_id;
3494                txq->log_id = queue;
3495                txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
3496                for_each_present_cpu(cpu) {
3497                        txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3498                        txq_pcpu->cpu = cpu;
3499                }
3500
3501                port->txqs[queue] = txq;
3502        }
3503
3504        port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
3505                                  GFP_KERNEL);
3506        if (!port->rxqs)
3507                return -ENOMEM;
3508
3509        /* Allocate and initialize Rx queue for this port */
3510        for (queue = 0; queue < rxq_number; queue++) {
3511                struct mvpp2_rx_queue *rxq;
3512
3513                /* Map physical Rx queue to port's logical Rx queue */
3514                rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
3515                if (!rxq)
3516                        return -ENOMEM;
3517                /* Map this Rx queue to a physical queue */
3518                rxq->id = port->first_rxq + queue;
3519                rxq->port = port->id;
3520                rxq->logic_rxq = queue;
3521
3522                port->rxqs[queue] = rxq;
3523        }
3524
3525        /* Configure Rx queue group interrupt for this port */
3526        mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ);
3527
3528        /* Create Rx descriptor rings */
3529        for (queue = 0; queue < rxq_number; queue++) {
3530                struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3531
3532                rxq->size = port->rx_ring_size;
3533                rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
3534                rxq->time_coal = MVPP2_RX_COAL_USEC;
3535        }
3536
3537        mvpp2_ingress_disable(port);
3538
3539        /* Port default configuration */
3540        mvpp2_defaults_set(port);
3541
3542        /* Port's classifier configuration */
3543        mvpp2_cls_oversize_rxq_set(port);
3544        mvpp2_cls_port_config(port);
3545
3546        /* Provide an initial Rx packet size */
3547        port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
3548
3549        /* Initialize pools for swf */
3550        err = mvpp2_swf_bm_pool_init(port);
3551        if (err)
3552                return err;
3553
3554        return 0;
3555}
3556
3557/* Ports initialization */
3558static int mvpp2_port_probe(struct udevice *dev,
3559                            struct mvpp2_port *port,
3560                            int port_node,
3561                            struct mvpp2 *priv,
3562                            int *next_first_rxq)
3563{
3564        int phy_node;
3565        u32 id;
3566        u32 phyaddr;
3567        const char *phy_mode_str;
3568        int phy_mode = -1;
3569        int priv_common_regs_num = 2;
3570        int err;
3571
3572        phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
3573        if (phy_node < 0) {
3574                dev_err(dev, "missing phy\n");
3575                return -ENODEV;
3576        }
3577
3578        phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
3579        if (phy_mode_str)
3580                phy_mode = phy_get_interface_by_name(phy_mode_str);
3581        if (phy_mode == -1) {
3582                dev_err(dev, "incorrect phy mode\n");
3583                return -EINVAL;
3584        }
3585
3586        id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
3587        if (id == -1) {
3588                dev_err(dev, "missing port-id value\n");
3589                return -EINVAL;
3590        }
3591
3592        phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
3593
3594        port->priv = priv;
3595        port->id = id;
3596        port->first_rxq = *next_first_rxq;
3597        port->phy_node = phy_node;
3598        port->phy_interface = phy_mode;
3599        port->phyaddr = phyaddr;
3600
3601        port->base = (void __iomem *)dev_get_addr_index(dev->parent,
3602                                                        priv_common_regs_num
3603                                                        + id);
3604        if (IS_ERR(port->base))
3605                return PTR_ERR(port->base);
3606
3607        port->tx_ring_size = MVPP2_MAX_TXD;
3608        port->rx_ring_size = MVPP2_MAX_RXD;
3609
3610        err = mvpp2_port_init(dev, port);
3611        if (err < 0) {
3612                dev_err(dev, "failed to init port %d\n", id);
3613                return err;
3614        }
3615        mvpp2_port_power_up(port);
3616
3617        /* Increment the first Rx queue number to be used by the next port */
3618        *next_first_rxq += CONFIG_MV_ETH_RXQ;
3619        priv->port_list[id] = port;
3620        return 0;
3621}
3622
3623/* Initialize decoding windows */
3624static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
3625                                    struct mvpp2 *priv)
3626{
3627        u32 win_enable;
3628        int i;
3629
3630        for (i = 0; i < 6; i++) {
3631                mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
3632                mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
3633
3634                if (i < 4)
3635                        mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
3636        }
3637
3638        win_enable = 0;
3639
3640        for (i = 0; i < dram->num_cs; i++) {
3641                const struct mbus_dram_window *cs = dram->cs + i;
3642
3643                mvpp2_write(priv, MVPP2_WIN_BASE(i),
3644                            (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
3645                            dram->mbus_dram_target_id);
3646
3647                mvpp2_write(priv, MVPP2_WIN_SIZE(i),
3648                            (cs->size - 1) & 0xffff0000);
3649
3650                win_enable |= (1 << i);
3651        }
3652
3653        mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
3654}
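/*
 * Each enabled window programs the chip-select base (64KiB
 * granularity, with the mbus attribute and target id in the low bits)
 * and a size register holding (size - 1) masked to the same
 * granularity; win_enable then turns on one bit per configured chip
 * select.
 */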
3655
3656/* Initialize Rx FIFOs */
3657static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
3658{
3659        int port;
3660
3661        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
3662                mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
3663                            MVPP2_RX_FIFO_PORT_DATA_SIZE);
3664                mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
3665                            MVPP2_RX_FIFO_PORT_ATTR_SIZE);
3666        }
3667
3668        mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
3669                    MVPP2_RX_FIFO_PORT_MIN_PKT);
3670        mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
3671}
3672
3673/* Initialize network controller common part HW */
3674static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
3675{
3676        const struct mbus_dram_target_info *dram_target_info;
3677        int err, i;
3678        u32 val;
3679
3680        /* Checks for hardware constraints (U-Boot uses only one rxq) */
3681        if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) {
3682                dev_err(dev, "invalid queue size parameter\n");
3683                return -EINVAL;
3684        }
3685
3686        /* MBUS windows configuration */
3687        dram_target_info = mvebu_mbus_dram_info();
3688        if (dram_target_info)
3689                mvpp2_conf_mbus_windows(dram_target_info, priv);
3690
3691        /* Disable HW PHY polling */
3692        val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3693        val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
3694        writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3695
3696        /* Allocate and initialize aggregated TXQs */
3697        priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
3698                                       sizeof(struct mvpp2_tx_queue),
3699                                       GFP_KERNEL);
3700        if (!priv->aggr_txqs)
3701                return -ENOMEM;
3702
3703        for_each_present_cpu(i) {
3704                priv->aggr_txqs[i].id = i;
3705                priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
3706                err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
3707                                          MVPP2_AGGR_TXQ_SIZE, i, priv);
3708                if (err < 0)
3709                        return err;
3710        }
3711
3712        /* Rx Fifo Init */
3713        mvpp2_rx_fifo_init(priv);
3714
3715        /* Reset Rx queue group interrupt configuration */
3716        for (i = 0; i < MVPP2_MAX_PORTS; i++)
3717                mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i),
3718                            CONFIG_MV_ETH_RXQ);
3719
3720        writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
3721               priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
3722
3723        /* Allow cache snoop when transmitting packets */
3724        mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
3725
3726        /* Buffer Manager initialization */
3727        err = mvpp2_bm_init(dev, priv);
3728        if (err < 0)
3729                return err;
3730
3731        /* Parser default initialization */
3732        err = mvpp2_prs_default_init(dev, priv);
3733        if (err < 0)
3734                return err;
3735
3736        /* Classifier default initialization */
3737        mvpp2_cls_init(priv);
3738
3739        return 0;
3740}
3741
3742/* SMI / MDIO functions */
3743
3744static int smi_wait_ready(struct mvpp2 *priv)
3745{
3746        u32 timeout = MVPP2_SMI_TIMEOUT;
3747        u32 smi_reg;
3748
3749        /* wait till the SMI is not busy */
3750        do {
3751                /* read smi register */
3752                smi_reg = readl(priv->lms_base + MVPP2_SMI);
3753                if (timeout-- == 0) {
3754                        printf("Error: SMI busy timeout\n");
3755                        return -EFAULT;
3756                }
3757        } while (smi_reg & MVPP2_SMI_BUSY);
3758
3759        return 0;
3760}
3761
3762/*
3763 * mpp2_mdio_read - miiphy_read callback function.
3764 *
3765 * Returns the 16-bit phy register value, or -EFAULT on error
3766 */
3767static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
3768{
3769        struct mvpp2 *priv = bus->priv;
3770        u32 smi_reg;
3771        u32 timeout;
3772
3773        /* check parameters */
3774        if (addr > MVPP2_PHY_ADDR_MASK) {
3775                printf("Error: Invalid PHY address %d\n", addr);
3776                return -EFAULT;
3777        }
3778
3779        if (reg > MVPP2_PHY_REG_MASK) {
3780                printf("Err: Invalid register offset %d\n", reg);
3781                return -EFAULT;
3782        }
3783
3784        /* wait till the SMI is not busy */
3785        if (smi_wait_ready(priv) < 0)
3786                return -EFAULT;
3787
3788        /* fill the phy address and register offset and read opcode */
3789        smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3790                | (reg << MVPP2_SMI_REG_ADDR_OFFS)
3791                | MVPP2_SMI_OPCODE_READ;
3792
3793        /* write the smi register */
3794        writel(smi_reg, priv->lms_base + MVPP2_SMI);
3795
3796        /* wait till read value is ready */
3797        timeout = MVPP2_SMI_TIMEOUT;
3798
3799        do {
3800                /* read smi register */
3801                smi_reg = readl(priv->lms_base + MVPP2_SMI);
3802                if (timeout-- == 0) {
3803                        printf("Err: SMI read ready timeout\n");
3804                        return -EFAULT;
3805                }
3806        } while (!(smi_reg & MVPP2_SMI_READ_VALID));
3807
3808        /* Busy-wait delay to let the read data settle in the SMI register */
3809        for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
3810                ;
3811
3812        return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK;
3813}
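/*
 * Illustrative usage only (the bus name and PHY address below are
 * board-specific assumptions): once mvpp2_base_probe() has registered
 * the bus, this callback is reachable through the generic miiphy
 * layer, e.g.:
 *
 *	struct mii_dev *bus = miiphy_get_dev_by_name("mvpp2_base");
 *	int physid1 = bus->read(bus, 0, MDIO_DEVAD_NONE, MII_PHYSID1);
 */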
3814
3815/*
3816 * mpp2_mdio_write - miiphy_write callback function.
3817 *
3818 * Returns 0 if the write succeeded, or -EFAULT on bad parameters
3819 * or timeout
3820 */
3821static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
3822                           u16 value)
3823{
3824        struct mvpp2 *priv = bus->priv;
3825        u32 smi_reg;
3826
3827        /* check parameters */
3828        if (addr > MVPP2_PHY_ADDR_MASK) {
3829                printf("Error: Invalid PHY address %d\n", addr);
3830                return -EFAULT;
3831        }
3832
3833        if (reg > MVPP2_PHY_REG_MASK) {
3834                printf("Err: Invalid register offset %d\n", reg);
3835                return -EFAULT;
3836        }
3837
3838        /* wait till the SMI is not busy */
3839        if (smi_wait_ready(priv) < 0)
3840                return -EFAULT;
3841
3842        /* fill the phy addr and reg offset and write opcode and data */
3843        smi_reg = value << MVPP2_SMI_DATA_OFFS;
3844        smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3845                | (reg << MVPP2_SMI_REG_ADDR_OFFS);
3846        smi_reg &= ~MVPP2_SMI_OPCODE_READ;
3847
3848        /* write the smi register */
3849        writel(smi_reg, priv->lms_base + MVPP2_SMI);
3850
3851        return 0;
3852}
3853
3854static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
3855{
3856        struct mvpp2_port *port = dev_get_priv(dev);
3857        struct mvpp2_rx_desc *rx_desc;
3858        struct mvpp2_bm_pool *bm_pool;
3859        dma_addr_t phys_addr;
3860        u32 bm, rx_status;
3861        int pool, rx_bytes, err;
3862        int rx_received;
3863        struct mvpp2_rx_queue *rxq;
3864        u32 cause_rx_tx, cause_rx, cause_misc;
3865        u8 *data;
3866
3867        cause_rx_tx = mvpp2_read(port->priv,
3868                                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
3869        cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3870        cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
3871        if (!cause_rx_tx && !cause_misc)
3872                return 0;
3873
3874        cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
3875
3876        /* Process RX packets */
3877        cause_rx |= port->pending_cause_rx;
3878        rxq = mvpp2_get_rx_queue(port, cause_rx);
3879
3880        /* Get the number of received packets */
3881        rx_received = mvpp2_rxq_received(port, rxq->id);
3882
3883        /* Return if no packets are received */
3884        if (!rx_received)
3885                return 0;
3886
3887        rx_desc = mvpp2_rxq_next_desc_get(rxq);
3888        rx_status = rx_desc->status;
3889        rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
3890        phys_addr = rx_desc->buf_phys_addr;
3891
3892        bm = mvpp2_bm_cookie_build(rx_desc);
3893        pool = mvpp2_bm_cookie_pool_get(bm);
3894        bm_pool = &port->priv->bm_pools[pool];
3895
3896        /* Check if buffer header is used */
3897        if (rx_status & MVPP2_RXD_BUF_HDR)
3898                return 0;
3899
3900        /* In case of an error, release the requested buffer pointer
3901         * to the Buffer Manager. This request process is controlled
3902         * by the hardware, and the information about the buffer is
3903         * carried in the RX descriptor.
3904         */
3905        if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
3906                mvpp2_rx_error(port, rx_desc);
3907                /* Return the buffer to the pool */
3908                mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
3909                                  rx_desc->buf_cookie);
3910                return 0;
3911        }
3912
3913        err = mvpp2_rx_refill(port, bm_pool, bm, phys_addr);
3914        if (err) {
3915                netdev_err(port->dev, "failed to refill BM pools\n");
3916                return 0;
3917        }
3918
3919        /* Update Rx queue management counters */
3920        mb();
3921        mvpp2_rxq_status_update(port, rxq->id, 1, 1);
3922
3923        /* give packet to stack - skip the Marvell header and rx offset bytes */
3924        data = (u8 *)phys_addr + 2 + 32;
3925
3926        if (rx_bytes <= 0)
3927                return 0;
3928
3929        /*
3930         * No cache invalidation is needed here, since the rx_buffers are
3931         * located in an uncached memory region
3932         */
3933        *packetp = data;
3934
3935        return rx_bytes;
3936}
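/*
 * Note: this recv implementation returns at most one packet per call;
 * the occupied/available counters are advanced by one on each pass,
 * and the buffer has already been recycled into the BM pool before
 * the payload pointer is handed back.
 */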
3937
3938/* Drain Txq */
3939static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3940                            int enable)
3941{
3942        u32 val;
3943
3944        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3945        val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3946        if (enable)
3947                val |= MVPP2_TXQ_DRAIN_EN_MASK;
3948        else
3949                val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3950        mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3951}
3952
3953static int mvpp2_send(struct udevice *dev, void *packet, int length)
3954{
3955        struct mvpp2_port *port = dev_get_priv(dev);
3956        struct mvpp2_tx_queue *txq, *aggr_txq;
3957        struct mvpp2_tx_desc *tx_desc;
3958        int tx_done;
3959        int timeout;
3960
3961        txq = port->txqs[0];
3962        aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
3963
3964        /* Get a descriptor for the first part of the packet */
3965        tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3966        tx_desc->phys_txq = txq->id;
3967        tx_desc->data_size = length;
3968        tx_desc->packet_offset = (u32)packet & MVPP2_TX_DESC_ALIGN;
3969        tx_desc->buf_phys_addr = (u32)packet & ~MVPP2_TX_DESC_ALIGN;
3970        /* First and Last descriptor */
3971        tx_desc->command = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
3972                | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3973
3974        /* Flush tx data */
3975        flush_dcache_range((u32)packet, (u32)packet + length);
3976
3977        /* Enable transmit */
3978        mb();
3979        mvpp2_aggr_txq_pend_desc_add(port, 1);
3980
3981        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3982
3983        timeout = 0;
3984        do {
3985                if (timeout++ > 10000) {
3986                        printf("timeout: packet not sent from aggregated to phys TXQ\n");
3987                        return 0;
3988                }
3989                tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
3990        } while (tx_done);
3991
3992        /* Enable TXQ drain */
3993        mvpp2_txq_drain(port, txq, 1);
3994
3995        timeout = 0;
3996        do {
3997                if (timeout++ > 10000) {
3998                        printf("timeout: packet not sent\n");
3999                        return 0;
4000                }
4001                tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4002        } while (!tx_done);
4003
4004        /* Disable TXQ drain */
4005        mvpp2_txq_drain(port, txq, 0);
4006
4007        return 0;
4008}
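/*
 * TX completion is detected in two stages above: first the descriptor
 * must migrate from the aggregated queue to the physical TXQ (the
 * pending count drops to zero), then, with draining enabled, the
 * clear-on-read sent counter reports the frame as transmitted.
 */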
4009
4010static int mvpp2_start(struct udevice *dev)
4011{
4012        struct eth_pdata *pdata = dev_get_platdata(dev);
4013        struct mvpp2_port *port = dev_get_priv(dev);
4014
4015        /* Load current MAC address */
4016        memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
4017
4018        /* Reconfigure parser to accept the original MAC address */
4019        mvpp2_prs_update_mac_da(port, port->dev_addr);
4020
4021        mvpp2_port_power_up(port);
4022
4023        mvpp2_open(dev, port);
4024
4025        return 0;
4026}
4027
4028static void mvpp2_stop(struct udevice *dev)
4029{
4030        struct mvpp2_port *port = dev_get_priv(dev);
4031
4032        mvpp2_stop_dev(port);
4033        mvpp2_cleanup_rxqs(port);
4034        mvpp2_cleanup_txqs(port);
4035}
4036
4037static int mvpp2_probe(struct udevice *dev)
4038{
4039        struct mvpp2_port *port = dev_get_priv(dev);
4040        struct mvpp2 *priv = dev_get_priv(dev->parent);
4041        int err;
4042
4043        /* Initialize network controller */
4044        err = mvpp2_init(dev, priv);
4045        if (err < 0) {
4046                dev_err(dev, "failed to initialize controller\n");
4047                return err;
4048        }
4049
4050        return mvpp2_port_probe(dev, port, dev->of_offset, priv,
4051                                &buffer_loc.first_rxq);
4052}
4053
4054static const struct eth_ops mvpp2_ops = {
4055        .start          = mvpp2_start,
4056        .send           = mvpp2_send,
4057        .recv           = mvpp2_recv,
4058        .stop           = mvpp2_stop,
4059};
4060
4061static struct driver mvpp2_driver = {
4062        .name   = "mvpp2",
4063        .id     = UCLASS_ETH,
4064        .probe  = mvpp2_probe,
4065        .ops    = &mvpp2_ops,
4066        .priv_auto_alloc_size = sizeof(struct mvpp2_port),
4067        .platdata_auto_alloc_size = sizeof(struct eth_pdata),
4068};
4069
4070/*
4071 * Use a MISC device to bind the N port instances (child nodes) of the
4072 * network base controller as UCLASS_ETH devices.
4073 */
4074static int mvpp2_base_probe(struct udevice *dev)
4075{
4076        struct mvpp2 *priv = dev_get_priv(dev);
4077        struct mii_dev *bus;
4078        void *bd_space;
4079        u32 size = 0;
4080        int i;
4081
4082        /*
4083         * U-Boot special buffer handling:
4084         *
4085         * Allocate the buffer area for descriptors and rx_buffers. This is
4086         * done only once for all interfaces, as only one interface can be
4087         * active at a time. Make this area DMA-safe by disabling the D-cache.
4088         */
4089
4090        /* Align buffer area for descs and rx_buffers to 1MiB */
4091        bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
4092        mmu_set_region_dcache_behaviour((u32)bd_space, BD_SPACE, DCACHE_OFF);
4093
4094        buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
4095        size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
4096
4097        buffer_loc.tx_descs = (struct mvpp2_tx_desc *)((u32)bd_space + size);
4098        size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
4099
4100        buffer_loc.rx_descs = (struct mvpp2_rx_desc *)((u32)bd_space + size);
4101        size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
4102
4103        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4104                buffer_loc.bm_pool[i] = (u32 *)((u32)bd_space + size);
4105                size += MVPP2_BM_POOL_SIZE_MAX * sizeof(u32);
4106        }
4107
4108        for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
4109                buffer_loc.rx_buffer[i] = (u32 *)((u32)bd_space + size);
4110                size += RX_BUFFER_SIZE;
4111        }
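        /*
         * Resulting bd_space layout, carved in the order above:
         * [aggr TX descs][TX descs][RX descs][BM pools][RX buffers]
         */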
4112
4113        /* Save base addresses for later use */
4114        priv->base = (void *)dev_get_addr_index(dev, 0);
4115        if (IS_ERR(priv->base))
4116                return PTR_ERR(priv->base);
4117
4118        priv->lms_base = (void *)dev_get_addr_index(dev, 1);
4119        if (IS_ERR(priv->lms_base))
4120                return PTR_ERR(priv->lms_base);
4121
4122        /* Finally create and register the MDIO bus driver */
4123        bus = mdio_alloc();
4124        if (!bus) {
4125                printf("Failed to allocate MDIO bus\n");
4126                return -ENOMEM;
4127        }
4128
4129        bus->read = mpp2_mdio_read;
4130        bus->write = mpp2_mdio_write;
4131        snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
4132        bus->priv = (void *)priv;
4133        priv->bus = bus;
4134
4135        return mdio_register(bus);
4136}
4137
4138static int mvpp2_base_bind(struct udevice *parent)
4139{
4140        const void *blob = gd->fdt_blob;
4141        int node = parent->of_offset;
4142        struct uclass_driver *drv;
4143        struct udevice *dev;
4144        struct eth_pdata *plat;
4145        char *name;
4146        int subnode;
4147        u32 id;
4148
4149        /* Lookup eth driver */
4150        drv = lists_uclass_lookup(UCLASS_ETH);
4151        if (!drv) {
4152                puts("Cannot find eth driver\n");
4153                return -ENOENT;
4154        }
4155
4156        fdt_for_each_subnode(subnode, blob, node) {
4157                /* Skip disabled ports */
4158                if (!fdtdec_get_is_enabled(blob, subnode))
4159                        continue;
4160
4161                plat = calloc(1, sizeof(*plat));
4162                if (!plat)
4163                        return -ENOMEM;
4164
4165                id = fdtdec_get_int(blob, subnode, "port-id", -1);
4166
4167                name = calloc(1, 16);
                if (!name)
                        return -ENOMEM;
4168                sprintf(name, "mvpp2-%d", id);
4169
4170                /* Create child device UCLASS_ETH and bind it */
4171                device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
4172                dev->of_offset = subnode;
4173        }
4174
4175        return 0;
4176}
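/*
 * Sketch of the device tree layout this bind routine expects; node
 * names and unit addresses are illustrative, the properties are the
 * ones actually parsed here and in mvpp2_port_probe():
 *
 *	pp2@f0000 {
 *		compatible = "marvell,armada-375-pp2";
 *		eth0@c4000 {
 *			port-id = <0>;
 *			phy = <&phy0>;
 *			phy-mode = "gmii";
 *			status = "okay";
 *		};
 *	};
 */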
4177
4178static const struct udevice_id mvpp2_ids[] = {
4179        { .compatible = "marvell,armada-375-pp2" },
4180        { }
4181};
4182
4183U_BOOT_DRIVER(mvpp2_base) = {
4184        .name   = "mvpp2_base",
4185        .id     = UCLASS_MISC,
4186        .of_match = mvpp2_ids,
4187        .bind   = mvpp2_base_bind,
4188        .probe  = mvpp2_base_probe,
4189        .priv_auto_alloc_size = sizeof(struct mvpp2),
4190};
4191