linux/drivers/net/ethernet/marvell/mvpp2.c
   1/*
   2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
   3 *
   4 * Copyright (C) 2014 Marvell
   5 *
   6 * Marcin Wojtas <mw@semihalf.com>
   7 *
   8 * This file is licensed under the terms of the GNU General Public
   9 * License version 2. This program is licensed "as is" without any
  10 * warranty of any kind, whether express or implied.
  11 */
  12
  13#include <linux/kernel.h>
  14#include <linux/netdevice.h>
  15#include <linux/etherdevice.h>
  16#include <linux/platform_device.h>
  17#include <linux/skbuff.h>
  18#include <linux/inetdevice.h>
  19#include <linux/mbus.h>
  20#include <linux/module.h>
  21#include <linux/interrupt.h>
  22#include <linux/cpumask.h>
  23#include <linux/of.h>
  24#include <linux/of_irq.h>
  25#include <linux/of_mdio.h>
  26#include <linux/of_net.h>
  27#include <linux/of_address.h>
  28#include <linux/phy.h>
  29#include <linux/clk.h>
  30#include <linux/hrtimer.h>
  31#include <linux/ktime.h>
  32#include <uapi/linux/ppp_defs.h>
  33#include <net/ip.h>
  34#include <net/ipv6.h>
  35
  36/* RX Fifo Registers */
  37#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)       (0x00 + 4 * (port))
  38#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)       (0x20 + 4 * (port))
  39#define MVPP2_RX_MIN_PKT_SIZE_REG               0x60
  40#define MVPP2_RX_FIFO_INIT_REG                  0x64
  41
  42/* RX DMA Top Registers */
  43#define MVPP2_RX_CTRL_REG(port)                 (0x140 + 4 * (port))
  44#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)    (((s) & 0xfff) << 16)
  45#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK   BIT(31)
  46#define MVPP2_POOL_BUF_SIZE_REG(pool)           (0x180 + 4 * (pool))
  47#define     MVPP2_POOL_BUF_SIZE_OFFSET          5
  48#define MVPP2_RXQ_CONFIG_REG(rxq)               (0x800 + 4 * (rxq))
  49#define     MVPP2_SNOOP_PKT_SIZE_MASK           0x1ff
  50#define     MVPP2_SNOOP_BUF_HDR_MASK            BIT(9)
  51#define     MVPP2_RXQ_POOL_SHORT_OFFS           20
  52#define     MVPP2_RXQ_POOL_SHORT_MASK           0x700000
  53#define     MVPP2_RXQ_POOL_LONG_OFFS            24
  54#define     MVPP2_RXQ_POOL_LONG_MASK            0x7000000
  55#define     MVPP2_RXQ_PACKET_OFFSET_OFFS        28
  56#define     MVPP2_RXQ_PACKET_OFFSET_MASK        0x70000000
  57#define     MVPP2_RXQ_DISABLE_MASK              BIT(31)
  58
  59/* Parser Registers */
  60#define MVPP2_PRS_INIT_LOOKUP_REG               0x1000
  61#define     MVPP2_PRS_PORT_LU_MAX               0xf
  62#define     MVPP2_PRS_PORT_LU_MASK(port)        (0xff << ((port) * 4))
  63#define     MVPP2_PRS_PORT_LU_VAL(port, val)    ((val) << ((port) * 4))
  64#define MVPP2_PRS_INIT_OFFS_REG(port)           (0x1004 + ((port) & 4))
  65#define     MVPP2_PRS_INIT_OFF_MASK(port)       (0x3f << (((port) % 4) * 8))
  66#define     MVPP2_PRS_INIT_OFF_VAL(port, val)   ((val) << (((port) % 4) * 8))
  67#define MVPP2_PRS_MAX_LOOP_REG(port)            (0x100c + ((port) & 4))
  68#define     MVPP2_PRS_MAX_LOOP_MASK(port)       (0xff << (((port) % 4) * 8))
  69#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)   ((val) << (((port) % 4) * 8))
  70#define MVPP2_PRS_TCAM_IDX_REG                  0x1100
  71#define MVPP2_PRS_TCAM_DATA_REG(idx)            (0x1104 + (idx) * 4)
  72#define     MVPP2_PRS_TCAM_INV_MASK             BIT(31)
  73#define MVPP2_PRS_SRAM_IDX_REG                  0x1200
  74#define MVPP2_PRS_SRAM_DATA_REG(idx)            (0x1204 + (idx) * 4)
  75#define MVPP2_PRS_TCAM_CTRL_REG                 0x1230
  76#define     MVPP2_PRS_TCAM_EN_MASK              BIT(0)
  77
  78/* Classifier Registers */
  79#define MVPP2_CLS_MODE_REG                      0x1800
  80#define     MVPP2_CLS_MODE_ACTIVE_MASK          BIT(0)
  81#define MVPP2_CLS_PORT_WAY_REG                  0x1810
  82#define     MVPP2_CLS_PORT_WAY_MASK(port)       (1 << (port))
  83#define MVPP2_CLS_LKP_INDEX_REG                 0x1814
  84#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS        6
  85#define MVPP2_CLS_LKP_TBL_REG                   0x1818
  86#define     MVPP2_CLS_LKP_TBL_RXQ_MASK          0xff
  87#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK    BIT(25)
  88#define MVPP2_CLS_FLOW_INDEX_REG                0x1820
  89#define MVPP2_CLS_FLOW_TBL0_REG                 0x1824
  90#define MVPP2_CLS_FLOW_TBL1_REG                 0x1828
  91#define MVPP2_CLS_FLOW_TBL2_REG                 0x182c
  92#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)    (0x1980 + ((port) * 4))
  93#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS     3
  94#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK     0x7
  95#define MVPP2_CLS_SWFWD_P2HQ_REG(port)          (0x19b0 + ((port) * 4))
  96#define MVPP2_CLS_SWFWD_PCTRL_REG               0x19d0
  97#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)    (1 << (port))
  98
  99/* Descriptor Manager Top Registers */
 100#define MVPP2_RXQ_NUM_REG                       0x2040
 101#define MVPP2_RXQ_DESC_ADDR_REG                 0x2044
 102#define MVPP2_RXQ_DESC_SIZE_REG                 0x2048
 103#define     MVPP2_RXQ_DESC_SIZE_MASK            0x3ff0
 104#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)        (0x3000 + 4 * (rxq))
 105#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET      0
 106#define     MVPP2_RXQ_NUM_NEW_OFFSET            16
 107#define MVPP2_RXQ_STATUS_REG(rxq)               (0x3400 + 4 * (rxq))
 108#define     MVPP2_RXQ_OCCUPIED_MASK             0x3fff
 109#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET       16
 110#define     MVPP2_RXQ_NON_OCCUPIED_MASK         0x3fff0000
 111#define MVPP2_RXQ_THRESH_REG                    0x204c
 112#define     MVPP2_OCCUPIED_THRESH_OFFSET        0
 113#define     MVPP2_OCCUPIED_THRESH_MASK          0x3fff
 114#define MVPP2_RXQ_INDEX_REG                     0x2050
 115#define MVPP2_TXQ_NUM_REG                       0x2080
 116#define MVPP2_TXQ_DESC_ADDR_REG                 0x2084
 117#define MVPP2_TXQ_DESC_SIZE_REG                 0x2088
 118#define     MVPP2_TXQ_DESC_SIZE_MASK            0x3ff0
 119#define MVPP2_AGGR_TXQ_UPDATE_REG               0x2090
 120#define MVPP2_TXQ_THRESH_REG                    0x2094
 121#define     MVPP2_TRANSMITTED_THRESH_OFFSET     16
 122#define     MVPP2_TRANSMITTED_THRESH_MASK       0x3fff0000
 123#define MVPP2_TXQ_INDEX_REG                     0x2098
 124#define MVPP2_TXQ_PREF_BUF_REG                  0x209c
 125#define     MVPP2_PREF_BUF_PTR(desc)            ((desc) & 0xfff)
 126#define     MVPP2_PREF_BUF_SIZE_4               (BIT(12) | BIT(13))
 127#define     MVPP2_PREF_BUF_SIZE_16              (BIT(12) | BIT(14))
 128#define     MVPP2_PREF_BUF_THRESH(val)          ((val) << 17)
 129#define     MVPP2_TXQ_DRAIN_EN_MASK             BIT(31)
 130#define MVPP2_TXQ_PENDING_REG                   0x20a0
 131#define     MVPP2_TXQ_PENDING_MASK              0x3fff
 132#define MVPP2_TXQ_INT_STATUS_REG                0x20a4
 133#define MVPP2_TXQ_SENT_REG(txq)                 (0x3c00 + 4 * (txq))
 134#define     MVPP2_TRANSMITTED_COUNT_OFFSET      16
 135#define     MVPP2_TRANSMITTED_COUNT_MASK        0x3fff0000
 136#define MVPP2_TXQ_RSVD_REQ_REG                  0x20b0
 137#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET         16
 138#define MVPP2_TXQ_RSVD_RSLT_REG                 0x20b4
 139#define     MVPP2_TXQ_RSVD_RSLT_MASK            0x3fff
 140#define MVPP2_TXQ_RSVD_CLR_REG                  0x20b8
 141#define     MVPP2_TXQ_RSVD_CLR_OFFSET           16
 142#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)       (0x2100 + 4 * (cpu))
 143#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)       (0x2140 + 4 * (cpu))
 144#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK       0x3ff0
 145#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)          (0x2180 + 4 * (cpu))
 146#define     MVPP2_AGGR_TXQ_PENDING_MASK         0x3fff
 147#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)           (0x21c0 + 4 * (cpu))
 148
 149/* MBUS bridge registers */
 150#define MVPP2_WIN_BASE(w)                       (0x4000 + ((w) << 2))
 151#define MVPP2_WIN_SIZE(w)                       (0x4020 + ((w) << 2))
 152#define MVPP2_WIN_REMAP(w)                      (0x4040 + ((w) << 2))
 153#define MVPP2_BASE_ADDR_ENABLE                  0x4060
 154
 155/* Interrupt Cause and Mask registers */
 156#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)         (0x5200 + 4 * (rxq))
 157#define MVPP2_ISR_RXQ_GROUP_REG(rxq)            (0x5400 + 4 * (rxq))
 158#define MVPP2_ISR_ENABLE_REG(port)              (0x5420 + 4 * (port))
 159#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)    ((mask) & 0xffff)
 160#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)   (((mask) << 16) & 0xffff0000)
 161#define MVPP2_ISR_RX_TX_CAUSE_REG(port)         (0x5480 + 4 * (port))
 162#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
 163#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
 164#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK    BIT(24)
 165#define     MVPP2_CAUSE_FCS_ERR_MASK            BIT(25)
 166#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK   BIT(26)
 167#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK   BIT(29)
 168#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK   BIT(30)
 169#define     MVPP2_CAUSE_MISC_SUM_MASK           BIT(31)
 170#define MVPP2_ISR_RX_TX_MASK_REG(port)          (0x54a0 + 4 * (port))
 171#define MVPP2_ISR_PON_RX_TX_MASK_REG            0x54bc
 172#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK     0xffff
 173#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK     0x3fc00000
 174#define     MVPP2_PON_CAUSE_MISC_SUM_MASK               BIT(31)
 175#define MVPP2_ISR_MISC_CAUSE_REG                0x55b0
 176
 177/* Buffer Manager registers */
 178#define MVPP2_BM_POOL_BASE_REG(pool)            (0x6000 + ((pool) * 4))
 179#define     MVPP2_BM_POOL_BASE_ADDR_MASK        0xfffff80
 180#define MVPP2_BM_POOL_SIZE_REG(pool)            (0x6040 + ((pool) * 4))
 181#define     MVPP2_BM_POOL_SIZE_MASK             0xfff0
 182#define MVPP2_BM_POOL_READ_PTR_REG(pool)        (0x6080 + ((pool) * 4))
 183#define     MVPP2_BM_POOL_GET_READ_PTR_MASK     0xfff0
 184#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)        (0x60c0 + ((pool) * 4))
 185#define     MVPP2_BM_POOL_PTRS_NUM_MASK         0xfff0
 186#define MVPP2_BM_BPPI_READ_PTR_REG(pool)        (0x6100 + ((pool) * 4))
 187#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)        (0x6140 + ((pool) * 4))
 188#define     MVPP2_BM_BPPI_PTR_NUM_MASK          0x7ff
 189#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK    BIT(16)
 190#define MVPP2_BM_POOL_CTRL_REG(pool)            (0x6200 + ((pool) * 4))
 191#define     MVPP2_BM_START_MASK                 BIT(0)
 192#define     MVPP2_BM_STOP_MASK                  BIT(1)
 193#define     MVPP2_BM_STATE_MASK                 BIT(4)
 194#define     MVPP2_BM_LOW_THRESH_OFFS            8
 195#define     MVPP2_BM_LOW_THRESH_MASK            0x7f00
 196#define     MVPP2_BM_LOW_THRESH_VALUE(val)      ((val) << \
 197                                                MVPP2_BM_LOW_THRESH_OFFS)
 198#define     MVPP2_BM_HIGH_THRESH_OFFS           16
 199#define     MVPP2_BM_HIGH_THRESH_MASK           0x7f0000
 200#define     MVPP2_BM_HIGH_THRESH_VALUE(val)     ((val) << \
 201                                                MVPP2_BM_HIGH_THRESH_OFFS)
 202#define MVPP2_BM_INTR_CAUSE_REG(pool)           (0x6240 + ((pool) * 4))
 203#define     MVPP2_BM_RELEASED_DELAY_MASK        BIT(0)
 204#define     MVPP2_BM_ALLOC_FAILED_MASK          BIT(1)
 205#define     MVPP2_BM_BPPE_EMPTY_MASK            BIT(2)
 206#define     MVPP2_BM_BPPE_FULL_MASK             BIT(3)
 207#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK      BIT(4)
 208#define MVPP2_BM_INTR_MASK_REG(pool)            (0x6280 + ((pool) * 4))
 209#define MVPP2_BM_PHY_ALLOC_REG(pool)            (0x6400 + ((pool) * 4))
 210#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK       BIT(0)
 211#define MVPP2_BM_VIRT_ALLOC_REG                 0x6440
 212#define MVPP2_BM_PHY_RLS_REG(pool)              (0x6480 + ((pool) * 4))
 213#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK       BIT(0)
 214#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK       BIT(1)
 215#define     MVPP2_BM_PHY_RLS_GRNTD_MASK         BIT(2)
 216#define MVPP2_BM_VIRT_RLS_REG                   0x64c0
 217#define MVPP2_BM_MC_RLS_REG                     0x64c4
 218#define     MVPP2_BM_MC_ID_MASK                 0xfff
 219#define     MVPP2_BM_FORCE_RELEASE_MASK         BIT(12)
 220
 221/* TX Scheduler registers */
 222#define MVPP2_TXP_SCHED_PORT_INDEX_REG          0x8000
 223#define MVPP2_TXP_SCHED_Q_CMD_REG               0x8004
 224#define     MVPP2_TXP_SCHED_ENQ_MASK            0xff
 225#define     MVPP2_TXP_SCHED_DISQ_OFFSET         8
 226#define MVPP2_TXP_SCHED_CMD_1_REG               0x8010
 227#define MVPP2_TXP_SCHED_PERIOD_REG              0x8018
 228#define MVPP2_TXP_SCHED_MTU_REG                 0x801c
 229#define     MVPP2_TXP_MTU_MAX                   0x7FFFF
 230#define MVPP2_TXP_SCHED_REFILL_REG              0x8020
 231#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK    0x7ffff
 232#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK    0x3ff00000
 233#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)     ((v) << 20)
 234#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG          0x8024
 235#define     MVPP2_TXP_TOKEN_SIZE_MAX            0xffffffff
 236#define MVPP2_TXQ_SCHED_REFILL_REG(q)           (0x8040 + ((q) << 2))
 237#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK    0x7ffff
 238#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK    0x3ff00000
 239#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)     ((v) << 20)
 240#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)       (0x8060 + ((q) << 2))
 241#define     MVPP2_TXQ_TOKEN_SIZE_MAX            0x7fffffff
 242#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)       (0x8080 + ((q) << 2))
 243#define     MVPP2_TXQ_TOKEN_CNTR_MAX            0xffffffff
 244
 245/* TX general registers */
 246#define MVPP2_TX_SNOOP_REG                      0x8800
 247#define MVPP2_TX_PORT_FLUSH_REG                 0x8810
 248#define     MVPP2_TX_PORT_FLUSH_MASK(port)      (1 << (port))
 249
 250/* LMS registers */
 251#define MVPP2_SRC_ADDR_MIDDLE                   0x24
 252#define MVPP2_SRC_ADDR_HIGH                     0x28
 253#define MVPP2_PHY_AN_CFG0_REG                   0x34
 254#define     MVPP2_PHY_AN_STOP_SMI0_MASK         BIT(7)
 255#define MVPP2_MIB_COUNTERS_BASE(port)           (0x1000 + ((port) >> 1) * \
 256                                                0x400 + (port) * 0x400)
 257#define     MVPP2_MIB_LATE_COLLISION            0x7c
 258#define MVPP2_ISR_SUM_MASK_REG                  0x220c
 259#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG      0x305c
 260#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT           0x27
 261
 262/* Per-port registers */
 263#define MVPP2_GMAC_CTRL_0_REG                   0x0
 264#define      MVPP2_GMAC_PORT_EN_MASK            BIT(0)
 265#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS        2
 266#define      MVPP2_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 267#define      MVPP2_GMAC_MIB_CNTR_EN_MASK        BIT(15)
 268#define MVPP2_GMAC_CTRL_1_REG                   0x4
 269#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK    BIT(1)
 270#define      MVPP2_GMAC_GMII_LB_EN_MASK         BIT(5)
 271#define      MVPP2_GMAC_PCS_LB_EN_BIT           6
 272#define      MVPP2_GMAC_PCS_LB_EN_MASK          BIT(6)
 273#define      MVPP2_GMAC_SA_LOW_OFFS             7
 274#define MVPP2_GMAC_CTRL_2_REG                   0x8
 275#define      MVPP2_GMAC_INBAND_AN_MASK          BIT(0)
 276#define      MVPP2_GMAC_PCS_ENABLE_MASK         BIT(3)
 277#define      MVPP2_GMAC_PORT_RGMII_MASK         BIT(4)
 278#define      MVPP2_GMAC_PORT_RESET_MASK         BIT(6)
 279#define MVPP2_GMAC_AUTONEG_CONFIG               0xc
 280#define      MVPP2_GMAC_FORCE_LINK_DOWN         BIT(0)
 281#define      MVPP2_GMAC_FORCE_LINK_PASS         BIT(1)
 282#define      MVPP2_GMAC_CONFIG_MII_SPEED        BIT(5)
 283#define      MVPP2_GMAC_CONFIG_GMII_SPEED       BIT(6)
 284#define      MVPP2_GMAC_AN_SPEED_EN             BIT(7)
 285#define      MVPP2_GMAC_FC_ADV_EN               BIT(9)
 286#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 287#define      MVPP2_GMAC_AN_DUPLEX_EN            BIT(13)
 288#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG          0x1c
 289#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS     6
 290#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
 291#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)  (((v) << 6) & \
 292                                        MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
 293
 294#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK      0xff
 295
 296/* Descriptor ring Macros */
 297#define MVPP2_QUEUE_NEXT_DESC(q, index) \
 298        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
 299
 300/* Various constants */
 301
 302/* Coalescing */
 303#define MVPP2_TXDONE_COAL_PKTS_THRESH   15
 304#define MVPP2_TXDONE_HRTIMER_PERIOD_NS  1000000UL
 305#define MVPP2_RX_COAL_PKTS              32
 306#define MVPP2_RX_COAL_USEC              100
 307
  308/* The two-byte Marvell header. It either contains a special value used
  309 * by Marvell switches when a specific hardware mode is enabled (not
  310 * supported by this driver), or is filled with zeroes automatically on
  311 * the RX side. Since those two bytes sit at the front of the Ethernet
  312 * header, they automatically keep the IP header aligned on a 4-byte
  313 * boundary: the hardware skips those two bytes on its
  314 * own.
  315 */
 316#define MVPP2_MH_SIZE                   2
 317#define MVPP2_ETH_TYPE_LEN              2
 318#define MVPP2_PPPOE_HDR_SIZE            8
 319#define MVPP2_VLAN_TAG_LEN              4
 320
 321/* Lbtd 802.3 type */
 322#define MVPP2_IP_LBDT_TYPE              0xfffa
 323
 324#define MVPP2_CPU_D_CACHE_LINE_SIZE     32
 325#define MVPP2_TX_CSUM_MAX_SIZE          9800
 326
 327/* Timeout constants */
 328#define MVPP2_TX_DISABLE_TIMEOUT_MSEC   1000
 329#define MVPP2_TX_PENDING_TIMEOUT_MSEC   1000
 330
 331#define MVPP2_TX_MTU_MAX                0x7ffff
 332
 333/* Maximum number of T-CONTs of PON port */
 334#define MVPP2_MAX_TCONT                 16
 335
 336/* Maximum number of supported ports */
 337#define MVPP2_MAX_PORTS                 4
 338
 339/* Maximum number of TXQs used by single port */
 340#define MVPP2_MAX_TXQ                   8
 341
 342/* Maximum number of RXQs used by single port */
 343#define MVPP2_MAX_RXQ                   8
 344
  346/* Default number of RXQs in use */
 346#define MVPP2_DEFAULT_RXQ               4
 347
 348/* Total number of RXQs available to all ports */
 349#define MVPP2_RXQ_TOTAL_NUM             (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
 350
 351/* Max number of Rx descriptors */
 352#define MVPP2_MAX_RXD                   128
 353
 354/* Max number of Tx descriptors */
 355#define MVPP2_MAX_TXD                   1024
 356
 357/* Amount of Tx descriptors that can be reserved at once by CPU */
 358#define MVPP2_CPU_DESC_CHUNK            64
 359
 360/* Max number of Tx descriptors in each aggregated queue */
 361#define MVPP2_AGGR_TXQ_SIZE             256
 362
 363/* Descriptor aligned size */
 364#define MVPP2_DESC_ALIGNED_SIZE         32
 365
 366/* Descriptor alignment mask */
 367#define MVPP2_TX_DESC_ALIGN             (MVPP2_DESC_ALIGNED_SIZE - 1)
 368
 369/* RX FIFO constants */
 370#define MVPP2_RX_FIFO_PORT_DATA_SIZE    0x2000
 371#define MVPP2_RX_FIFO_PORT_ATTR_SIZE    0x80
 372#define MVPP2_RX_FIFO_PORT_MIN_PKT      0x80
 373
 374/* RX buffer constants */
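/* The macros below size the RX buffers: the packet size for a given MTU adds
 * the Marvell header, a VLAN tag, the Ethernet header and the FCS, rounded up
 * to a cache line; the buffer size then adds NET_SKB_PAD of headroom, and the
 * total size additionally reserves room for the skb_shared_info.
 * MVPP2_RX_MAX_PKT_SIZE() is the inverse: the largest packet that fits in a
 * buffer of a given total size.
 */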
 375#define MVPP2_SKB_SHINFO_SIZE \
 376        SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 377
 378#define MVPP2_RX_PKT_SIZE(mtu) \
 379        ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
 380              ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
 381
 382#define MVPP2_RX_BUF_SIZE(pkt_size)     ((pkt_size) + NET_SKB_PAD)
 383#define MVPP2_RX_TOTAL_SIZE(buf_size)   ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
 384#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
 385        ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
 386
 387#define MVPP2_BIT_TO_BYTE(bit)          ((bit) / 8)
 388
 389/* IPv6 max L3 address size */
 390#define MVPP2_MAX_L3_ADDR_SIZE          16
 391
 392/* Port flags */
 393#define MVPP2_F_LOOPBACK                BIT(0)
 394
 395/* Marvell tag types */
 396enum mvpp2_tag_type {
 397        MVPP2_TAG_TYPE_NONE = 0,
 398        MVPP2_TAG_TYPE_MH   = 1,
 399        MVPP2_TAG_TYPE_DSA  = 2,
 400        MVPP2_TAG_TYPE_EDSA = 3,
 401        MVPP2_TAG_TYPE_VLAN = 4,
 402        MVPP2_TAG_TYPE_LAST = 5
 403};
 404
 405/* Parser constants */
 406#define MVPP2_PRS_TCAM_SRAM_SIZE        256
 407#define MVPP2_PRS_TCAM_WORDS            6
 408#define MVPP2_PRS_SRAM_WORDS            4
 409#define MVPP2_PRS_FLOW_ID_SIZE          64
 410#define MVPP2_PRS_FLOW_ID_MASK          0x3f
 411#define MVPP2_PRS_TCAM_ENTRY_INVALID    1
 412#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT   BIT(5)
 413#define MVPP2_PRS_IPV4_HEAD             0x40
 414#define MVPP2_PRS_IPV4_HEAD_MASK        0xf0
 415#define MVPP2_PRS_IPV4_MC               0xe0
 416#define MVPP2_PRS_IPV4_MC_MASK          0xf0
 417#define MVPP2_PRS_IPV4_BC_MASK          0xff
 418#define MVPP2_PRS_IPV4_IHL              0x5
 419#define MVPP2_PRS_IPV4_IHL_MASK         0xf
 420#define MVPP2_PRS_IPV6_MC               0xff
 421#define MVPP2_PRS_IPV6_MC_MASK          0xff
 422#define MVPP2_PRS_IPV6_HOP_MASK         0xff
 423#define MVPP2_PRS_TCAM_PROTO_MASK       0xff
 424#define MVPP2_PRS_TCAM_PROTO_MASK_L     0x3f
 425#define MVPP2_PRS_DBL_VLANS_MAX         100
 426
 427/* Tcam structure:
 428 * - lookup ID - 4 bits
 429 * - port ID - 1 byte
 430 * - additional information - 1 byte
 431 * - header data - 8 bytes
 432 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 433 */
 434#define MVPP2_PRS_AI_BITS                       8
 435#define MVPP2_PRS_PORT_MASK                     0xff
 436#define MVPP2_PRS_LU_MASK                       0xf
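/* The TCAM stores each pair of header-data bytes next to the corresponding
 * pair of per-bit enable bytes: header bytes 2n and 2n+1 live at entry bytes
 * 4n and 4n+1, their enable bytes at 4n+2 and 4n+3.  The two macros below map
 * a header byte offset to those interleaved positions, e.g. offset 2 maps to
 * data byte 4 and enable byte 6.
 */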
 437#define MVPP2_PRS_TCAM_DATA_BYTE(offs)          \
 438                                    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
 439#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)       \
 440                                              (((offs) * 2) - ((offs) % 2)  + 2)
 441#define MVPP2_PRS_TCAM_AI_BYTE                  16
 442#define MVPP2_PRS_TCAM_PORT_BYTE                17
 443#define MVPP2_PRS_TCAM_LU_BYTE                  20
 444#define MVPP2_PRS_TCAM_EN_OFFS(offs)            ((offs) + 2)
 445#define MVPP2_PRS_TCAM_INV_WORD                 5
 446/* Tcam entries ID */
 447#define MVPP2_PE_DROP_ALL               0
 448#define MVPP2_PE_FIRST_FREE_TID         1
 449#define MVPP2_PE_LAST_FREE_TID          (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
 450#define MVPP2_PE_IP6_EXT_PROTO_UN       (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
 451#define MVPP2_PE_MAC_MC_IP6             (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
 452#define MVPP2_PE_IP6_ADDR_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
 453#define MVPP2_PE_IP4_ADDR_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
 454#define MVPP2_PE_LAST_DEFAULT_FLOW      (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
 455#define MVPP2_PE_FIRST_DEFAULT_FLOW     (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
 456#define MVPP2_PE_EDSA_TAGGED            (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
 457#define MVPP2_PE_EDSA_UNTAGGED          (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
 458#define MVPP2_PE_DSA_TAGGED             (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
 459#define MVPP2_PE_DSA_UNTAGGED           (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
 460#define MVPP2_PE_ETYPE_EDSA_TAGGED      (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
 461#define MVPP2_PE_ETYPE_EDSA_UNTAGGED    (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
 462#define MVPP2_PE_ETYPE_DSA_TAGGED       (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
 463#define MVPP2_PE_ETYPE_DSA_UNTAGGED     (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
 464#define MVPP2_PE_MH_DEFAULT             (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
 465#define MVPP2_PE_DSA_DEFAULT            (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
 466#define MVPP2_PE_IP6_PROTO_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
 467#define MVPP2_PE_IP4_PROTO_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
 468#define MVPP2_PE_ETH_TYPE_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
 469#define MVPP2_PE_VLAN_DBL               (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
 470#define MVPP2_PE_VLAN_NONE              (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
 471#define MVPP2_PE_MAC_MC_ALL             (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
 472#define MVPP2_PE_MAC_PROMISCUOUS        (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
 473#define MVPP2_PE_MAC_NON_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
 474
 475/* Sram structure
  476 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 477 */
 478#define MVPP2_PRS_SRAM_RI_OFFS                  0
 479#define MVPP2_PRS_SRAM_RI_WORD                  0
 480#define MVPP2_PRS_SRAM_RI_CTRL_OFFS             32
 481#define MVPP2_PRS_SRAM_RI_CTRL_WORD             1
 482#define MVPP2_PRS_SRAM_RI_CTRL_BITS             32
 483#define MVPP2_PRS_SRAM_SHIFT_OFFS               64
 484#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT           72
 485#define MVPP2_PRS_SRAM_UDF_OFFS                 73
 486#define MVPP2_PRS_SRAM_UDF_BITS                 8
 487#define MVPP2_PRS_SRAM_UDF_MASK                 0xff
 488#define MVPP2_PRS_SRAM_UDF_SIGN_BIT             81
 489#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS            82
 490#define MVPP2_PRS_SRAM_UDF_TYPE_MASK            0x7
 491#define MVPP2_PRS_SRAM_UDF_TYPE_L3              1
 492#define MVPP2_PRS_SRAM_UDF_TYPE_L4              4
 493#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS        85
 494#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK        0x3
 495#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD         1
 496#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD     2
 497#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD     3
 498#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS          87
 499#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS          2
 500#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK          0x3
 501#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD           0
 502#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD       2
 503#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD       3
 504#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS         89
 505#define MVPP2_PRS_SRAM_AI_OFFS                  90
 506#define MVPP2_PRS_SRAM_AI_CTRL_OFFS             98
 507#define MVPP2_PRS_SRAM_AI_CTRL_BITS             8
 508#define MVPP2_PRS_SRAM_AI_MASK                  0xff
 509#define MVPP2_PRS_SRAM_NEXT_LU_OFFS             106
 510#define MVPP2_PRS_SRAM_NEXT_LU_MASK             0xf
 511#define MVPP2_PRS_SRAM_LU_DONE_BIT              110
 512#define MVPP2_PRS_SRAM_LU_GEN_BIT               111
 513
 514/* Sram result info bits assignment */
 515#define MVPP2_PRS_RI_MAC_ME_MASK                0x1
 516#define MVPP2_PRS_RI_DSA_MASK                   0x2
 517#define MVPP2_PRS_RI_VLAN_MASK                  0xc
 518#define MVPP2_PRS_RI_VLAN_NONE                  ~(BIT(2) | BIT(3))
 519#define MVPP2_PRS_RI_VLAN_SINGLE                BIT(2)
 520#define MVPP2_PRS_RI_VLAN_DOUBLE                BIT(3)
 521#define MVPP2_PRS_RI_VLAN_TRIPLE                (BIT(2) | BIT(3))
 522#define MVPP2_PRS_RI_CPU_CODE_MASK              0x70
 523#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC           BIT(4)
 524#define MVPP2_PRS_RI_L2_CAST_MASK               0x600
 525#define MVPP2_PRS_RI_L2_UCAST                   ~(BIT(9) | BIT(10))
 526#define MVPP2_PRS_RI_L2_MCAST                   BIT(9)
 527#define MVPP2_PRS_RI_L2_BCAST                   BIT(10)
 528#define MVPP2_PRS_RI_PPPOE_MASK                 0x800
 529#define MVPP2_PRS_RI_L3_PROTO_MASK              0x7000
 530#define MVPP2_PRS_RI_L3_UN                      ~(BIT(12) | BIT(13) | BIT(14))
 531#define MVPP2_PRS_RI_L3_IP4                     BIT(12)
 532#define MVPP2_PRS_RI_L3_IP4_OPT                 BIT(13)
 533#define MVPP2_PRS_RI_L3_IP4_OTHER               (BIT(12) | BIT(13))
 534#define MVPP2_PRS_RI_L3_IP6                     BIT(14)
 535#define MVPP2_PRS_RI_L3_IP6_EXT                 (BIT(12) | BIT(14))
 536#define MVPP2_PRS_RI_L3_ARP                     (BIT(13) | BIT(14))
 537#define MVPP2_PRS_RI_L3_ADDR_MASK               0x18000
 538#define MVPP2_PRS_RI_L3_UCAST                   ~(BIT(15) | BIT(16))
 539#define MVPP2_PRS_RI_L3_MCAST                   BIT(15)
 540#define MVPP2_PRS_RI_L3_BCAST                   (BIT(15) | BIT(16))
 541#define MVPP2_PRS_RI_IP_FRAG_MASK               0x20000
 542#define MVPP2_PRS_RI_UDF3_MASK                  0x300000
 543#define MVPP2_PRS_RI_UDF3_RX_SPECIAL            BIT(21)
 544#define MVPP2_PRS_RI_L4_PROTO_MASK              0x1c00000
 545#define MVPP2_PRS_RI_L4_TCP                     BIT(22)
 546#define MVPP2_PRS_RI_L4_UDP                     BIT(23)
 547#define MVPP2_PRS_RI_L4_OTHER                   (BIT(22) | BIT(23))
 548#define MVPP2_PRS_RI_UDF7_MASK                  0x60000000
 549#define MVPP2_PRS_RI_UDF7_IP6_LITE              BIT(29)
 550#define MVPP2_PRS_RI_DROP_MASK                  0x80000000
 551
 552/* Sram additional info bits assignment */
 553#define MVPP2_PRS_IPV4_DIP_AI_BIT               BIT(0)
 554#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT            BIT(0)
 555#define MVPP2_PRS_IPV6_EXT_AI_BIT               BIT(1)
 556#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT            BIT(2)
 557#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT        BIT(3)
 558#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT         BIT(4)
 559#define MVPP2_PRS_SINGLE_VLAN_AI                0
 560#define MVPP2_PRS_DBL_VLAN_AI_BIT               BIT(7)
 561
 562/* DSA/EDSA type */
 563#define MVPP2_PRS_TAGGED                true
 564#define MVPP2_PRS_UNTAGGED              false
 565#define MVPP2_PRS_EDSA                  true
 566#define MVPP2_PRS_DSA                   false
 567
 568/* MAC entries, shadow udf */
 569enum mvpp2_prs_udf {
 570        MVPP2_PRS_UDF_MAC_DEF,
 571        MVPP2_PRS_UDF_MAC_RANGE,
 572        MVPP2_PRS_UDF_L2_DEF,
 573        MVPP2_PRS_UDF_L2_DEF_COPY,
 574        MVPP2_PRS_UDF_L2_USER,
 575};
 576
 577/* Lookup ID */
 578enum mvpp2_prs_lookup {
 579        MVPP2_PRS_LU_MH,
 580        MVPP2_PRS_LU_MAC,
 581        MVPP2_PRS_LU_DSA,
 582        MVPP2_PRS_LU_VLAN,
 583        MVPP2_PRS_LU_L2,
 584        MVPP2_PRS_LU_PPPOE,
 585        MVPP2_PRS_LU_IP4,
 586        MVPP2_PRS_LU_IP6,
 587        MVPP2_PRS_LU_FLOWS,
 588        MVPP2_PRS_LU_LAST,
 589};
 590
 591/* L3 cast enum */
 592enum mvpp2_prs_l3_cast {
 593        MVPP2_PRS_L3_UNI_CAST,
 594        MVPP2_PRS_L3_MULTI_CAST,
 595        MVPP2_PRS_L3_BROAD_CAST
 596};
 597
 598/* Classifier constants */
 599#define MVPP2_CLS_FLOWS_TBL_SIZE        512
 600#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS  3
 601#define MVPP2_CLS_LKP_TBL_SIZE          64
 602
 603/* BM constants */
 604#define MVPP2_BM_POOLS_NUM              8
 605#define MVPP2_BM_LONG_BUF_NUM           1024
 606#define MVPP2_BM_SHORT_BUF_NUM          2048
 607#define MVPP2_BM_POOL_SIZE_MAX          (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
 608#define MVPP2_BM_POOL_PTR_ALIGN         128
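/* Software-forwarding pool assignment implied by the two macros below:
 * ports 0-2 use long pools 0-2 respectively, any higher-numbered port shares
 * long pool 2, and all ports share short pool 3.
 */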
 609#define MVPP2_BM_SWF_LONG_POOL(port)    ((port > 2) ? 2 : port)
 610#define MVPP2_BM_SWF_SHORT_POOL         3
 611
 612/* BM cookie (32 bits) definition */
 613#define MVPP2_BM_COOKIE_POOL_OFFS       8
 614#define MVPP2_BM_COOKIE_CPU_OFFS        24
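/* Note: judging by the offsets above, the cookie is expected to carry the
 * pool number starting at bit 8 and the issuing CPU number starting at
 * bit 24; the helpers that actually build and parse it are presumably
 * defined further down in the driver.
 */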
 615
 616/* BM short pool packet size
  617 * These values ensure that for SWF the total number
  618 * of bytes allocated for each buffer will be 512
 619 */
 620#define MVPP2_BM_SHORT_PKT_SIZE         MVPP2_RX_MAX_PKT_SIZE(512)
 621
 622enum mvpp2_bm_type {
 623        MVPP2_BM_FREE,
 624        MVPP2_BM_SWF_LONG,
 625        MVPP2_BM_SWF_SHORT
 626};
 627
 628/* Definitions */
 629
 630/* Shared Packet Processor resources */
 631struct mvpp2 {
 632        /* Shared registers' base addresses */
 633        void __iomem *base;
 634        void __iomem *lms_base;
 635
 636        /* Common clocks */
 637        struct clk *pp_clk;
 638        struct clk *gop_clk;
 639
 640        /* List of pointers to port structures */
 641        struct mvpp2_port **port_list;
 642
 643        /* Aggregated TXQs */
 644        struct mvpp2_tx_queue *aggr_txqs;
 645
 646        /* BM pools */
 647        struct mvpp2_bm_pool *bm_pools;
 648
 649        /* PRS shadow table */
 650        struct mvpp2_prs_shadow *prs_shadow;
 651        /* PRS auxiliary table for double vlan entries control */
 652        bool *prs_double_vlans;
 653
 654        /* Tclk value */
 655        u32 tclk;
 656};
 657
 658struct mvpp2_pcpu_stats {
 659        struct  u64_stats_sync syncp;
 660        u64     rx_packets;
 661        u64     rx_bytes;
 662        u64     tx_packets;
 663        u64     tx_bytes;
 664};
 665
 666/* Per-CPU port control */
 667struct mvpp2_port_pcpu {
 668        struct hrtimer tx_done_timer;
 669        bool timer_scheduled;
 670        /* Tasklet for egress finalization */
 671        struct tasklet_struct tx_done_tasklet;
 672};
 673
 674struct mvpp2_port {
 675        u8 id;
 676
 677        int irq;
 678
 679        struct mvpp2 *priv;
 680
 681        /* Per-port registers' base address */
 682        void __iomem *base;
 683
 684        struct mvpp2_rx_queue **rxqs;
 685        struct mvpp2_tx_queue **txqs;
 686        struct net_device *dev;
 687
 688        int pkt_size;
 689
 690        u32 pending_cause_rx;
 691        struct napi_struct napi;
 692
 693        /* Per-CPU port control */
 694        struct mvpp2_port_pcpu __percpu *pcpu;
 695
 696        /* Flags */
 697        unsigned long flags;
 698
 699        u16 tx_ring_size;
 700        u16 rx_ring_size;
 701        struct mvpp2_pcpu_stats __percpu *stats;
 702
 703        struct phy_device *phy_dev;
 704        phy_interface_t phy_interface;
 705        struct device_node *phy_node;
 706        unsigned int link;
 707        unsigned int duplex;
 708        unsigned int speed;
 709
 710        struct mvpp2_bm_pool *pool_long;
 711        struct mvpp2_bm_pool *pool_short;
 712
 713        /* Index of first port's physical RXQ */
 714        u8 first_rxq;
 715};
 716
  717/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
  718 * layout of the transmit and receive DMA descriptors; that layout
  719 * is therefore dictated by the hardware design
 720 */
 721
 722#define MVPP2_TXD_L3_OFF_SHIFT          0
 723#define MVPP2_TXD_IP_HLEN_SHIFT         8
 724#define MVPP2_TXD_L4_CSUM_FRAG          BIT(13)
 725#define MVPP2_TXD_L4_CSUM_NOT           BIT(14)
 726#define MVPP2_TXD_IP_CSUM_DISABLE       BIT(15)
 727#define MVPP2_TXD_PADDING_DISABLE       BIT(23)
 728#define MVPP2_TXD_L4_UDP                BIT(24)
 729#define MVPP2_TXD_L3_IP6                BIT(26)
 730#define MVPP2_TXD_L_DESC                BIT(28)
 731#define MVPP2_TXD_F_DESC                BIT(29)
 732
 733#define MVPP2_RXD_ERR_SUMMARY           BIT(15)
 734#define MVPP2_RXD_ERR_CODE_MASK         (BIT(13) | BIT(14))
 735#define MVPP2_RXD_ERR_CRC               0x0
 736#define MVPP2_RXD_ERR_OVERRUN           BIT(13)
 737#define MVPP2_RXD_ERR_RESOURCE          (BIT(13) | BIT(14))
 738#define MVPP2_RXD_BM_POOL_ID_OFFS       16
 739#define MVPP2_RXD_BM_POOL_ID_MASK       (BIT(16) | BIT(17) | BIT(18))
 740#define MVPP2_RXD_HWF_SYNC              BIT(21)
 741#define MVPP2_RXD_L4_CSUM_OK            BIT(22)
 742#define MVPP2_RXD_IP4_HEADER_ERR        BIT(24)
 743#define MVPP2_RXD_L4_TCP                BIT(25)
 744#define MVPP2_RXD_L4_UDP                BIT(26)
 745#define MVPP2_RXD_L3_IP4                BIT(28)
 746#define MVPP2_RXD_L3_IP6                BIT(30)
 747#define MVPP2_RXD_BUF_HDR               BIT(31)
 748
 749struct mvpp2_tx_desc {
 750        u32 command;            /* Options used by HW for packet transmitting.*/
 751        u8  packet_offset;      /* the offset from the buffer beginning */
 752        u8  phys_txq;           /* destination queue ID                 */
 753        u16 data_size;          /* data size of transmitted packet in bytes */
 754        u32 buf_phys_addr;      /* physical addr of transmitted buffer  */
 755        u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
 756        u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
 757        u32 reserved2;          /* reserved (for future use)            */
 758};
 759
 760struct mvpp2_rx_desc {
 761        u32 status;             /* info about received packet           */
 762        u16 reserved1;          /* parser_info (for future use, PnC)    */
 763        u16 data_size;          /* size of received packet in bytes     */
 764        u32 buf_phys_addr;      /* physical address of the buffer       */
 765        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
 766        u16 reserved2;          /* gem_port_id (for future use, PON)    */
 767        u16 reserved3;          /* csum_l4 (for future use, PnC)        */
 768        u8  reserved4;          /* bm_qset (for future use, BM)         */
 769        u8  reserved5;
 770        u16 reserved6;          /* classify_info (for future use, PnC)  */
 771        u32 reserved7;          /* flow_id (for future use, PnC) */
 772        u32 reserved8;
 773};
 774
 775/* Per-CPU Tx queue control */
 776struct mvpp2_txq_pcpu {
 777        int cpu;
 778
 779        /* Number of Tx DMA descriptors in the descriptor ring */
 780        int size;
 781
  782        /* Number of currently used Tx DMA descriptors in the
 783         * descriptor ring
 784         */
 785        int count;
 786
 787        /* Number of Tx DMA descriptors reserved for each CPU */
 788        int reserved_num;
 789
 790        /* Array of transmitted skb */
 791        struct sk_buff **tx_skb;
 792
 793        /* Array of transmitted buffers' physical addresses */
 794        dma_addr_t *tx_buffs;
 795
 796        /* Index of last TX DMA descriptor that was inserted */
 797        int txq_put_index;
 798
 799        /* Index of the TX DMA descriptor to be cleaned up */
 800        int txq_get_index;
 801};
 802
 803struct mvpp2_tx_queue {
 804        /* Physical number of this Tx queue */
 805        u8 id;
 806
 807        /* Logical number of this Tx queue */
 808        u8 log_id;
 809
 810        /* Number of Tx DMA descriptors in the descriptor ring */
 811        int size;
 812
  813        /* Number of currently used Tx DMA descriptors in the descriptor ring */
 814        int count;
 815
 816        /* Per-CPU control of physical Tx queues */
 817        struct mvpp2_txq_pcpu __percpu *pcpu;
 818
 819        /* Array of transmitted skb */
 820        struct sk_buff **tx_skb;
 821
 822        u32 done_pkts_coal;
 823
  824        /* Virtual address of the Tx DMA descriptors array */
 825        struct mvpp2_tx_desc *descs;
 826
 827        /* DMA address of the Tx DMA descriptors array */
 828        dma_addr_t descs_phys;
 829
 830        /* Index of the last Tx DMA descriptor */
 831        int last_desc;
 832
 833        /* Index of the next Tx DMA descriptor to process */
 834        int next_desc_to_proc;
 835};
 836
 837struct mvpp2_rx_queue {
 838        /* RX queue number, in the range 0-31 for physical RXQs */
 839        u8 id;
 840
 841        /* Num of rx descriptors in the rx descriptor ring */
 842        int size;
 843
 844        u32 pkts_coal;
 845        u32 time_coal;
 846
 847        /* Virtual address of the RX DMA descriptors array */
 848        struct mvpp2_rx_desc *descs;
 849
 850        /* DMA address of the RX DMA descriptors array */
 851        dma_addr_t descs_phys;
 852
 853        /* Index of the last RX DMA descriptor */
 854        int last_desc;
 855
 856        /* Index of the next RX DMA descriptor to process */
 857        int next_desc_to_proc;
 858
 859        /* ID of port to which physical RXQ is mapped */
 860        int port;
 861
 862        /* Port's logic RXQ number to which physical RXQ is mapped */
 863        int logic_rxq;
 864};
 865
 866union mvpp2_prs_tcam_entry {
 867        u32 word[MVPP2_PRS_TCAM_WORDS];
 868        u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
 869};
 870
 871union mvpp2_prs_sram_entry {
 872        u32 word[MVPP2_PRS_SRAM_WORDS];
 873        u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
 874};
 875
 876struct mvpp2_prs_entry {
 877        u32 index;
 878        union mvpp2_prs_tcam_entry tcam;
 879        union mvpp2_prs_sram_entry sram;
 880};
 881
 882struct mvpp2_prs_shadow {
 883        bool valid;
 884        bool finish;
 885
 886        /* Lookup ID */
 887        int lu;
 888
 889        /* User defined offset */
 890        int udf;
 891
 892        /* Result info */
 893        u32 ri;
 894        u32 ri_mask;
 895};
 896
 897struct mvpp2_cls_flow_entry {
 898        u32 index;
 899        u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
 900};
 901
 902struct mvpp2_cls_lookup_entry {
 903        u32 lkpid;
 904        u32 way;
 905        u32 data;
 906};
 907
 908struct mvpp2_bm_pool {
 909        /* Pool number in the range 0-7 */
 910        int id;
 911        enum mvpp2_bm_type type;
 912
 913        /* Buffer Pointers Pool External (BPPE) size */
 914        int size;
 915        /* Number of buffers for this pool */
 916        int buf_num;
 917        /* Pool buffer size */
 918        int buf_size;
 919        /* Packet size */
 920        int pkt_size;
 921
 922        /* BPPE virtual base address */
 923        u32 *virt_addr;
 924        /* BPPE physical base address */
 925        dma_addr_t phys_addr;
 926
 927        /* Ports using BM pool */
 928        u32 port_map;
 929
 930        /* Occupied buffers indicator */
 931        atomic_t in_use;
 932        int in_use_thresh;
 933};
 934
 935struct mvpp2_buff_hdr {
 936        u32 next_buff_phys_addr;
 937        u32 next_buff_virt_addr;
 938        u16 byte_count;
 939        u16 info;
 940        u8  reserved1;          /* bm_qset (for future use, BM)         */
 941};
 942
 943/* Buffer header info bits */
 944#define MVPP2_B_HDR_INFO_MC_ID_MASK     0xfff
 945#define MVPP2_B_HDR_INFO_MC_ID(info)    ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
 946#define MVPP2_B_HDR_INFO_LAST_OFFS      12
 947#define MVPP2_B_HDR_INFO_LAST_MASK      BIT(12)
 948#define MVPP2_B_HDR_INFO_IS_LAST(info) \
 949           ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
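/* A struct mvpp2_buff_hdr links chained RX buffers together (signalled by
 * MVPP2_RXD_BUF_HDR in the RX descriptor status): it gives the physical and
 * virtual addresses of the next buffer and the byte count, while "info"
 * carries the multicast ID in bits [11:0] and a last-buffer flag in bit 12,
 * as decoded by the macros above.
 */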
 950
  951/* Static declarations */
 952
 953/* Number of RXQs used by single port */
 954static int rxq_number = MVPP2_DEFAULT_RXQ;
 955/* Number of TXQs used by single port */
 956static int txq_number = MVPP2_MAX_TXQ;
 957
 958#define MVPP2_DRIVER_NAME "mvpp2"
 959#define MVPP2_DRIVER_VERSION "1.0"
 960
 961/* Utility/helper methods */
 962
 963static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
 964{
 965        writel(data, priv->base + offset);
 966}
 967
 968static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
 969{
 970        return readl(priv->base + offset);
 971}
 972
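/* The two helpers below maintain the per-cpu transmit ring indexes:
 * txq_put_index advances when a descriptor (and its skb and DMA address) is
 * queued, txq_get_index advances when the descriptor is cleaned up on
 * tx-done; both wrap around at the ring size.
 */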
 973static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
 974{
 975        txq_pcpu->txq_get_index++;
 976        if (txq_pcpu->txq_get_index == txq_pcpu->size)
 977                txq_pcpu->txq_get_index = 0;
 978}
 979
 980static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
 981                              struct sk_buff *skb,
 982                              struct mvpp2_tx_desc *tx_desc)
 983{
 984        txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
 985        if (skb)
 986                txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
 987                                                         tx_desc->buf_phys_addr;
 988        txq_pcpu->txq_put_index++;
 989        if (txq_pcpu->txq_put_index == txq_pcpu->size)
 990                txq_pcpu->txq_put_index = 0;
 991}
 992
 993/* Get number of physical egress port */
 994static inline int mvpp2_egress_port(struct mvpp2_port *port)
 995{
 996        return MVPP2_MAX_TCONT + port->id;
 997}
 998
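/* Physical egress queue numbering: the first MVPP2_MAX_TCONT egress ports
 * (and thus the first MVPP2_MAX_TCONT * MVPP2_MAX_TXQ queues) are apparently
 * reserved for the PON T-CONTs; Ethernet port N then owns the MVPP2_MAX_TXQ
 * queues starting at (MVPP2_MAX_TCONT + N) * MVPP2_MAX_TXQ, e.g. port 0,
 * txq 0 maps to physical queue 128.
 */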
 999/* Get number of physical TXQ */
1000static inline int mvpp2_txq_phys(int port, int txq)
1001{
1002        return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1003}
1004
1005/* Parser configuration routines */
1006
1007/* Update parser tcam and sram hw entries */
1008static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1009{
1010        int i;
1011
1012        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1013                return -EINVAL;
1014
1015        /* Clear entry invalidation bit */
1016        pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1017
1018        /* Write tcam index - indirect access */
1019        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1020        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1021                mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1022
1023        /* Write sram index - indirect access */
1024        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1025        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1026                mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1027
1028        return 0;
1029}
1030
1031/* Read tcam entry from hw */
1032static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1033{
1034        int i;
1035
1036        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1037                return -EINVAL;
1038
1039        /* Write tcam index - indirect access */
1040        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1041
1042        pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1043                              MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1044        if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1045                return MVPP2_PRS_TCAM_ENTRY_INVALID;
1046
1047        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1048                pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1049
1050        /* Write sram index - indirect access */
1051        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1052        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1053                pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1054
1055        return 0;
1056}
1057
1058/* Invalidate tcam hw entry */
1059static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1060{
1061        /* Write index - indirect access */
1062        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1063        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1064                    MVPP2_PRS_TCAM_INV_MASK);
1065}
1066
1067/* Enable shadow table entry and set its lookup ID */
1068static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1069{
1070        priv->prs_shadow[index].valid = true;
1071        priv->prs_shadow[index].lu = lu;
1072}
1073
1074/* Update ri fields in shadow table entry */
1075static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1076                                    unsigned int ri, unsigned int ri_mask)
1077{
1078        priv->prs_shadow[index].ri_mask = ri_mask;
1079        priv->prs_shadow[index].ri = ri;
1080}
1081
1082/* Update lookup field in tcam sw entry */
1083static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1084{
1085        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1086
1087        pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1088        pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1089}
1090
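/* The port field of a TCAM entry is matched through its enable byte, which
 * holds the complement of the port map: a cleared bit means the port takes
 * part in the lookup.  Hence _port_set() clears a bit to add a port,
 * _port_map_set() writes ~ports, and _port_map_get() complements the byte
 * again on the way out.
 */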
1091/* Update mask for single port in tcam sw entry */
1092static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1093                                    unsigned int port, bool add)
1094{
1095        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1096
1097        if (add)
1098                pe->tcam.byte[enable_off] &= ~(1 << port);
1099        else
1100                pe->tcam.byte[enable_off] |= 1 << port;
1101}
1102
1103/* Update port map in tcam sw entry */
1104static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1105                                        unsigned int ports)
1106{
1107        unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1108        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1109
1110        pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1111        pe->tcam.byte[enable_off] &= ~port_mask;
1112        pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1113}
1114
1115/* Obtain port map from tcam sw entry */
1116static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1117{
1118        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1119
1120        return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1121}
1122
1123/* Set byte of data and its enable bits in tcam sw entry */
1124static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1125                                         unsigned int offs, unsigned char byte,
1126                                         unsigned char enable)
1127{
1128        pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1129        pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1130}
1131
1132/* Get byte of data and its enable bits from tcam sw entry */
1133static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1134                                         unsigned int offs, unsigned char *byte,
1135                                         unsigned char *enable)
1136{
1137        *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1138        *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1139}
1140
1141/* Compare tcam data bytes with a pattern */
1142static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1143                                    u16 data)
1144{
1145        int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1146        u16 tcam_data;
1147
 1148        tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1149        if (tcam_data != data)
1150                return false;
1151        return true;
1152}
1153
1154/* Update ai bits in tcam sw entry */
1155static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1156                                     unsigned int bits, unsigned int enable)
1157{
1158        int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1159
1160        for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1161
1162                if (!(enable & BIT(i)))
1163                        continue;
1164
1165                if (bits & BIT(i))
1166                        pe->tcam.byte[ai_idx] |= 1 << i;
1167                else
1168                        pe->tcam.byte[ai_idx] &= ~(1 << i);
1169        }
1170
1171        pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1172}
1173
1174/* Get ai bits from tcam sw entry */
1175static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1176{
1177        return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1178}
1179
1180/* Set ethertype in tcam sw entry */
1181static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1182                                  unsigned short ethertype)
1183{
1184        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1185        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1186}
1187
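/* The SRAM helpers below treat the entry as a flat bit array: bit_num is an
 * absolute bit position within the entry (e.g. MVPP2_PRS_SRAM_AI_OFFS = 90
 * falls in byte 11, bit 2).  They only touch the byte containing bit_num, so
 * fields that straddle a byte boundary are fixed up explicitly by their
 * callers, see for instance mvpp2_prs_sram_offset_set().
 */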
1188/* Set bits in sram sw entry */
1189static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1190                                    int val)
1191{
1192        pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1193}
1194
1195/* Clear bits in sram sw entry */
1196static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1197                                      int val)
1198{
1199        pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1200}
1201
1202/* Update ri bits in sram sw entry */
1203static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1204                                     unsigned int bits, unsigned int mask)
1205{
1206        unsigned int i;
1207
1208        for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1209                int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1210
1211                if (!(mask & BIT(i)))
1212                        continue;
1213
1214                if (bits & BIT(i))
1215                        mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1216                else
1217                        mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1218
1219                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1220        }
1221}
1222
1223/* Obtain ri bits from sram sw entry */
1224static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1225{
1226        return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1227}
1228
1229/* Update ai bits in sram sw entry */
1230static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1231                                     unsigned int bits, unsigned int mask)
1232{
1233        unsigned int i;
1234        int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1235
1236        for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1237
1238                if (!(mask & BIT(i)))
1239                        continue;
1240
1241                if (bits & BIT(i))
1242                        mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1243                else
1244                        mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1245
1246                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1247        }
1248}
1249
1250/* Read ai bits from sram sw entry */
1251static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1252{
1253        u8 bits;
1254        int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1255        int ai_en_off = ai_off + 1;
1256        int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1257
1258        bits = (pe->sram.byte[ai_off] >> ai_shift) |
1259               (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1260
1261        return bits;
1262}
1263
1264/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 1265 * lookup iteration
1266 */
1267static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1268                                       unsigned int lu)
1269{
1270        int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1271
1272        mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1273                                  MVPP2_PRS_SRAM_NEXT_LU_MASK);
1274        mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1275}
1276
1277/* In the sram sw entry set sign and value of the next lookup offset
1278 * and the offset value generated to the classifier
1279 */
1280static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1281                                     unsigned int op)
1282{
1283        /* Set sign */
1284        if (shift < 0) {
1285                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1286                shift = 0 - shift;
1287        } else {
1288                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1289        }
1290
1291        /* Set value */
1292        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1293                                                           (unsigned char)shift;
1294
1295        /* Reset and set operation */
1296        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1297                                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1298        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1299
1300        /* Set base offset as current */
1301        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1302}
1303
1304/* In the sram sw entry set sign and value of the user defined offset
1305 * generated to the classifier
1306 */
1307static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1308                                      unsigned int type, int offset,
1309                                      unsigned int op)
1310{
1311        /* Set sign */
1312        if (offset < 0) {
1313                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1314                offset = 0 - offset;
1315        } else {
1316                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1317        }
1318
1319        /* Set value */
1320        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1321                                  MVPP2_PRS_SRAM_UDF_MASK);
1322        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1323        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1324                                        MVPP2_PRS_SRAM_UDF_BITS)] &=
1325              ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1326        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1327                                        MVPP2_PRS_SRAM_UDF_BITS)] |=
1328                                (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1329
1330        /* Set offset type */
1331        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1332                                  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1333        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1334
1335        /* Set offset operation */
1336        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1337                                  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1338        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1339
1340        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1341                                        MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1342                                             ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1343                                    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1344
1345        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1346                                        MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1347                             (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1348
1349        /* Set base offset as current */
1350        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1351}
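
/* Example (illustrative sketch): the IPv4 protocol entries below report the
 * L4 header position to the classifier with
 *
 *	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
 *				  sizeof(struct iphdr) - 4,
 *				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
 *
 * i.e. a user-defined offset of type L4 added to the current lookup offset,
 * which at that stage already sits 4 bytes into the IPv4 header.
 */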
1352
1353/* Find parser flow entry */
1354static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1355{
1356        struct mvpp2_prs_entry *pe;
1357        int tid;
1358
1359        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1360        if (!pe)
1361                return NULL;
1362        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1363
1364        /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1365        for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1366                u8 bits;
1367
1368                if (!priv->prs_shadow[tid].valid ||
1369                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1370                        continue;
1371
1372                pe->index = tid;
1373                mvpp2_prs_hw_read(priv, pe);
1374                bits = mvpp2_prs_sram_ai_get(pe);
1375
1376                /* Sram stores the classification lookup ID in AI bits [5:0] */
1377                if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1378                        return pe;
1379        }
1380        kfree(pe);
1381
1382        return NULL;
1383}
1384
1385/* Return first free tcam index, seeking from start to end */
1386static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1387                                     unsigned char end)
1388{
1389        int tid;
1390
1391        if (start > end)
1392                swap(start, end);
1393
1394        if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1395                end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1396
1397        for (tid = start; tid <= end; tid++) {
1398                if (!priv->prs_shadow[tid].valid)
1399                        return tid;
1400        }
1401
1402        return -EINVAL;
1403}
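
/* Example (illustrative sketch): dynamic entries are allocated from the free
 * range between the fixed ones, e.g.
 *
 *	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *					MVPP2_PE_LAST_FREE_TID);
 *	if (tid < 0)
 *		return tid;
 *
 * Passing the bounds in reverse order (as the single/triple vlan code does)
 * simply makes the helper swap them, so the scan still runs upwards.
 */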
1404
1405/* Enable/disable dropping all mac da's */
1406static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1407{
1408        struct mvpp2_prs_entry pe;
1409
1410        if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1411                /* Entry exists - update port only */
1412                pe.index = MVPP2_PE_DROP_ALL;
1413                mvpp2_prs_hw_read(priv, &pe);
1414        } else {
1415                /* Entry doesn't exist - create new */
1416                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1417                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1418                pe.index = MVPP2_PE_DROP_ALL;
1419
1420                /* Non-promiscuous mode for all ports - DROP unknown packets */
1421                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1422                                         MVPP2_PRS_RI_DROP_MASK);
1423
1424                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1425                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1426
1427                /* Update shadow table */
1428                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1429
1430                /* Mask all ports */
1431                mvpp2_prs_tcam_port_map_set(&pe, 0);
1432        }
1433
1434        /* Update port mask */
1435        mvpp2_prs_tcam_port_set(&pe, port, add);
1436
1437        mvpp2_prs_hw_write(priv, &pe);
1438}
1439
1440/* Set port to promiscuous mode */
1441static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1442{
1443        struct mvpp2_prs_entry pe;
1444
1445        /* Promiscuous mode - Accept unknown packets */
1446
1447        if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1448                /* Entry exists - update port only */
1449                pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1450                mvpp2_prs_hw_read(priv, &pe);
1451        } else {
1452                /* Entry doesn't exist - create new */
1453                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1454                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1455                pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1456
1457                /* Continue - set next lookup */
1458                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1459
1460                /* Set result info bits */
1461                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1462                                         MVPP2_PRS_RI_L2_CAST_MASK);
1463
1464                /* Shift to ethertype */
1465                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1466                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1467
1468                /* Mask all ports */
1469                mvpp2_prs_tcam_port_map_set(&pe, 0);
1470
1471                /* Update shadow table */
1472                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1473        }
1474
1475        /* Update port mask */
1476        mvpp2_prs_tcam_port_set(&pe, port, add);
1477
1478        mvpp2_prs_hw_write(priv, &pe);
1479}
1480
1481/* Accept multicast */
1482static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1483                                    bool add)
1484{
1485        struct mvpp2_prs_entry pe;
1486        unsigned char da_mc;
1487
1488        /* Ethernet multicast address first byte is
1489         * 0x01 for IPv4 and 0x33 for IPv6
1490         */
1491        da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1492
1493        if (priv->prs_shadow[index].valid) {
1494                /* Entry exists - update port only */
1495                pe.index = index;
1496                mvpp2_prs_hw_read(priv, &pe);
1497        } else {
1498                /* Entry doesn't exist - create new */
1499                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1500                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1501                pe.index = index;
1502
1503                /* Continue - set next lookup */
1504                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1505
1506                /* Set result info bits */
1507                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1508                                         MVPP2_PRS_RI_L2_CAST_MASK);
1509
1510                /* Update tcam entry data first byte */
1511                mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1512
1513                /* Shift to ethertype */
1514                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1515                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1516
1517                /* Mask all ports */
1518                mvpp2_prs_tcam_port_map_set(&pe, 0);
1519
1520                /* Update shadow table */
1521                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1522        }
1523
1524        /* Update port mask */
1525        mvpp2_prs_tcam_port_set(&pe, port, add);
1526
1527        mvpp2_prs_hw_write(priv, &pe);
1528}
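
/* Example (illustrative): the single-byte match above relies on the standard
 * multicast MAC prefixes - IPv4 group 224.0.0.1 maps to 01:00:5e:00:00:01 and
 * IPv6 group ff02::1 maps to 33:33:00:00:00:01 - so checking 0x01 or 0x33 in
 * the first DA byte is enough to pick the right entry.
 */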
1529
1530/* Set entry for dsa packets */
1531static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1532                                  bool tagged, bool extend)
1533{
1534        struct mvpp2_prs_entry pe;
1535        int tid, shift;
1536
1537        if (extend) {
1538                tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1539                shift = 8;
1540        } else {
1541                tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1542                shift = 4;
1543        }
1544
1545        if (priv->prs_shadow[tid].valid) {
1546                /* Entry exists - update port only */
1547                pe.index = tid;
1548                mvpp2_prs_hw_read(priv, &pe);
1549        } else {
1550                /* Entry doesn't exist - create new */
1551                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1552                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1553                pe.index = tid;
1554
1555                /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1556                mvpp2_prs_sram_shift_set(&pe, shift,
1557                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1558
1559                /* Update shadow table */
1560                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1561
1562                if (tagged) {
1563                        /* Set tagged bit in DSA tag */
1564                        mvpp2_prs_tcam_data_byte_set(&pe, 0,
1565                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1566                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1567                        /* Clear all ai bits for next iteration */
1568                        mvpp2_prs_sram_ai_update(&pe, 0,
1569                                                 MVPP2_PRS_SRAM_AI_MASK);
1570                        /* If packet is tagged, continue checking vlans */
1571                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1572                } else {
1573                        /* Set result info bits to 'no vlans' */
1574                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1575                                                 MVPP2_PRS_RI_VLAN_MASK);
1576                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1577                }
1578
1579                /* Mask all ports */
1580                mvpp2_prs_tcam_port_map_set(&pe, 0);
1581        }
1582
1583        /* Update port mask */
1584        mvpp2_prs_tcam_port_set(&pe, port, add);
1585
1586        mvpp2_prs_hw_write(priv, &pe);
1587}
1588
1589/* Set entry for dsa ethertype */
1590static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1591                                            bool add, bool tagged, bool extend)
1592{
1593        struct mvpp2_prs_entry pe;
1594        int tid, shift, port_mask;
1595
1596        if (extend) {
1597                tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1598                      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1599                port_mask = 0;
1600                shift = 8;
1601        } else {
1602                tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1603                      MVPP2_PE_ETYPE_DSA_UNTAGGED;
1604                port_mask = MVPP2_PRS_PORT_MASK;
1605                shift = 4;
1606        }
1607
1608        if (priv->prs_shadow[tid].valid) {
1609                /* Entry exists - update port only */
1610                pe.index = tid;
1611                mvpp2_prs_hw_read(priv, &pe);
1612        } else {
1613                /* Entry doesn't exist - create new */
1614                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1615                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1616                pe.index = tid;
1617
1618                /* Set ethertype */
1619                mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1620                mvpp2_prs_match_etype(&pe, 2, 0);
1621
1622                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1623                                         MVPP2_PRS_RI_DSA_MASK);
1624                /* Shift ethertype + 2 bytes reserved + tag */
1625                mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1626                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1627
1628                /* Update shadow table */
1629                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1630
1631                if (tagged) {
1632                        /* Set tagged bit in DSA tag */
1633                        mvpp2_prs_tcam_data_byte_set(&pe,
1634                                                     MVPP2_ETH_TYPE_LEN + 2 + 3,
1635                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1636                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1637                        /* Clear all ai bits for next iteration */
1638                        mvpp2_prs_sram_ai_update(&pe, 0,
1639                                                 MVPP2_PRS_SRAM_AI_MASK);
1640                        /* If packet is tagged, continue checking vlans */
1641                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1642                } else {
1643                        /* Set result info bits to 'no vlans' */
1644                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1645                                                 MVPP2_PRS_RI_VLAN_MASK);
1646                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1647                }
1648                /* Mask/unmask all ports, depending on dsa type */
1649                mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1650        }
1651
1652        /* Update port mask */
1653        mvpp2_prs_tcam_port_set(&pe, port, add);
1654
1655        mvpp2_prs_hw_write(priv, &pe);
1656}
1657
1658/* Search for existing single/triple vlan entry */
1659static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1660                                                   unsigned short tpid, int ai)
1661{
1662        struct mvpp2_prs_entry *pe;
1663        int tid;
1664
1665        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1666        if (!pe)
1667                return NULL;
1668        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1669
1670        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
1671        for (tid = MVPP2_PE_FIRST_FREE_TID;
1672             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1673                unsigned int ri_bits, ai_bits;
1674                bool match;
1675
1676                if (!priv->prs_shadow[tid].valid ||
1677                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1678                        continue;
1679
1680                pe->index = tid;
1681
1682                mvpp2_prs_hw_read(priv, pe);
1683                match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1684                if (!match)
1685                        continue;
1686
1687                /* Get vlan type */
1688                ri_bits = mvpp2_prs_sram_ri_get(pe);
1689                ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1690
1691                /* Get current ai value from tcam */
1692                ai_bits = mvpp2_prs_tcam_ai_get(pe);
1693                /* Clear double vlan bit */
1694                ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1695
1696                if (ai != ai_bits)
1697                        continue;
1698
1699                if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1700                    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1701                        return pe;
1702        }
1703        kfree(pe);
1704
1705        return NULL;
1706}
1707
1708/* Add/update single/triple vlan entry */
1709static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1710                              unsigned int port_map)
1711{
1712        struct mvpp2_prs_entry *pe;
1713        int tid_aux, tid;
1714        int ret = 0;
1715
1716        pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1717
1718        if (!pe) {
1719                /* Create new tcam entry */
1720                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1721                                                MVPP2_PE_FIRST_FREE_TID);
1722                if (tid < 0)
1723                        return tid;
1724
1725                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1726                if (!pe)
1727                        return -ENOMEM;
1728
1729                /* Get last double vlan tid */
1730                for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1731                     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1732                        unsigned int ri_bits;
1733
1734                        if (!priv->prs_shadow[tid_aux].valid ||
1735                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1736                                continue;
1737
1738                        pe->index = tid_aux;
1739                        mvpp2_prs_hw_read(priv, pe);
1740                        ri_bits = mvpp2_prs_sram_ri_get(pe);
1741                        if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1742                            MVPP2_PRS_RI_VLAN_DOUBLE)
1743                                break;
1744                }
1745
1746                if (tid <= tid_aux) {
1747                        ret = -EINVAL;
1748                        goto error;
1749                }
1750
1751                memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1752                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1753                pe->index = tid;
1754
1755                mvpp2_prs_match_etype(pe, 0, tpid);
1756
1757                mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1758                /* Shift 4 bytes - skip 1 vlan tag */
1759                mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1760                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1761                /* Clear all ai bits for next iteration */
1762                mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1763
1764                if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
1765                        mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
1766                                                 MVPP2_PRS_RI_VLAN_MASK);
1767                } else {
1768                        ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
1769                        mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
1770                                                 MVPP2_PRS_RI_VLAN_MASK);
1771                }
1772                mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
1773
1774                mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1775        }
1776        /* Update ports' mask */
1777        mvpp2_prs_tcam_port_map_set(pe, port_map);
1778
1779        mvpp2_prs_hw_write(priv, pe);
1780
1781error:
1782        kfree(pe);
1783
1784        return ret;
1785}
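
/* Example (illustrative sketch): single-tagged 802.1Q traffic is enabled for
 * all ports by the vlan init code below with
 *
 *	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
 *				 MVPP2_PRS_PORT_MASK);
 *
 * which either finds the existing 0x8100 entry and only refreshes its port
 * map, or allocates a new tid above the last double-vlan entry.
 */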
1786
1787/* Get first free double vlan ai number */
1788static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
1789{
1790        int i;
1791
1792        for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
1793                if (!priv->prs_double_vlans[i])
1794                        return i;
1795        }
1796
1797        return -EINVAL;
1798}
1799
1800/* Search for existing double vlan entry */
1801static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
1802                                                          unsigned short tpid1,
1803                                                          unsigned short tpid2)
1804{
1805        struct mvpp2_prs_entry *pe;
1806        int tid;
1807
1808        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1809        if (!pe)
1810                return NULL;
1811        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1812
1813        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
1814        for (tid = MVPP2_PE_FIRST_FREE_TID;
1815             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1816                unsigned int ri_mask;
1817                bool match;
1818
1819                if (!priv->prs_shadow[tid].valid ||
1820                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1821                        continue;
1822
1823                pe->index = tid;
1824                mvpp2_prs_hw_read(priv, pe);
1825
1826                match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
1827                        mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
1828
1829                if (!match)
1830                        continue;
1831
1832                ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
1833                if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
1834                        return pe;
1835        }
1836        kfree(pe);
1837
1838        return NULL;
1839}
1840
1841/* Add or update double vlan entry */
1842static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1843                                     unsigned short tpid2,
1844                                     unsigned int port_map)
1845{
1846        struct mvpp2_prs_entry *pe;
1847        int tid_aux, tid, ai, ret = 0;
1848
1849        pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1850
1851        if (!pe) {
1852                /* Create new tcam entry */
1853                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1854                                MVPP2_PE_LAST_FREE_TID);
1855                if (tid < 0)
1856                        return tid;
1857
1858                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1859                if (!pe)
1860                        return -ENOMEM;
1861
1862                /* Set ai value for new double vlan entry */
1863                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1864                if (ai < 0) {
1865                        ret = ai;
1866                        goto error;
1867                }
1868
1869                /* Get first single/triple vlan tid */
1870                for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
1871                     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
1872                        unsigned int ri_bits;
1873
1874                        if (!priv->prs_shadow[tid_aux].valid ||
1875                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1876                                continue;
1877
1878                        pe->index = tid_aux;
1879                        mvpp2_prs_hw_read(priv, pe);
1880                        ri_bits = mvpp2_prs_sram_ri_get(pe);
1881                        ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1882                        if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1883                            ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1884                                break;
1885                }
1886
1887                if (tid >= tid_aux) {
1888                        ret = -ERANGE;
1889                        goto error;
1890                }
1891
1892                memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1893                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1894                pe->index = tid;
1895
1896                priv->prs_double_vlans[ai] = true;
1897
1898                mvpp2_prs_match_etype(pe, 0, tpid1);
1899                mvpp2_prs_match_etype(pe, 4, tpid2);
1900
1901                mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
1902                /* Shift 8 bytes - skip 2 vlan tags */
1903                mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
1904                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1905                mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1906                                         MVPP2_PRS_RI_VLAN_MASK);
1907                mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
1908                                         MVPP2_PRS_SRAM_AI_MASK);
1909
1910                mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1911        }
1912
1913        /* Update ports' mask */
1914        mvpp2_prs_tcam_port_map_set(pe, port_map);
1915        mvpp2_prs_hw_write(priv, pe);
1916
1917error:
1918        kfree(pe);
1919        return ret;
1920}
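
/* Example (illustrative sketch): detection of 0x8100/0x88A8 double-tagged
 * frames is enabled for all ports with
 *
 *	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
 *					MVPP2_PRS_PORT_MASK);
 *
 * the two TPIDs are matched 4 bytes apart and the entry is forced below the
 * first single/triple-vlan tid, keeping double-tag matches ahead of the
 * single-tag ones in TCAM order.
 */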
1921
1922/* IPv4 header parsing for fragmentation and L4 offset */
1923static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1924                               unsigned int ri, unsigned int ri_mask)
1925{
1926        struct mvpp2_prs_entry pe;
1927        int tid;
1928
1929        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1930            (proto != IPPROTO_IGMP))
1931                return -EINVAL;
1932
1933        /* Fragmented packet */
1934        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1935                                        MVPP2_PE_LAST_FREE_TID);
1936        if (tid < 0)
1937                return tid;
1938
1939        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1940        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1941        pe.index = tid;
1942
1943        /* Set next lu to IPv4 */
1944        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1945        mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1946        /* Set L4 offset */
1947        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1948                                  sizeof(struct iphdr) - 4,
1949                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1950        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1951                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
1952        mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1953                                 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1954
1955        mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1956        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1957        /* Unmask all ports */
1958        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1959
1960        /* Update shadow table and hw entry */
1961        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1962        mvpp2_prs_hw_write(priv, &pe);
1963
1964        /* Not fragmented packet */
1965        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1966                                        MVPP2_PE_LAST_FREE_TID);
1967        if (tid < 0)
1968                return tid;
1969
1970        pe.index = tid;
1971        /* Clear ri before updating */
1972        pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1973        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1974        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1975
1976        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1977        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1978
1979        /* Update shadow table and hw entry */
1980        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1981        mvpp2_prs_hw_write(priv, &pe);
1982
1983        return 0;
1984}
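
/* Note (illustrative): at this stage the lookup window starts 4 bytes into
 * the IPv4 header, so TCAM byte 5 above is the protocol field and TCAM bytes
 * 2-3 are the flags/fragment-offset field.  The first entry tags matching
 * packets with MVPP2_PRS_RI_IP_FRAG_MASK; the second one additionally
 * requires the fragment-offset bytes to be zero under the protocol masks and
 * reports the same result info without the fragment flag.
 */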
1985
1986/* IPv4 L3 multicast or broadcast */
1987static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1988{
1989        struct mvpp2_prs_entry pe;
1990        int mask, tid;
1991
1992        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1993                                        MVPP2_PE_LAST_FREE_TID);
1994        if (tid < 0)
1995                return tid;
1996
1997        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1998        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1999        pe.index = tid;
2000
2001        switch (l3_cast) {
2002        case MVPP2_PRS_L3_MULTI_CAST:
2003                mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2004                                             MVPP2_PRS_IPV4_MC_MASK);
2005                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2006                                         MVPP2_PRS_RI_L3_ADDR_MASK);
2007                break;
2008        case MVPP2_PRS_L3_BROAD_CAST:
2009                mask = MVPP2_PRS_IPV4_BC_MASK;
2010                mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2011                mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2012                mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2013                mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2014                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2015                                         MVPP2_PRS_RI_L3_ADDR_MASK);
2016                break;
2017        default:
2018                return -EINVAL;
2019        }
2020
2021        /* Finished: go to flowid generation */
2022        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2023        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2024
2025        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2026                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
2027        /* Unmask all ports */
2028        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2029
2030        /* Update shadow table and hw entry */
2031        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2032        mvpp2_prs_hw_write(priv, &pe);
2033
2034        return 0;
2035}
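
/* Note (illustrative): IPv4 multicast destinations live in 224.0.0.0/4, so
 * matching only the first DIP byte against MVPP2_PRS_IPV4_MC and its mask is
 * sufficient; broadcast uses MVPP2_PRS_IPV4_BC_MASK as both value and mask on
 * all four DIP bytes, i.e. (assuming an all-ones mask) the 255.255.255.255
 * limited-broadcast address.
 */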
2036
2037/* Set entries for protocols over IPv6 */
2038static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2039                               unsigned int ri, unsigned int ri_mask)
2040{
2041        struct mvpp2_prs_entry pe;
2042        int tid;
2043
2044        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2045            (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2046                return -EINVAL;
2047
2048        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2049                                        MVPP2_PE_LAST_FREE_TID);
2050        if (tid < 0)
2051                return tid;
2052
2053        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2054        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2055        pe.index = tid;
2056
2057        /* Finished: go to flowid generation */
2058        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2059        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2060        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2061        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2062                                  sizeof(struct ipv6hdr) - 6,
2063                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2064
2065        mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2066        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2067                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2068        /* Unmask all ports */
2069        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2070
2071        /* Write HW */
2072        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2073        mvpp2_prs_hw_write(priv, &pe);
2074
2075        return 0;
2076}
2077
2078/* IPv6 L3 multicast entry */
2079static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2080{
2081        struct mvpp2_prs_entry pe;
2082        int tid;
2083
2084        if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2085                return -EINVAL;
2086
2087        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2088                                        MVPP2_PE_LAST_FREE_TID);
2089        if (tid < 0)
2090                return tid;
2091
2092        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2093        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2094        pe.index = tid;
2095
2096        /* Not finished yet - continue the IPv6 lookup on the next header */
2097        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2098        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2099                                 MVPP2_PRS_RI_L3_ADDR_MASK);
2100        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2101                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2102        /* Shift back to IPv6 NH */
2103        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2104
2105        mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2106                                     MVPP2_PRS_IPV6_MC_MASK);
2107        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2108        /* Unmask all ports */
2109        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2110
2111        /* Update shadow table and hw entry */
2112        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2113        mvpp2_prs_hw_write(priv, &pe);
2114
2115        return 0;
2116}
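
/* Note (illustrative): IPv6 multicast destinations are ff00::/8, so matching
 * MVPP2_PRS_IPV6_MC against the first DIP byte is enough here; the -18 byte
 * shift then moves the lookup window back to the IPv6 next-header field so
 * the protocol entries above can still run on the same packet.
 */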
2117
2118/* Parser per-port initialization */
2119static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2120                                   int lu_max, int offset)
2121{
2122        u32 val;
2123
2124        /* Set lookup ID */
2125        val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2126        val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2127        val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2128        mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2129
2130        /* Set maximum number of loops for packet received from port */
2131        val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2132        val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2133        val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2134        mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2135
2136        /* Set initial offset for packet header extraction for the first
2137         * searching loop
2138         */
2139        val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2140        val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2141        val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2142        mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2143}
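
/* Example (illustrative sketch, values are placeholders): the probe path is
 * expected to call this once per port along the lines of
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 *
 * i.e. start every lookup chain at the Marvell-header stage, bound the number
 * of lookup iterations, and begin header extraction at offset 0.
 */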
2144
2145/* Default flow entries initialization for all ports */
2146static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2147{
2148        struct mvpp2_prs_entry pe;
2149        int port;
2150
2151        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2152                memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2153                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2154                pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2155
2156                /* Mask all ports */
2157                mvpp2_prs_tcam_port_map_set(&pe, 0);
2158
2159                /* Set flow ID */
2160                mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2161                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2162
2163                /* Update shadow table and hw entry */
2164                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2165                mvpp2_prs_hw_write(priv, &pe);
2166        }
2167}
2168
2169/* Set default entry for Marvell Header field */
2170static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2171{
2172        struct mvpp2_prs_entry pe;
2173
2174        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2175
2176        pe.index = MVPP2_PE_MH_DEFAULT;
2177        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2178        mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2179                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2180        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2181
2182        /* Unmask all ports */
2183        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2184
2185        /* Update shadow table and hw entry */
2186        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2187        mvpp2_prs_hw_write(priv, &pe);
2188}
2189
2190/* Set default entries (place holders) for promiscuous, non-promiscuous and
2191 * multicast MAC addresses
2192 */
2193static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2194{
2195        struct mvpp2_prs_entry pe;
2196
2197        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2198
2199        /* Non-promiscuous mode for all ports - DROP unknown packets */
2200        pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2201        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2202
2203        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2204                                 MVPP2_PRS_RI_DROP_MASK);
2205        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2206        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2207
2208        /* Unmask all ports */
2209        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2210
2211        /* Update shadow table and hw entry */
2212        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2213        mvpp2_prs_hw_write(priv, &pe);
2214
2215        /* place holders only - no ports */
2216        mvpp2_prs_mac_drop_all_set(priv, 0, false);
2217        mvpp2_prs_mac_promisc_set(priv, 0, false);
2218        mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2219        mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2220}
2221
2222/* Set default entries for various types of dsa packets */
2223static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2224{
2225        struct mvpp2_prs_entry pe;
2226
2227        /* Untagged EDSA entry - place holder */
2228        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2229                              MVPP2_PRS_EDSA);
2230
2231        /* Tagged EDSA entry - place holder */
2232        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2233
2234        /* Untagged DSA entry - place holder */
2235        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2236                              MVPP2_PRS_DSA);
2237
2238        /* Tagged DSA entry - place holder */
2239        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2240
2241        /* Untagged EDSA ethertype entry - place holder */
2242        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2243                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2244
2245        /* Tagged EDSA ethertype entry - place holder */
2246        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2247                                        MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2248
2249        /* Untagged DSA ethertype entry */
2250        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2251                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2252
2253        /* Tagged DSA ethertype entry */
2254        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2255                                        MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2256
2257        /* Set default entry, in case DSA or EDSA tag not found */
2258        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2259        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2260        pe.index = MVPP2_PE_DSA_DEFAULT;
2261        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2262
2263        /* Shift 0 bytes */
2264        mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2265        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2266
2267        /* Clear all sram ai bits for next iteration */
2268        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2269
2270        /* Unmask all ports */
2271        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2272
2273        mvpp2_prs_hw_write(priv, &pe);
2274}
2275
2276/* Match basic ethertypes */
2277static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2278{
2279        struct mvpp2_prs_entry pe;
2280        int tid;
2281
2282        /* Ethertype: PPPoE */
2283        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2284                                        MVPP2_PE_LAST_FREE_TID);
2285        if (tid < 0)
2286                return tid;
2287
2288        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2289        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2290        pe.index = tid;
2291
2292        mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2293
2294        mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2295                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2296        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2297        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2298                                 MVPP2_PRS_RI_PPPOE_MASK);
2299
2300        /* Update shadow table and hw entry */
2301        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2302        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2303        priv->prs_shadow[pe.index].finish = false;
2304        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2305                                MVPP2_PRS_RI_PPPOE_MASK);
2306        mvpp2_prs_hw_write(priv, &pe);
2307
2308        /* Ethertype: ARP */
2309        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2310                                        MVPP2_PE_LAST_FREE_TID);
2311        if (tid < 0)
2312                return tid;
2313
2314        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2315        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2316        pe.index = tid;
2317
2318        mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2319
2320        /* Generate flow in the next iteration */
2321        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2322        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2323        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2324                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2325        /* Set L3 offset */
2326        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2327                                  MVPP2_ETH_TYPE_LEN,
2328                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2329
2330        /* Update shadow table and hw entry */
2331        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2332        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2333        priv->prs_shadow[pe.index].finish = true;
2334        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2335                                MVPP2_PRS_RI_L3_PROTO_MASK);
2336        mvpp2_prs_hw_write(priv, &pe);
2337
2338        /* Ethertype: LBTD */
2339        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2340                                        MVPP2_PE_LAST_FREE_TID);
2341        if (tid < 0)
2342                return tid;
2343
2344        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2345        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2346        pe.index = tid;
2347
2348        mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2349
2350        /* Generate flow in the next iteration */
2351        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2352        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2353        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2354                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2355                                 MVPP2_PRS_RI_CPU_CODE_MASK |
2356                                 MVPP2_PRS_RI_UDF3_MASK);
2357        /* Set L3 offset */
2358        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2359                                  MVPP2_ETH_TYPE_LEN,
2360                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2361
2362        /* Update shadow table and hw entry */
2363        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2364        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2365        priv->prs_shadow[pe.index].finish = true;
2366        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2367                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2368                                MVPP2_PRS_RI_CPU_CODE_MASK |
2369                                MVPP2_PRS_RI_UDF3_MASK);
2370        mvpp2_prs_hw_write(priv, &pe);
2371
2372        /* Ethertype: IPv4 without options */
2373        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2374                                        MVPP2_PE_LAST_FREE_TID);
2375        if (tid < 0)
2376                return tid;
2377
2378        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2379        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2380        pe.index = tid;
2381
2382        mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2383        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2384                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2385                                     MVPP2_PRS_IPV4_HEAD_MASK |
2386                                     MVPP2_PRS_IPV4_IHL_MASK);
2387
2388        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2389        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2390                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2391        /* Skip eth_type + 4 bytes of IP header */
2392        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2393                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2394        /* Set L3 offset */
2395        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2396                                  MVPP2_ETH_TYPE_LEN,
2397                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2398
2399        /* Update shadow table and hw entry */
2400        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2401        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2402        priv->prs_shadow[pe.index].finish = false;
2403        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2404                                MVPP2_PRS_RI_L3_PROTO_MASK);
2405        mvpp2_prs_hw_write(priv, &pe);
2406
2407        /* Ethertype: IPv4 with options */
2408        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2409                                        MVPP2_PE_LAST_FREE_TID);
2410        if (tid < 0)
2411                return tid;
2412
2413        pe.index = tid;
2414
2415        /* Clear tcam data before updating */
2416        pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2417        pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2418
2419        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2420                                     MVPP2_PRS_IPV4_HEAD,
2421                                     MVPP2_PRS_IPV4_HEAD_MASK);
2422
2423        /* Clear ri before updating */
2424        pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2425        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2426        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2427                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2428
2429        /* Update shadow table and hw entry */
2430        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2431        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2432        priv->prs_shadow[pe.index].finish = false;
2433        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2434                                MVPP2_PRS_RI_L3_PROTO_MASK);
2435        mvpp2_prs_hw_write(priv, &pe);
2436
2437        /* Ethertype: IPv6 without options */
2438        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2439                                        MVPP2_PE_LAST_FREE_TID);
2440        if (tid < 0)
2441                return tid;
2442
2443        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2444        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2445        pe.index = tid;
2446
2447        mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2448
2449        /* Skip DIP of IPV6 header */
2450        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2451                                 MVPP2_MAX_L3_ADDR_SIZE,
2452                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2453        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2454        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2455                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2456        /* Set L3 offset */
2457        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2458                                  MVPP2_ETH_TYPE_LEN,
2459                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2460
2461        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2462        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2463        priv->prs_shadow[pe.index].finish = false;
2464        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2465                                MVPP2_PRS_RI_L3_PROTO_MASK);
2466        mvpp2_prs_hw_write(priv, &pe);
2467
2468        /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2469        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2470        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2471        pe.index = MVPP2_PE_ETH_TYPE_UN;
2472
2473        /* Unmask all ports */
2474        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2475
2476        /* Generate flow in the next iteration */
2477        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2478        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2479        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2480                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2481        /* Set L3 offset even if it's unknown L3 */
2482        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2483                                  MVPP2_ETH_TYPE_LEN,
2484                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2485
2486        /* Update shadow table and hw entry */
2487        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2488        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2489        priv->prs_shadow[pe.index].finish = true;
2490        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2491                                MVPP2_PRS_RI_L3_PROTO_MASK);
2492        mvpp2_prs_hw_write(priv, &pe);
2493
2494        return 0;
2495}
2496
2497/* Configure vlan entries and detect up to 2 successive VLAN tags.
2498 * Possible options:
2499 * 0x8100, 0x88A8
2500 * 0x8100, 0x8100
2501 * 0x8100
2502 * 0x88A8
2503 */
2504static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2505{
2506        struct mvpp2_prs_entry pe;
2507        int err;
2508
2509        priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2510                                              MVPP2_PRS_DBL_VLANS_MAX,
2511                                              sizeof(bool), GFP_KERNEL);
2512        if (!priv->prs_double_vlans)
2513                return -ENOMEM;
2514
2515        /* Double VLAN: 0x8100, 0x88A8 */
2516        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2517                                        MVPP2_PRS_PORT_MASK);
2518        if (err)
2519                return err;
2520
2521        /* Double VLAN: 0x8100, 0x8100 */
2522        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2523                                        MVPP2_PRS_PORT_MASK);
2524        if (err)
2525                return err;
2526
2527        /* Single VLAN: 0x88a8 */
2528        err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2529                                 MVPP2_PRS_PORT_MASK);
2530        if (err)
2531                return err;
2532
2533        /* Single VLAN: 0x8100 */
2534        err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2535                                 MVPP2_PRS_PORT_MASK);
2536        if (err)
2537                return err;
2538
2539        /* Set default double vlan entry */
2540        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2541        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2542        pe.index = MVPP2_PE_VLAN_DBL;
2543
2544        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2545        /* Clear ai for next iterations */
2546        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2547        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2548                                 MVPP2_PRS_RI_VLAN_MASK);
2549
2550        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2551                                 MVPP2_PRS_DBL_VLAN_AI_BIT);
2552        /* Unmask all ports */
2553        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2554
2555        /* Update shadow table and hw entry */
2556        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2557        mvpp2_prs_hw_write(priv, &pe);
2558
2559        /* Set default vlan none entry */
2560        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2561        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2562        pe.index = MVPP2_PE_VLAN_NONE;
2563
2564        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2565        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2566                                 MVPP2_PRS_RI_VLAN_MASK);
2567
2568        /* Unmask all ports */
2569        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2570
2571        /* Update shadow table and hw entry */
2572        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2573        mvpp2_prs_hw_write(priv, &pe);
2574
2575        return 0;
2576}
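
/* Example (illustrative): a double-tagged frame carries
 *
 *	DA(6) SA(6) TPID1(2) TCI(2) TPID2(2) TCI(2) ethertype ...
 *
 * so the double-vlan entries match the two TPIDs 4 bytes apart, skip 8 bytes
 * and run the VLAN lookup once more (a third tag then hits a triple-vlan
 * entry), while untagged frames fall through to MVPP2_PE_VLAN_NONE.
 */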
2577
2578/* Set entries for PPPoE ethertype */
2579static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2580{
2581        struct mvpp2_prs_entry pe;
2582        int tid;
2583
2584        /* IPv4 over PPPoE with options */
2585        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2586                                        MVPP2_PE_LAST_FREE_TID);
2587        if (tid < 0)
2588                return tid;
2589
2590        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2591        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2592        pe.index = tid;
2593
2594        mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2595
2596        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2597        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2598                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2599        /* Skip eth_type + 4 bytes of IP header */
2600        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2601                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2602        /* Set L3 offset */
2603        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2604                                  MVPP2_ETH_TYPE_LEN,
2605                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2606
2607        /* Update shadow table and hw entry */
2608        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2609        mvpp2_prs_hw_write(priv, &pe);
2610
2611        /* IPv4 over PPPoE without options */
2612        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2613                                        MVPP2_PE_LAST_FREE_TID);
2614        if (tid < 0)
2615                return tid;
2616
2617        pe.index = tid;
2618
2619        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2620                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2621                                     MVPP2_PRS_IPV4_HEAD_MASK |
2622                                     MVPP2_PRS_IPV4_IHL_MASK);
2623
2624        /* Clear ri before updating */
2625        pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2626        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2627        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2628                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2629
2630        /* Update shadow table and hw entry */
2631        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2632        mvpp2_prs_hw_write(priv, &pe);
2633
2634        /* IPv6 over PPPoE */
2635        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2636                                        MVPP2_PE_LAST_FREE_TID);
2637        if (tid < 0)
2638                return tid;
2639
2640        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2641        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2642        pe.index = tid;
2643
2644        mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2645
2646        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2647        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2648                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2649        /* Skip eth_type + 4 bytes of IPv6 header */
2650        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2651                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2652        /* Set L3 offset */
2653        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2654                                  MVPP2_ETH_TYPE_LEN,
2655                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2656
2657        /* Update shadow table and hw entry */
2658        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2659        mvpp2_prs_hw_write(priv, &pe);
2660
2661        /* Non-IP over PPPoE */
2662        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2663                                        MVPP2_PE_LAST_FREE_TID);
2664        if (tid < 0)
2665                return tid;
2666
2667        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2668        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2669        pe.index = tid;
2670
2671        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2672                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2673
2674        /* Finished: go to flowid generation */
2675        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2676        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2677        /* Set L3 offset even if it's unknown L3 */
2678        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2679                                  MVPP2_ETH_TYPE_LEN,
2680                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2681
2682        /* Update shadow table and hw entry */
2683        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2684        mvpp2_prs_hw_write(priv, &pe);
2685
2686        return 0;
2687}
2688
2689/* Initialize entries for IPv4 */
2690static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2691{
2692        struct mvpp2_prs_entry pe;
2693        int err;
2694
2695        /* Set entries for TCP, UDP and IGMP over IPv4 */
2696        err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2697                                  MVPP2_PRS_RI_L4_PROTO_MASK);
2698        if (err)
2699                return err;
2700
2701        err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2702                                  MVPP2_PRS_RI_L4_PROTO_MASK);
2703        if (err)
2704                return err;
2705
2706        err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2707                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2708                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2709                                  MVPP2_PRS_RI_CPU_CODE_MASK |
2710                                  MVPP2_PRS_RI_UDF3_MASK);
2711        if (err)
2712                return err;
2713
2714        /* IPv4 Broadcast */
2715        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2716        if (err)
2717                return err;
2718
2719        /* IPv4 Multicast */
2720        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2721        if (err)
2722                return err;
2723
2724        /* Default IPv4 entry for unknown protocols */
2725        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2726        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2727        pe.index = MVPP2_PE_IP4_PROTO_UN;
2728
2729        /* Set next lu to IPv4 */
2730        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2731        mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2732        /* Set L4 offset */
2733        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2734                                  sizeof(struct iphdr) - 4,
2735                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2736        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2737                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
2738        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2739                                 MVPP2_PRS_RI_L4_PROTO_MASK);
2740
2741        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2742        /* Unmask all ports */
2743        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2744
2745        /* Update shadow table and hw entry */
2746        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2747        mvpp2_prs_hw_write(priv, &pe);
2748
2749        /* Default IPv4 entry for unicast address */
2750        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2751        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2752        pe.index = MVPP2_PE_IP4_ADDR_UN;
2753
2754        /* Finished: go to flowid generation */
2755        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2756        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2757        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2758                                 MVPP2_PRS_RI_L3_ADDR_MASK);
2759
2760        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2761                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
2762        /* Unmask all ports */
2763        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2764
2765        /* Update shadow table and hw entry */
2766        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2767        mvpp2_prs_hw_write(priv, &pe);
2768
2769        return 0;
2770}
2771
2772/* Initialize entries for IPv6 */
2773static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2774{
2775        struct mvpp2_prs_entry pe;
2776        int tid, err;
2777
2778        /* Set entries for TCP, UDP and ICMP over IPv6 */
2779        err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2780                                  MVPP2_PRS_RI_L4_TCP,
2781                                  MVPP2_PRS_RI_L4_PROTO_MASK);
2782        if (err)
2783                return err;
2784
2785        err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2786                                  MVPP2_PRS_RI_L4_UDP,
2787                                  MVPP2_PRS_RI_L4_PROTO_MASK);
2788        if (err)
2789                return err;
2790
2791        err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2792                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2793                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2794                                  MVPP2_PRS_RI_CPU_CODE_MASK |
2795                                  MVPP2_PRS_RI_UDF3_MASK);
2796        if (err)
2797                return err;
2798
2799        /* IPv4 is the last header, a case similar to protocol 6 (TCP) or 17 (UDP) */
2800        /* Result Info: UDF7=1, DS lite */
2801        err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2802                                  MVPP2_PRS_RI_UDF7_IP6_LITE,
2803                                  MVPP2_PRS_RI_UDF7_MASK);
2804        if (err)
2805                return err;
2806
2807        /* IPv6 multicast */
2808        err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2809        if (err)
2810                return err;
2811
2812        /* Entry for checking hop limit */
2813        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2814                                        MVPP2_PE_LAST_FREE_TID);
2815        if (tid < 0)
2816                return tid;
2817
2818        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2819        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2820        pe.index = tid;
2821
2822        /* Finished: go to flowid generation */
2823        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2824        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2825        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2826                                 MVPP2_PRS_RI_DROP_MASK,
2827                                 MVPP2_PRS_RI_L3_PROTO_MASK |
2828                                 MVPP2_PRS_RI_DROP_MASK);
2829
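        /* Match a hop limit of zero and mark the packet to be dropped */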
2830        mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2831        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2832                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2833
2834        /* Update shadow table and hw entry */
2835        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2836        mvpp2_prs_hw_write(priv, &pe);
2837
2838        /* Default IPv6 entry for unknown protocols */
2839        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2840        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2841        pe.index = MVPP2_PE_IP6_PROTO_UN;
2842
2843        /* Finished: go to flowid generation */
2844        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2845        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2846        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2847                                 MVPP2_PRS_RI_L4_PROTO_MASK);
2848        /* Set L4 offset relative to our current place */
2849        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2850                                  sizeof(struct ipv6hdr) - 4,
2851                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2852
2853        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2854                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2855        /* Unmask all ports */
2856        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2857
2858        /* Update shadow table and hw entry */
2859        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2860        mvpp2_prs_hw_write(priv, &pe);
2861
2862        /* Default IPv6 entry for unknown ext protocols */
2863        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2864        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2865        pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2866
2867        /* Finished: go to flowid generation */
2868        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2869        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2870        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2871                                 MVPP2_PRS_RI_L4_PROTO_MASK);
2872
2873        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2874                                 MVPP2_PRS_IPV6_EXT_AI_BIT);
2875        /* Unmask all ports */
2876        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2877
2878        /* Update shadow table and hw entry */
2879        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2880        mvpp2_prs_hw_write(priv, &pe);
2881
2882        /* Default IPv6 entry for unicast address */
2883        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2884        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2885        pe.index = MVPP2_PE_IP6_ADDR_UN;
2886
2887        /* Finished: go to IPv6 again */
2888        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2889        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2890                                 MVPP2_PRS_RI_L3_ADDR_MASK);
2891        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2892                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2893        /* Shift back to IPV6 NH */
2894        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2895
2896        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2897        /* Unmask all ports */
2898        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2899
2900        /* Update shadow table and hw entry */
2901        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2902        mvpp2_prs_hw_write(priv, &pe);
2903
2904        return 0;
2905}
2906
2907/* Parser default initialization */
2908static int mvpp2_prs_default_init(struct platform_device *pdev,
2909                                  struct mvpp2 *priv)
2910{
2911        int err, index, i;
2912
2913        /* Enable tcam table */
2914        mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2915
2916        /* Clear all tcam and sram entries */
2917        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2918                mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2919                for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2920                        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2921
2922                mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2923                for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2924                        mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2925        }
2926
2927        /* Invalidate all tcam entries */
2928        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2929                mvpp2_prs_hw_inv(priv, index);
2930
2931        priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2932                                        sizeof(struct mvpp2_prs_shadow),
2933                                        GFP_KERNEL);
2934        if (!priv->prs_shadow)
2935                return -ENOMEM;
2936
2937        /* Always start from lookup = 0 */
2938        for (index = 0; index < MVPP2_MAX_PORTS; index++)
2939                mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2940                                       MVPP2_PRS_PORT_LU_MAX, 0);
2941
2942        mvpp2_prs_def_flow_init(priv);
2943
2944        mvpp2_prs_mh_init(priv);
2945
2946        mvpp2_prs_mac_init(priv);
2947
2948        mvpp2_prs_dsa_init(priv);
2949
2950        err = mvpp2_prs_etype_init(priv);
2951        if (err)
2952                return err;
2953
2954        err = mvpp2_prs_vlan_init(pdev, priv);
2955        if (err)
2956                return err;
2957
2958        err = mvpp2_prs_pppoe_init(priv);
2959        if (err)
2960                return err;
2961
2962        err = mvpp2_prs_ip6_init(priv);
2963        if (err)
2964                return err;
2965
2966        err = mvpp2_prs_ip4_init(priv);
2967        if (err)
2968                return err;
2969
2970        return 0;
2971}
2972
2973/* Compare MAC DA with tcam entry data */
2974static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2975                                       const u8 *da, unsigned char *mask)
2976{
2977        unsigned char tcam_byte, tcam_mask;
2978        int index;
2979
2980        for (index = 0; index < ETH_ALEN; index++) {
2981                mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2982                if (tcam_mask != mask[index])
2983                        return false;
2984
2985                if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2986                        return false;
2987        }
2988
2989        return true;
2990}
2991
2992/* Find tcam entry with matched pair <MAC DA, port> */
2993static struct mvpp2_prs_entry *
2994mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2995                            unsigned char *mask, int udf_type)
2996{
2997        struct mvpp2_prs_entry *pe;
2998        int tid;
2999
3000        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3001        if (!pe)
3002                return NULL;
3003        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3004
3005        /* Go through all entries with MVPP2_PRS_LU_MAC */
3006        for (tid = MVPP2_PE_FIRST_FREE_TID;
3007             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3008                unsigned int entry_pmap;
3009
3010                if (!priv->prs_shadow[tid].valid ||
3011                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3012                    (priv->prs_shadow[tid].udf != udf_type))
3013                        continue;
3014
3015                pe->index = tid;
3016                mvpp2_prs_hw_read(priv, pe);
3017                entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3018
3019                if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3020                    entry_pmap == pmap)
3021                        return pe;
3022        }
3023        kfree(pe);
3024
3025        return NULL;
3026}
3027
3028/* Update parser's mac da entry */
3029static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3030                                   const u8 *da, bool add)
3031{
3032        struct mvpp2_prs_entry *pe;
3033        unsigned int pmap, len, ri;
3034        unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3035        int tid;
3036
3037        /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3038        pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3039                                         MVPP2_PRS_UDF_MAC_DEF);
3040
3041        /* No such entry */
3042        if (!pe) {
3043                if (!add)
3044                        return 0;
3045
3046                /* Create new TCAM entry */
3047                /* Find first range mac entry */
3048                for (tid = MVPP2_PE_FIRST_FREE_TID;
3049                     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3050                        if (priv->prs_shadow[tid].valid &&
3051                            (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3052                            (priv->prs_shadow[tid].udf ==
3053                                                       MVPP2_PRS_UDF_MAC_RANGE))
3054                                break;
3055
3056                /* Go through all entries from first to last */
3057                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3058                                                tid - 1);
3059                if (tid < 0)
3060                        return tid;
3061
3062                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3063                if (!pe)
3064                        return -ENOMEM;
3065                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3066                pe->index = tid;
3067
3068                /* Mask all ports */
3069                mvpp2_prs_tcam_port_map_set(pe, 0);
3070        }
3071
3072        /* Update port mask */
3073        mvpp2_prs_tcam_port_set(pe, port, add);
3074
3075        /* Invalidate the entry if no ports are left enabled */
3076        pmap = mvpp2_prs_tcam_port_map_get(pe);
3077        if (pmap == 0) {
3078                if (add) {
3079                        kfree(pe);
3080                        return -EINVAL;
3081                }
3082                mvpp2_prs_hw_inv(priv, pe->index);
3083                priv->prs_shadow[pe->index].valid = false;
3084                kfree(pe);
3085                return 0;
3086        }
3087
3088        /* Continue - set next lookup */
3089        mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3090
3091        /* Set match on DA */
3092        len = ETH_ALEN;
3093        while (len--)
3094                mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3095
3096        /* Set result info bits */
3097        if (is_broadcast_ether_addr(da))
3098                ri = MVPP2_PRS_RI_L2_BCAST;
3099        else if (is_multicast_ether_addr(da))
3100                ri = MVPP2_PRS_RI_L2_MCAST;
3101        else
3102                ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3103
3104        mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3105                                 MVPP2_PRS_RI_MAC_ME_MASK);
3106        mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3107                                MVPP2_PRS_RI_MAC_ME_MASK);
3108
3109        /* Shift to ethertype */
3110        mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3111                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3112
3113        /* Update shadow table and hw entry */
3114        priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3115        mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3116        mvpp2_prs_hw_write(priv, pe);
3117
3118        kfree(pe);
3119
3120        return 0;
3121}
3122
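/* Replace the parser entry for the device's MAC address and update dev_addr */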
3123static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3124{
3125        struct mvpp2_port *port = netdev_priv(dev);
3126        int err;
3127
3128        /* Remove old parser entry */
3129        err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3130                                      false);
3131        if (err)
3132                return err;
3133
3134        /* Add new parser entry */
3135        err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3136        if (err)
3137                return err;
3138
3139        /* Set addr in the device */
3140        ether_addr_copy(dev->dev_addr, da);
3141
3142        return 0;
3143}
3144
3145/* Delete all of the port's simple (non-range) multicast entries */
3146static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3147{
3148        struct mvpp2_prs_entry pe;
3149        int index, tid;
3150
3151        for (tid = MVPP2_PE_FIRST_FREE_TID;
3152             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3153                unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3154
3155                if (!priv->prs_shadow[tid].valid ||
3156                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3157                    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3158                        continue;
3159
3160                /* Only simple mac entries */
3161                pe.index = tid;
3162                mvpp2_prs_hw_read(priv, &pe);
3163
3164                /* Read mac addr from entry */
3165                for (index = 0; index < ETH_ALEN; index++)
3166                        mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3167                                                     &da_mask[index]);
3168
3169                if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3170                        /* Delete this entry */
3171                        mvpp2_prs_mac_da_accept(priv, port, da, false);
3172        }
3173}
3174
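/* Select DSA/EDSA tagging mode for a port by updating its parser entries */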
3175static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3176{
3177        switch (type) {
3178        case MVPP2_TAG_TYPE_EDSA:
3179                /* Add port to EDSA entries */
3180                mvpp2_prs_dsa_tag_set(priv, port, true,
3181                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3182                mvpp2_prs_dsa_tag_set(priv, port, true,
3183                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3184                /* Remove port from DSA entries */
3185                mvpp2_prs_dsa_tag_set(priv, port, false,
3186                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3187                mvpp2_prs_dsa_tag_set(priv, port, false,
3188                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3189                break;
3190
3191        case MVPP2_TAG_TYPE_DSA:
3192                /* Add port to DSA entries */
3193                mvpp2_prs_dsa_tag_set(priv, port, true,
3194                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3195                mvpp2_prs_dsa_tag_set(priv, port, true,
3196                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3197                /* Remove port from EDSA entries */
3198                mvpp2_prs_dsa_tag_set(priv, port, false,
3199                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3200                mvpp2_prs_dsa_tag_set(priv, port, false,
3201                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3202                break;
3203
3204        case MVPP2_TAG_TYPE_MH:
3205        case MVPP2_TAG_TYPE_NONE:
3206                /* Remove port from EDSA and DSA entries */
3207                mvpp2_prs_dsa_tag_set(priv, port, false,
3208                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3209                mvpp2_prs_dsa_tag_set(priv, port, false,
3210                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3211                mvpp2_prs_dsa_tag_set(priv, port, false,
3212                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3213                mvpp2_prs_dsa_tag_set(priv, port, false,
3214                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3215                break;
3216
3217        default:
3218                if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3219                        return -EINVAL;
3220        }
3221
3222        return 0;
3223}
3224
3225/* Set prs flow for the port */
3226static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3227{
3228        struct mvpp2_prs_entry *pe;
3229        int tid;
3230
3231        pe = mvpp2_prs_flow_find(port->priv, port->id);
3232
3233        /* No such entry exists */
3234        if (!pe) {
3235                /* Go through all entries from last to first */
3236                tid = mvpp2_prs_tcam_first_free(port->priv,
3237                                                MVPP2_PE_LAST_FREE_TID,
3238                                               MVPP2_PE_FIRST_FREE_TID);
3239                if (tid < 0)
3240                        return tid;
3241
3242                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3243                if (!pe)
3244                        return -ENOMEM;
3245
3246                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3247                pe->index = tid;
3248
3249                /* Set flow ID */
3250                mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3251                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3252
3253                /* Update shadow table */
3254                mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3255        }
3256
3257        mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3258        mvpp2_prs_hw_write(port->priv, pe);
3259        kfree(pe);
3260
3261        return 0;
3262}
3263
3264/* Classifier configuration routines */
3265
3266/* Update classification flow table registers */
3267static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3268                                 struct mvpp2_cls_flow_entry *fe)
3269{
3270        mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3271        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
3272        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
3273        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
3274}
3275
3276/* Update classification lookup table register */
3277static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3278                                   struct mvpp2_cls_lookup_entry *le)
3279{
3280        u32 val;
3281
3282        val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3283        mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3284        mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3285}
3286
3287/* Classifier default initialization */
3288static void mvpp2_cls_init(struct mvpp2 *priv)
3289{
3290        struct mvpp2_cls_lookup_entry le;
3291        struct mvpp2_cls_flow_entry fe;
3292        int index;
3293
3294        /* Enable classifier */
3295        mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3296
3297        /* Clear classifier flow table */
3298        memset(&fe.data, 0, sizeof(fe.data));
3299        for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3300                fe.index = index;
3301                mvpp2_cls_flow_write(priv, &fe);
3302        }
3303
3304        /* Clear classifier lookup table */
3305        le.data = 0;
3306        for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3307                le.lkpid = index;
3308                le.way = 0;
3309                mvpp2_cls_lookup_write(priv, &le);
3310
3311                le.way = 1;
3312                mvpp2_cls_lookup_write(priv, &le);
3313        }
3314}
3315
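/* Configure the per-port classifier lookup table entry */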
3316static void mvpp2_cls_port_config(struct mvpp2_port *port)
3317{
3318        struct mvpp2_cls_lookup_entry le;
3319        u32 val;
3320
3321        /* Set way for the port */
3322        val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3323        val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3324        mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3325
3326        /* Pick the entry to be accessed in the lookup ID decoding table
3327         * according to the way and lkpid.
3328         */
3329        le.lkpid = port->id;
3330        le.way = 0;
3331        le.data = 0;
3332
3333        /* Set initial CPU queue for receiving packets */
3334        le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3335        le.data |= port->first_rxq;
3336
3337        /* Disable classification engines */
3338        le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3339
3340        /* Update lookup ID table entry */
3341        mvpp2_cls_lookup_write(port->priv, &le);
3342}
3343
3344/* Set CPU queue number for oversize packets */
3345static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3346{
3347        u32 val;
3348
3349        mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3350                    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3351
3352        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3353                    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3354
3355        val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3356        val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3357        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3358}
3359
3360/* Buffer Manager configuration routines */
3361
3362/* Create pool */
3363static int mvpp2_bm_pool_create(struct platform_device *pdev,
3364                                struct mvpp2 *priv,
3365                                struct mvpp2_bm_pool *bm_pool, int size)
3366{
3367        int size_bytes;
3368        u32 val;
3369
3370        size_bytes = sizeof(u32) * size;
3371        bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3372                                                &bm_pool->phys_addr,
3373                                                GFP_KERNEL);
3374        if (!bm_pool->virt_addr)
3375                return -ENOMEM;
3376
3377        if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3378                dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3379                                  bm_pool->phys_addr);
3380                dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3381                        bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3382                return -ENOMEM;
3383        }
3384
3385        mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3386                    bm_pool->phys_addr);
3387        mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3388
3389        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3390        val |= MVPP2_BM_START_MASK;
3391        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3392
3393        bm_pool->type = MVPP2_BM_FREE;
3394        bm_pool->size = size;
3395        bm_pool->pkt_size = 0;
3396        bm_pool->buf_num = 0;
3397        atomic_set(&bm_pool->in_use, 0);
3398
3399        return 0;
3400}
3401
3402/* Set pool buffer size */
3403static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3404                                      struct mvpp2_bm_pool *bm_pool,
3405                                      int buf_size)
3406{
3407        u32 val;
3408
3409        bm_pool->buf_size = buf_size;
3410
3411        val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3412        mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3413}
3414
3415/* Free all buffers from the pool */
3416static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3417                               struct mvpp2_bm_pool *bm_pool)
3418{
3419        int i;
3420
3421        for (i = 0; i < bm_pool->buf_num; i++) {
3422                dma_addr_t buf_phys_addr;
3423                u32 vaddr;
3424
3425                /* Get buffer virtual address (indirect access) */
3426                buf_phys_addr = mvpp2_read(priv,
3427                                           MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3428                vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3429
3430                dma_unmap_single(dev, buf_phys_addr,
3431                                 bm_pool->buf_size, DMA_FROM_DEVICE);
3432
3433                if (!vaddr)
3434                        break;
3435                dev_kfree_skb_any((struct sk_buff *)vaddr);
3436        }
3437
3438        /* Update BM driver with number of buffers removed from pool */
3439        bm_pool->buf_num -= i;
3440}
3441
3442/* Cleanup pool */
3443static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3444                                 struct mvpp2 *priv,
3445                                 struct mvpp2_bm_pool *bm_pool)
3446{
3447        u32 val;
3448
3449        mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3450        if (bm_pool->buf_num) {
3451                WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3452                return 0;
3453        }
3454
3455        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3456        val |= MVPP2_BM_STOP_MASK;
3457        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3458
3459        dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3460                          bm_pool->virt_addr,
3461                          bm_pool->phys_addr);
3462        return 0;
3463}
3464
3465static int mvpp2_bm_pools_init(struct platform_device *pdev,
3466                               struct mvpp2 *priv)
3467{
3468        int i, err, size;
3469        struct mvpp2_bm_pool *bm_pool;
3470
3471        /* Create all pools with maximum size */
3472        size = MVPP2_BM_POOL_SIZE_MAX;
3473        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3474                bm_pool = &priv->bm_pools[i];
3475                bm_pool->id = i;
3476                err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3477                if (err)
3478                        goto err_unroll_pools;
3479                mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3480        }
3481        return 0;
3482
3483err_unroll_pools:
3484        dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3485        for (i = i - 1; i >= 0; i--)
3486                mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3487        return err;
3488}
3489
3490static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3491{
3492        int i, err;
3493
3494        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3495                /* Mask all BM interrupts */
3496                mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3497                /* Clear BM cause register */
3498                mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3499        }
3500
3501        /* Allocate and initialize BM pools */
3502        priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3503                                     sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3504        if (!priv->bm_pools)
3505                return -ENOMEM;
3506
3507        err = mvpp2_bm_pools_init(pdev, priv);
3508        if (err < 0)
3509                return err;
3510        return 0;
3511}
3512
3513/* Attach long pool to rxq */
3514static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3515                                    int lrxq, int long_pool)
3516{
3517        u32 val;
3518        int prxq;
3519
3520        /* Get queue physical ID */
3521        prxq = port->rxqs[lrxq]->id;
3522
3523        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3524        val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3525        val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3526                    MVPP2_RXQ_POOL_LONG_MASK);
3527
3528        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3529}
3530
3531/* Attach short pool to rxq */
3532static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3533                                     int lrxq, int short_pool)
3534{
3535        u32 val;
3536        int prxq;
3537
3538        /* Get queue physical ID */
3539        prxq = port->rxqs[lrxq]->id;
3540
3541        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3542        val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3543        val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3544                    MVPP2_RXQ_POOL_SHORT_MASK);
3545
3546        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3547}
3548
3549/* Allocate skb for BM pool */
3550static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3551                                       struct mvpp2_bm_pool *bm_pool,
3552                                       dma_addr_t *buf_phys_addr,
3553                                       gfp_t gfp_mask)
3554{
3555        struct sk_buff *skb;
3556        dma_addr_t phys_addr;
3557
3558        skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
3559        if (!skb)
3560                return NULL;
3561
3562        phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3563                                   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3564                                    DMA_FROM_DEVICE);
3565        if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3566                dev_kfree_skb_any(skb);
3567                return NULL;
3568        }
3569        *buf_phys_addr = phys_addr;
3570
3571        return skb;
3572}
3573
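/* A BM cookie packs the buffer pool id and the issuing CPU id into a single
 * 32-bit word; see mvpp2_bm_cookie_build() below for the bit layout.
 */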
3574/* Set pool number in a BM cookie */
3575static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3576{
3577        u32 bm;
3578
3579        bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3580        bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3581
3582        return bm;
3583}
3584
3585/* Get pool number from a BM cookie */
3586static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
3587{
3588        return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3589}
3590
3591/* Release buffer to BM */
3592static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3593                                     u32 buf_phys_addr, u32 buf_virt_addr)
3594{
3595        mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3596        mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3597}
3598
3599/* Release multicast buffer */
3600static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3601                                 u32 buf_phys_addr, u32 buf_virt_addr,
3602                                 int mc_id)
3603{
3604        u32 val = 0;
3605
3606        val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3607        mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3608
3609        mvpp2_bm_pool_put(port, pool,
3610                          buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3611                          buf_virt_addr);
3612}
3613
3614/* Refill BM pool */
3615static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3616                              u32 phys_addr, u32 cookie)
3617{
3618        int pool = mvpp2_bm_cookie_pool_get(bm);
3619
3620        mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3621}
3622
3623/* Allocate buffers for the pool */
3624static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3625                             struct mvpp2_bm_pool *bm_pool, int buf_num)
3626{
3627        struct sk_buff *skb;
3628        int i, buf_size, total_size;
3629        u32 bm;
3630        dma_addr_t phys_addr;
3631
3632        buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3633        total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3634
3635        if (buf_num < 0 ||
3636            (buf_num + bm_pool->buf_num > bm_pool->size)) {
3637                netdev_err(port->dev,
3638                           "cannot allocate %d buffers for pool %d\n",
3639                           buf_num, bm_pool->id);
3640                return 0;
3641        }
3642
3643        bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
3644        for (i = 0; i < buf_num; i++) {
3645                skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3646                if (!skb)
3647                        break;
3648
3649                mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
3650        }
3651
3652        /* Update BM driver with number of buffers added to pool */
3653        bm_pool->buf_num += i;
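        /* Refill threshold: a quarter of the buffers currently in the pool */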
3654        bm_pool->in_use_thresh = bm_pool->buf_num / 4;
3655
3656        netdev_dbg(port->dev,
3657                   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3658                   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3659                   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3660
3661        netdev_dbg(port->dev,
3662                   "%s pool %d: %d of %d buffers added\n",
3663                   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3664                   bm_pool->id, i, buf_num);
3665        return i;
3666}
3667
3668/* Notify the driver that BM pool is being used as a specific type and return the
3669 * pool pointer on success
3670 */
3671static struct mvpp2_bm_pool *
3672mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3673                  int pkt_size)
3674{
3675        struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3676        int num;
3677
3678        if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3679                netdev_err(port->dev, "mixing pool types is forbidden\n");
3680                return NULL;
3681        }
3682
3683        if (new_pool->type == MVPP2_BM_FREE)
3684                new_pool->type = type;
3685
3686        /* Allocate buffers in case BM pool is used as long pool, but packet
3687         * size doesn't match MTU or BM pool hasn't been used yet
3688         */
3689        if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3690            (new_pool->pkt_size == 0)) {
3691                int pkts_num;
3692
3693                /* Set default buffer number or free all the buffers in case
3694                 * the pool is not empty
3695                 */
3696                pkts_num = new_pool->buf_num;
3697                if (pkts_num == 0)
3698                        pkts_num = type == MVPP2_BM_SWF_LONG ?
3699                                   MVPP2_BM_LONG_BUF_NUM :
3700                                   MVPP2_BM_SHORT_BUF_NUM;
3701                else
3702                        mvpp2_bm_bufs_free(port->dev->dev.parent,
3703                                           port->priv, new_pool);
3704
3705                new_pool->pkt_size = pkt_size;
3706
3707                /* Allocate buffers for this pool */
3708                num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3709                if (num != pkts_num) {
3710                        WARN(1, "pool %d: %d of %d allocated\n",
3711                             new_pool->id, num, pkts_num);
3712                        return NULL;
3713                }
3714        }
3715
3716        mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3717                                  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3718
3719        return new_pool;
3720}
3721
3722/* Initialize pools for swf */
3723static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3724{
3725        int rxq;
3726
3727        if (!port->pool_long) {
3728                port->pool_long =
3729                       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3730                                         MVPP2_BM_SWF_LONG,
3731                                         port->pkt_size);
3732                if (!port->pool_long)
3733                        return -ENOMEM;
3734
3735                port->pool_long->port_map |= (1 << port->id);
3736
3737                for (rxq = 0; rxq < rxq_number; rxq++)
3738                        mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3739        }
3740
3741        if (!port->pool_short) {
3742                port->pool_short =
3743                        mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3744                                          MVPP2_BM_SWF_SHORT,
3745                                          MVPP2_BM_SHORT_PKT_SIZE);
3746                if (!port->pool_short)
3747                        return -ENOMEM;
3748
3749                port->pool_short->port_map |= (1 << port->id);
3750
3751                for (rxq = 0; rxq < rxq_number; rxq++)
3752                        mvpp2_rxq_short_pool_set(port, rxq,
3753                                                 port->pool_short->id);
3754        }
3755
3756        return 0;
3757}
3758
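/* Re-create the long pool buffers with a packet size matching the new MTU */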
3759static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3760{
3761        struct mvpp2_port *port = netdev_priv(dev);
3762        struct mvpp2_bm_pool *port_pool = port->pool_long;
3763        int num, pkts_num = port_pool->buf_num;
3764        int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3765
3766        /* Update BM pool with new buffer size */
3767        mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
3768        if (port_pool->buf_num) {
3769                WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3770                return -EIO;
3771        }
3772
3773        port_pool->pkt_size = pkt_size;
3774        num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3775        if (num != pkts_num) {
3776                WARN(1, "pool %d: %d of %d allocated\n",
3777                     port_pool->id, num, pkts_num);
3778                return -EIO;
3779        }
3780
3781        mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3782                                  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3783        dev->mtu = mtu;
3784        netdev_update_features(dev);
3785        return 0;
3786}
3787
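/* Enable the port's Rx/Tx interrupts on all present CPUs */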
3788static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3789{
3790        int cpu, cpu_mask = 0;
3791
3792        for_each_present_cpu(cpu)
3793                cpu_mask |= 1 << cpu;
3794        mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3795                    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3796}
3797
3798static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3799{
3800        int cpu, cpu_mask = 0;
3801
3802        for_each_present_cpu(cpu)
3803                cpu_mask |= 1 << cpu;
3804        mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3805                    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3806}
3807
3808/* Mask the current CPU's Rx/Tx interrupts */
3809static void mvpp2_interrupts_mask(void *arg)
3810{
3811        struct mvpp2_port *port = arg;
3812
3813        mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3814}
3815
3816/* Unmask the current CPU's Rx/Tx interrupts */
3817static void mvpp2_interrupts_unmask(void *arg)
3818{
3819        struct mvpp2_port *port = arg;
3820
3821        mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3822                    (MVPP2_CAUSE_MISC_SUM_MASK |
3823                     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3824}
3825
3826/* Port configuration routines */
3827
3828static void mvpp2_port_mii_set(struct mvpp2_port *port)
3829{
3830        u32 val;
3831
3832        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3833
3834        switch (port->phy_interface) {
3835        case PHY_INTERFACE_MODE_SGMII:
3836                val |= MVPP2_GMAC_INBAND_AN_MASK;
3837                break;
3838        case PHY_INTERFACE_MODE_RGMII:
3839                val |= MVPP2_GMAC_PORT_RGMII_MASK;
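                /* fall through: also clear the PCS enable bit */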
3840        default:
3841                val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3842        }
3843
3844        writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3845}
3846
3847static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3848{
3849        u32 val;
3850
3851        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3852        val |= MVPP2_GMAC_FC_ADV_EN;
3853        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3854}
3855
3856static void mvpp2_port_enable(struct mvpp2_port *port)
3857{
3858        u32 val;
3859
3860        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3861        val |= MVPP2_GMAC_PORT_EN_MASK;
3862        val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3863        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3864}
3865
3866static void mvpp2_port_disable(struct mvpp2_port *port)
3867{
3868        u32 val;
3869
3870        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3871        val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3872        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3873}
3874
3875/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3876static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3877{
3878        u32 val;
3879
3880        val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3881                    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3882        writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3883}
3884
3885/* Configure loopback port */
3886static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3887{
3888        u32 val;
3889
3890        val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3891
3892        if (port->speed == 1000)
3893                val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3894        else
3895                val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3896
3897        if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3898                val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3899        else
3900                val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3901
3902        writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3903}
3904
3905static void mvpp2_port_reset(struct mvpp2_port *port)
3906{
3907        u32 val;
3908
3909        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3910                    ~MVPP2_GMAC_PORT_RESET_MASK;
3911        writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3912
3913        while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3914               MVPP2_GMAC_PORT_RESET_MASK)
3915                continue;
3916}
3917
3918/* Change maximum receive size of the port */
3919static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3920{
3921        u32 val;
3922
3923        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3924        val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
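        /* The field holds (pkt_size - MVPP2_MH_SIZE) / 2, i.e. the maximum
         * RX size expressed in 2-byte units
         */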
3925        val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3926                    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3927        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3928}
3929
3930/* Set defaults to the MVPP2 port */
3931static void mvpp2_defaults_set(struct mvpp2_port *port)
3932{
3933        int tx_port_num, val, queue, ptxq, lrxq;
3934
3935        /* Configure port to loopback if needed */
3936        if (port->flags & MVPP2_F_LOOPBACK)
3937                mvpp2_port_loopback_set(port);
3938
3939        /* Update TX FIFO MIN Threshold */
3940        val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3941        val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3942        /* Min. TX threshold must be less than minimal packet length */
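        /* 64 is the minimal frame size; 4 and 2 presumably account for the
         * FCS and the Marvell header
         */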
3943        val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3944        writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3945
3946        /* Disable Legacy WRR, Disable EJP, Release from reset */
3947        tx_port_num = mvpp2_egress_port(port);
3948        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3949                    tx_port_num);
3950        mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3951
3952        /* Close bandwidth for all queues */
3953        for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3954                ptxq = mvpp2_txq_phys(port->id, queue);
3955                mvpp2_write(port->priv,
3956                            MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3957        }
3958
3959        /* Set refill period to 1 usec, refill tokens
3960         * and bucket size to maximum
3961         */
3962        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3963                    port->priv->tclk / USEC_PER_SEC);
3964        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3965        val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3966        val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3967        val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3968        mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3969        val = MVPP2_TXP_TOKEN_SIZE_MAX;
3970        mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3971
3972        /* Set MaximumLowLatencyPacketSize value to 256 */
3973        mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3974                    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3975                    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3976
3977        /* Enable Rx cache snoop */
3978        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3979                queue = port->rxqs[lrxq]->id;
3980                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3981                val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3982                           MVPP2_SNOOP_BUF_HDR_MASK;
3983                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3984        }
3985
3986        /* By default, mask all interrupts on all present CPUs */
3987        mvpp2_interrupts_disable(port);
3988}
3989
3990/* Enable/disable receiving packets */
3991static void mvpp2_ingress_enable(struct mvpp2_port *port)
3992{
3993        u32 val;
3994        int lrxq, queue;
3995
3996        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3997                queue = port->rxqs[lrxq]->id;
3998                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3999                val &= ~MVPP2_RXQ_DISABLE_MASK;
4000                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4001        }
4002}
4003
4004static void mvpp2_ingress_disable(struct mvpp2_port *port)
4005{
4006        u32 val;
4007        int lrxq, queue;
4008
4009        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4010                queue = port->rxqs[lrxq]->id;
4011                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4012                val |= MVPP2_RXQ_DISABLE_MASK;
4013                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4014        }
4015}
4016
4017/* Enable transmit via physical egress queue
4018 * - HW starts taking descriptors from DRAM
4019 */
4020static void mvpp2_egress_enable(struct mvpp2_port *port)
4021{
4022        u32 qmap;
4023        int queue;
4024        int tx_port_num = mvpp2_egress_port(port);
4025
4026        /* Enable all initialized Tx queues */
4027        qmap = 0;
4028        for (queue = 0; queue < txq_number; queue++) {
4029                struct mvpp2_tx_queue *txq = port->txqs[queue];
4030
4031                if (txq->descs != NULL)
4032                        qmap |= (1 << queue);
4033        }
4034
4035        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4036        mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4037}
4038
4039/* Disable transmit via physical egress queue
4040 * - HW doesn't take descriptors from DRAM
4041 */
4042static void mvpp2_egress_disable(struct mvpp2_port *port)
4043{
4044        u32 reg_data;
4045        int delay;
4046        int tx_port_num = mvpp2_egress_port(port);
4047
4048        /* Issue stop command for active channels only */
4049        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4050        reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4051                    MVPP2_TXP_SCHED_ENQ_MASK;
4052        if (reg_data != 0)
4053                mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4054                            (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4055
4056        /* Wait for all Tx activity to terminate. */
4057        delay = 0;
4058        do {
4059                if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4060                        netdev_warn(port->dev,
4061                                    "Tx stop timed out, status=0x%08x\n",
4062                                    reg_data);
4063                        break;
4064                }
4065                mdelay(1);
4066                delay++;
4067
4068                /* Check the port TX Command register to verify that all
4069                 * Tx queues are stopped
4070                 */
4071                reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4072        } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4073}
4074
4075/* Rx descriptors helper methods */
4076
4077/* Get number of Rx descriptors occupied by received packets */
4078static inline int
4079mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4080{
4081        u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4082
4083        return val & MVPP2_RXQ_OCCUPIED_MASK;
4084}
4085
4086/* Update Rx queue status with the number of occupied and available
4087 * Rx descriptor slots.
4088 */
4089static inline void
4090mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4091                        int used_count, int free_count)
4092{
4093        /* Decrement the number of used descriptors and increment the
4094         * number of free descriptors.
4095         */
4096        u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4097
4098        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4099}
4100
4101/* Get pointer to next RX descriptor to be processed by SW */
4102static inline struct mvpp2_rx_desc *
4103mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4104{
4105        int rx_desc = rxq->next_desc_to_proc;
4106
4107        rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4108        prefetch(rxq->descs + rxq->next_desc_to_proc);
4109        return rxq->descs + rx_desc;
4110}
4111
4112/* Set rx queue offset */
4113static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4114                                 int prxq, int offset)
4115{
4116        u32 val;
4117
4118        /* Convert offset from bytes to units of 32 bytes */
4119        offset = offset >> 5;
4120
4121        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4122        val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4123
4124        /* Offset is in units of 32 bytes */
4125        val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4126                    MVPP2_RXQ_PACKET_OFFSET_MASK);
4127
4128        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4129}
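
/* Worked example: mvpp2_rxq_init() below passes NET_SKB_PAD as the packet
 * offset. Assuming the common NET_SKB_PAD of 64 bytes, the shift above
 * yields 64 >> 5 = 2, i.e. the hardware is programmed with an offset of
 * two 32-byte units.
 */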
4130
4131/* Obtain BM cookie information from descriptor */
4132static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4133{
4134        int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4135                   MVPP2_RXD_BM_POOL_ID_OFFS;
4136        int cpu = smp_processor_id();
4137
4138        return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4139               ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4140}
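
/* Illustration: the cookie packs two 8-bit fields - the BM pool id taken
 * from the Rx descriptor and the CPU that built the cookie - so callers
 * such as mvpp2_rx() can later recover the pool with
 * mvpp2_bm_cookie_pool_get() before refilling it via mvpp2_pool_refill().
 */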
4141
4142/* Tx descriptors helper methods */
4143
4144/* Get number of Tx descriptors waiting to be transmitted by HW */
4145static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4146                                       struct mvpp2_tx_queue *txq)
4147{
4148        u32 val;
4149
4150        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4151        val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4152
4153        return val & MVPP2_TXQ_PENDING_MASK;
4154}
4155
4156/* Get pointer to next Tx descriptor to be processed (send) by HW */
4157static struct mvpp2_tx_desc *
4158mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4159{
4160        int tx_desc = txq->next_desc_to_proc;
4161
4162        txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4163        return txq->descs + tx_desc;
4164}
4165
4166/* Update HW with number of aggregated Tx descriptors to be sent */
4167static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4168{
4169        /* aggregated access - relevant TXQ number is written in TX desc */
4170        mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4171}
4172
4173
4174/* Check if there are enough free descriptors in aggregated txq.
4175 * If not, update the number of occupied descriptors and repeat the check.
4176 */
4177static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4178                                     struct mvpp2_tx_queue *aggr_txq, int num)
4179{
4180        if ((aggr_txq->count + num) > aggr_txq->size) {
4181                /* Update number of occupied aggregated Tx descriptors */
4182                int cpu = smp_processor_id();
4183                u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4184
4185                aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4186        }
4187
4188        if ((aggr_txq->count + num) > aggr_txq->size)
4189                return -ENOMEM;
4190
4191        return 0;
4192}
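
/* Note: this runs in the per-CPU transmit path (see mvpp2_tx() below);
 * when it returns -ENOMEM the caller simply drops the frame and accounts
 * it in dev->stats.tx_dropped rather than stopping the queue.
 */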
4193
4194/* Reserved Tx descriptors allocation request */
4195static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4196                                         struct mvpp2_tx_queue *txq, int num)
4197{
4198        u32 val;
4199
4200        val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4201        mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4202
4203        val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4204
4205        return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4206}
4207
4208/* Check if there are enough reserved descriptors for transmission.
4209 * If not, request chunk of reserved descriptors and check again.
4210 */
4211static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4212                                            struct mvpp2_tx_queue *txq,
4213                                            struct mvpp2_txq_pcpu *txq_pcpu,
4214                                            int num)
4215{
4216        int req, cpu, desc_count;
4217
4218        if (txq_pcpu->reserved_num >= num)
4219                return 0;
4220
4221        /* Not enough descriptors reserved! Update the reserved descriptor
4222         * count and check again.
4223         */
4224
4225        desc_count = 0;
4226        /* Compute total of used descriptors */
4227        for_each_present_cpu(cpu) {
4228                struct mvpp2_txq_pcpu *txq_pcpu_aux;
4229
4230                txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4231                desc_count += txq_pcpu_aux->count;
4232                desc_count += txq_pcpu_aux->reserved_num;
4233        }
4234
4235        req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4236        desc_count += req;
4237
4238        if (desc_count >
4239           (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4240                return -ENOMEM;
4241
4242        txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4243
4244        /* OK, the descriptor count has been updated: check again. */
4245        if (txq_pcpu->reserved_num < num)
4246                return -ENOMEM;
4247        return 0;
4248}
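
/* In other words, reserved descriptors are handed out in chunks of at
 * least MVPP2_CPU_DESC_CHUNK, and a request is refused when the total of
 * in-flight and reserved descriptors plus this chunk would eat into the
 * headroom of one chunk kept per present CPU.
 */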
4249
4250/* Release the last allocated Tx descriptor. Useful to handle DMA
4251 * mapping failures in the Tx path.
4252 */
4253static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4254{
4255        if (txq->next_desc_to_proc == 0)
4256                txq->next_desc_to_proc = txq->last_desc - 1;
4257        else
4258                txq->next_desc_to_proc--;
4259}
4260
4261/* Set Tx descriptors fields relevant for CSUM calculation */
4262static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4263                               int ip_hdr_len, int l4_proto)
4264{
4265        u32 command;
4266
4267        /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4268         * G_L4_chk, L4_type required only for checksum calculation
4269         */
4270        command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4271        command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4272        command |= MVPP2_TXD_IP_CSUM_DISABLE;
4273
4274        if (l3_proto == swab16(ETH_P_IP)) {
4275                command &= ~MVPP2_TXD_IP_CSUM_DISABLE;  /* enable IPv4 csum */
4276                command &= ~MVPP2_TXD_L3_IP6;           /* enable IPv4 */
4277        } else {
4278                command |= MVPP2_TXD_L3_IP6;            /* enable IPv6 */
4279        }
4280
4281        if (l4_proto == IPPROTO_TCP) {
4282                command &= ~MVPP2_TXD_L4_UDP;           /* enable TCP */
4283                command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
4284        } else if (l4_proto == IPPROTO_UDP) {
4285                command |= MVPP2_TXD_L4_UDP;            /* enable UDP */
4286                command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
4287        } else {
4288                command |= MVPP2_TXD_L4_CSUM_NOT;
4289        }
4290
4291        return command;
4292}
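
/* Example of how this helper is fed in practice (see mvpp2_skb_tx_csum()
 * below): for an IPv4/TCP frame the caller passes
 * l3_offs = skb_network_offset(skb), l3_proto = skb->protocol,
 * ip_hdr_len = ip_hdr(skb)->ihl (IP header length in 32-bit words) and
 * l4_proto = IPPROTO_TCP, so the helper clears MVPP2_TXD_IP_CSUM_DISABLE
 * and selects hardware TCP checksum generation.
 */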
4293
4294/* Get number of sent descriptors and decrement counter.
4295 * The number of sent descriptors is returned.
4296 * Per-CPU access
4297 */
4298static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4299                                           struct mvpp2_tx_queue *txq)
4300{
4301        u32 val;
4302
4303        /* Reading status reg resets transmitted descriptor counter */
4304        val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4305
4306        return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4307                MVPP2_TRANSMITTED_COUNT_OFFSET;
4308}
4309
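/* Clear the transmitted-descriptor counters of all Tx queues on the
 * calling CPU. Reading MVPP2_TXQ_SENT_REG is what resets the counter
 * (see mvpp2_txq_sent_desc_proc() above), so this is run on every CPU
 * via on_each_cpu() when the Tx queues are set up or torn down.
 */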
4310static void mvpp2_txq_sent_counter_clear(void *arg)
4311{
4312        struct mvpp2_port *port = arg;
4313        int queue;
4314
4315        for (queue = 0; queue < txq_number; queue++) {
4316                int id = port->txqs[queue]->id;
4317
4318                mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4319        }
4320}
4321
4322/* Set max sizes for Tx queues */
4323static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4324{
4325        u32     val, size, mtu;
4326        int     txq, tx_port_num;
4327
4328        mtu = port->pkt_size * 8;
4329        if (mtu > MVPP2_TXP_MTU_MAX)
4330                mtu = MVPP2_TXP_MTU_MAX;
4331
4332        /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4333        mtu = 3 * mtu;
4334
4335        /* Indirect access to registers */
4336        tx_port_num = mvpp2_egress_port(port);
4337        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4338
4339        /* Set MTU */
4340        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4341        val &= ~MVPP2_TXP_MTU_MAX;
4342        val |= mtu;
4343        mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4344
4345        /* TXP token size and all TXQs token size must be larger than MTU */
4346        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4347        size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4348        if (size < mtu) {
4349                size = mtu;
4350                val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4351                val |= size;
4352                mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4353        }
4354
4355        for (txq = 0; txq < txq_number; txq++) {
4356                val = mvpp2_read(port->priv,
4357                                 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4358                size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4359
4360                if (size < mtu) {
4361                        size = mtu;
4362                        val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4363                        val |= size;
4364                        mvpp2_write(port->priv,
4365                                    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4366                                    val);
4367                }
4368        }
4369}
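
/* Rough illustration of the sizing above: for a 1518 byte packet size the
 * intermediate "mtu" becomes 1518 * 8 = 12144, tripled to 36432 by the
 * token bucket workaround (unless the MVPP2_TXP_MTU_MAX cap kicks in
 * first), and the port and per-queue token sizes are then raised to at
 * least that value.
 */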
4370
4371/* Set the number of packets that will be received before Rx interrupt
4372 * will be generated by HW.
4373 */
4374static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4375                                   struct mvpp2_rx_queue *rxq, u32 pkts)
4376{
4377        u32 val;
4378
4379        val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
4380        mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4381        mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
4382
4383        rxq->pkts_coal = pkts;
4384}
4385
4386/* Set the time delay in usec before Rx interrupt */
4387static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4388                                   struct mvpp2_rx_queue *rxq, u32 usec)
4389{
4390        u32 val;
4391
4392        val = (port->priv->tclk / USEC_PER_SEC) * usec;
4393        mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4394
4395        rxq->time_coal = usec;
4396}
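
/* Worked example: assuming a 250 MHz tclk, a 100 usec coalescing delay is
 * programmed as (250000000 / USEC_PER_SEC) * 100 = 25000 clock cycles in
 * MVPP2_ISR_RX_THRESHOLD_REG.
 */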
4397
4398/* Free Tx queue skbuffs */
4399static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4400                                struct mvpp2_tx_queue *txq,
4401                                struct mvpp2_txq_pcpu *txq_pcpu, int num)
4402{
4403        int i;
4404
4405        for (i = 0; i < num; i++) {
4406                dma_addr_t buf_phys_addr =
4407                                    txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
4408                struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4409
4410                mvpp2_txq_inc_get(txq_pcpu);
4411
4412                dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4413                                 skb_headlen(skb), DMA_TO_DEVICE);
4414                if (!skb)
4415                        continue;
4416                dev_kfree_skb_any(skb);
4417        }
4418}
4419
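/* Map the highest set bit of an Rx cause bitmap to its Rx queue; the Tx
 * variant below does the same for the per-queue Tx cause bits.
 */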
4420static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4421                                                        u32 cause)
4422{
4423        int queue = fls(cause) - 1;
4424
4425        return port->rxqs[queue];
4426}
4427
4428static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4429                                                        u32 cause)
4430{
4431        int queue = fls(cause) - 1;
4432
4433        return port->txqs[queue];
4434}
4435
4436/* Handle end of transmission */
4437static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4438                           struct mvpp2_txq_pcpu *txq_pcpu)
4439{
4440        struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4441        int tx_done;
4442
4443        if (txq_pcpu->cpu != smp_processor_id())
4444                netdev_err(port->dev, "wrong CPU at the end of Tx processing\n");
4445
4446        tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4447        if (!tx_done)
4448                return;
4449        mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4450
4451        txq_pcpu->count -= tx_done;
4452
4453        if (netif_tx_queue_stopped(nq))
4454                if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4455                        netif_tx_wake_queue(nq);
4456}
4457
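/* Run Tx completion for every queue flagged in @cause and return how many
 * descriptors are still outstanding, so the caller can re-arm the tx-done
 * timer if needed (see mvpp2_tx_proc_cb()).
 */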
4458static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4459{
4460        struct mvpp2_tx_queue *txq;
4461        struct mvpp2_txq_pcpu *txq_pcpu;
4462        unsigned int tx_todo = 0;
4463
4464        while (cause) {
4465                txq = mvpp2_get_tx_queue(port, cause);
4466                if (!txq)
4467                        break;
4468
4469                txq_pcpu = this_cpu_ptr(txq->pcpu);
4470
4471                if (txq_pcpu->count) {
4472                        mvpp2_txq_done(port, txq, txq_pcpu);
4473                        tx_todo += txq_pcpu->count;
4474                }
4475
4476                cause &= ~(1 << txq->log_id);
4477        }
4478        return tx_todo;
4479}
4480
4481/* Rx/Tx queue initialization/cleanup methods */
4482
4483/* Allocate and initialize descriptors for aggr TXQ */
4484static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4485                               struct mvpp2_tx_queue *aggr_txq,
4486                               int desc_num, int cpu,
4487                               struct mvpp2 *priv)
4488{
4489        /* Allocate memory for TX descriptors */
4490        aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4491                                desc_num * MVPP2_DESC_ALIGNED_SIZE,
4492                                &aggr_txq->descs_phys, GFP_KERNEL);
4493        if (!aggr_txq->descs)
4494                return -ENOMEM;
4495
4496        /* Make sure descriptor address is cache line size aligned  */
4497        BUG_ON(aggr_txq->descs !=
4498               PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4499
4500        aggr_txq->last_desc = aggr_txq->size - 1;
4501
4502        /* Workaround: aggr TXQ is not reset, resume from the HW index */
4503        aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4504                                                 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4505
4506        /* Set Tx descriptors queue starting address */
4507        /* indirect access */
4508        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4509                    aggr_txq->descs_phys);
4510        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4511
4512        return 0;
4513}
4514
4515/* Create a specified Rx queue */
4516static int mvpp2_rxq_init(struct mvpp2_port *port,
4517                          struct mvpp2_rx_queue *rxq)
4518
4519{
4520        rxq->size = port->rx_ring_size;
4521
4522        /* Allocate memory for RX descriptors */
4523        rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4524                                        rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4525                                        &rxq->descs_phys, GFP_KERNEL);
4526        if (!rxq->descs)
4527                return -ENOMEM;
4528
4529        BUG_ON(rxq->descs !=
4530               PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4531
4532        rxq->last_desc = rxq->size - 1;
4533
4534        /* Zero occupied and non-occupied counters - direct access */
4535        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4536
4537        /* Set Rx descriptors queue starting address - indirect access */
4538        mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4539        mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
4540        mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4541        mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4542
4543        /* Set Offset */
4544        mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4545
4546        /* Set coalescing pkts and time */
4547        mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
4548        mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
4549
4550        /* Add number of descriptors ready for receiving packets */
4551        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4552
4553        return 0;
4554}
4555
4556/* Push packets received by the RXQ to BM pool */
4557static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4558                                struct mvpp2_rx_queue *rxq)
4559{
4560        int rx_received, i;
4561
4562        rx_received = mvpp2_rxq_received(port, rxq->id);
4563        if (!rx_received)
4564                return;
4565
4566        for (i = 0; i < rx_received; i++) {
4567                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4568                u32 bm = mvpp2_bm_cookie_build(rx_desc);
4569
4570                mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
4571                                  rx_desc->buf_cookie);
4572        }
4573        mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4574}
4575
4576/* Cleanup Rx queue */
4577static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4578                             struct mvpp2_rx_queue *rxq)
4579{
4580        mvpp2_rxq_drop_pkts(port, rxq);
4581
4582        if (rxq->descs)
4583                dma_free_coherent(port->dev->dev.parent,
4584                                  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4585                                  rxq->descs,
4586                                  rxq->descs_phys);
4587
4588        rxq->descs             = NULL;
4589        rxq->last_desc         = 0;
4590        rxq->next_desc_to_proc = 0;
4591        rxq->descs_phys        = 0;
4592
4593        /* Clear Rx descriptors queue starting address and size;
4594         * free descriptor number
4595         */
4596        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4597        mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4598        mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4599        mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4600}
4601
4602/* Create and initialize a Tx queue */
4603static int mvpp2_txq_init(struct mvpp2_port *port,
4604                          struct mvpp2_tx_queue *txq)
4605{
4606        u32 val;
4607        int cpu, desc, desc_per_txq, tx_port_num;
4608        struct mvpp2_txq_pcpu *txq_pcpu;
4609
4610        txq->size = port->tx_ring_size;
4611
4612        /* Allocate memory for Tx descriptors */
4613        txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4614                                txq->size * MVPP2_DESC_ALIGNED_SIZE,
4615                                &txq->descs_phys, GFP_KERNEL);
4616        if (!txq->descs)
4617                return -ENOMEM;
4618
4619        /* Make sure descriptor address is cache line size aligned  */
4620        BUG_ON(txq->descs !=
4621               PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4622
4623        txq->last_desc = txq->size - 1;
4624
4625        /* Set Tx descriptors queue starting address - indirect access */
4626        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4627        mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
4628        mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4629                                             MVPP2_TXQ_DESC_SIZE_MASK);
4630        mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4631        mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4632                    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4633        val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4634        val &= ~MVPP2_TXQ_PENDING_MASK;
4635        mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4636
4637        /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4638         * for each existing TXQ.
4639         * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4640         * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
4641         */
4642        desc_per_txq = 16;
4643        desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4644               (txq->log_id * desc_per_txq);
4645
4646        mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4647                    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4648                    MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
4649
4650        /* WRR / EJP configuration - indirect access */
4651        tx_port_num = mvpp2_egress_port(port);
4652        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4653
4654        val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4655        val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4656        val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4657        val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4658        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4659
4660        val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4661        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4662                    val);
4663
4664        for_each_present_cpu(cpu) {
4665                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4666                txq_pcpu->size = txq->size;
4667                txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4668                                           sizeof(*txq_pcpu->tx_skb),
4669                                           GFP_KERNEL);
4670                if (!txq_pcpu->tx_skb)
4671                        goto error;
4672
4673                txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
4674                                             sizeof(dma_addr_t), GFP_KERNEL);
4675                if (!txq_pcpu->tx_buffs)
4676                        goto error;
4677
4678                txq_pcpu->count = 0;
4679                txq_pcpu->reserved_num = 0;
4680                txq_pcpu->txq_put_index = 0;
4681                txq_pcpu->txq_get_index = 0;
4682        }
4683
4684        return 0;
4685
4686error:
4687        for_each_present_cpu(cpu) {
4688                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4689                kfree(txq_pcpu->tx_skb);
4690                kfree(txq_pcpu->tx_buffs);
4691        }
4692
4693        dma_free_coherent(port->dev->dev.parent,
4694                          txq->size * MVPP2_DESC_ALIGNED_SIZE,
4695                          txq->descs, txq->descs_phys);
4696
4697        return -ENOMEM;
4698}
4699
4700/* Free allocated TXQ resources */
4701static void mvpp2_txq_deinit(struct mvpp2_port *port,
4702                             struct mvpp2_tx_queue *txq)
4703{
4704        struct mvpp2_txq_pcpu *txq_pcpu;
4705        int cpu;
4706
4707        for_each_present_cpu(cpu) {
4708                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4709                kfree(txq_pcpu->tx_skb);
4710                kfree(txq_pcpu->tx_buffs);
4711        }
4712
4713        if (txq->descs)
4714                dma_free_coherent(port->dev->dev.parent,
4715                                  txq->size * MVPP2_DESC_ALIGNED_SIZE,
4716                                  txq->descs, txq->descs_phys);
4717
4718        txq->descs             = NULL;
4719        txq->last_desc         = 0;
4720        txq->next_desc_to_proc = 0;
4721        txq->descs_phys        = 0;
4722
4723        /* Set minimum bandwidth for disabled TXQs */
4724        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4725
4726        /* Set Tx descriptors queue starting address and size */
4727        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4728        mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4729        mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4730}
4731
4732/* Cleanup Tx ports */
4733static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4734{
4735        struct mvpp2_txq_pcpu *txq_pcpu;
4736        int delay, pending, cpu;
4737        u32 val;
4738
4739        mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4740        val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4741        val |= MVPP2_TXQ_DRAIN_EN_MASK;
4742        mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4743
4744        /* The napi queue has been stopped so wait for all packets
4745         * to be transmitted.
4746         */
4747        delay = 0;
4748        do {
4749                if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4750                        netdev_warn(port->dev,
4751                                    "port %d: cleaning queue %d timed out\n",
4752                                    port->id, txq->log_id);
4753                        break;
4754                }
4755                mdelay(1);
4756                delay++;
4757
4758                pending = mvpp2_txq_pend_desc_num_get(port, txq);
4759        } while (pending);
4760
4761        val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4762        mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4763
4764        for_each_present_cpu(cpu) {
4765                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4766
4767                /* Release all packets */
4768                mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4769
4770                /* Reset queue */
4771                txq_pcpu->count = 0;
4772                txq_pcpu->txq_put_index = 0;
4773                txq_pcpu->txq_get_index = 0;
4774        }
4775}
4776
4777/* Cleanup all Tx queues */
4778static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4779{
4780        struct mvpp2_tx_queue *txq;
4781        int queue;
4782        u32 val;
4783
4784        val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4785
4786        /* Reset Tx ports and delete Tx queues */
4787        val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4788        mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4789
4790        for (queue = 0; queue < txq_number; queue++) {
4791                txq = port->txqs[queue];
4792                mvpp2_txq_clean(port, txq);
4793                mvpp2_txq_deinit(port, txq);
4794        }
4795
4796        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4797
4798        val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4799        mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4800}
4801
4802/* Cleanup all Rx queues */
4803static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4804{
4805        int queue;
4806
4807        for (queue = 0; queue < rxq_number; queue++)
4808                mvpp2_rxq_deinit(port, port->rxqs[queue]);
4809}
4810
4811/* Init all Rx queues for port */
4812static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4813{
4814        int queue, err;
4815
4816        for (queue = 0; queue < rxq_number; queue++) {
4817                err = mvpp2_rxq_init(port, port->rxqs[queue]);
4818                if (err)
4819                        goto err_cleanup;
4820        }
4821        return 0;
4822
4823err_cleanup:
4824        mvpp2_cleanup_rxqs(port);
4825        return err;
4826}
4827
4828/* Init all tx queues for port */
4829static int mvpp2_setup_txqs(struct mvpp2_port *port)
4830{
4831        struct mvpp2_tx_queue *txq;
4832        int queue, err;
4833
4834        for (queue = 0; queue < txq_number; queue++) {
4835                txq = port->txqs[queue];
4836                err = mvpp2_txq_init(port, txq);
4837                if (err)
4838                        goto err_cleanup;
4839        }
4840
4841        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4842        return 0;
4843
4844err_cleanup:
4845        mvpp2_cleanup_txqs(port);
4846        return err;
4847}
4848
4849/* The callback for per-port interrupt */
4850static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4851{
4852        struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4853
4854        mvpp2_interrupts_disable(port);
4855
4856        napi_schedule(&port->napi);
4857
4858        return IRQ_HANDLED;
4859}
4860
4861/* Adjust link */
4862static void mvpp2_link_event(struct net_device *dev)
4863{
4864        struct mvpp2_port *port = netdev_priv(dev);
4865        struct phy_device *phydev = port->phy_dev;
4866        int status_change = 0;
4867        u32 val;
4868
4869        if (phydev->link) {
4870                if ((port->speed != phydev->speed) ||
4871                    (port->duplex != phydev->duplex)) {
4872                        u32 val;
4873
4874                        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4875                        val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4876                                 MVPP2_GMAC_CONFIG_GMII_SPEED |
4877                                 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4878                                 MVPP2_GMAC_AN_SPEED_EN |
4879                                 MVPP2_GMAC_AN_DUPLEX_EN);
4880
4881                        if (phydev->duplex)
4882                                val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4883
4884                        if (phydev->speed == SPEED_1000)
4885                                val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4886                        else if (phydev->speed == SPEED_100)
4887                                val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4888
4889                        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4890
4891                        port->duplex = phydev->duplex;
4892                        port->speed  = phydev->speed;
4893                }
4894        }
4895
4896        if (phydev->link != port->link) {
4897                if (!phydev->link) {
4898                        port->duplex = -1;
4899                        port->speed = 0;
4900                }
4901
4902                port->link = phydev->link;
4903                status_change = 1;
4904        }
4905
4906        if (status_change) {
4907                if (phydev->link) {
4908                        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4909                        val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4910                                MVPP2_GMAC_FORCE_LINK_DOWN);
4911                        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4912                        mvpp2_egress_enable(port);
4913                        mvpp2_ingress_enable(port);
4914                } else {
4915                        mvpp2_ingress_disable(port);
4916                        mvpp2_egress_disable(port);
4917                }
4918                phy_print_status(phydev);
4919        }
4920}
4921
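/* Deferred tx-done handling: completions not processed inline are picked
 * up later. mvpp2_timer_set() arms the per-CPU hrtimer, whose callback
 * only schedules the tx_done tasklet; the tasklet callback
 * (mvpp2_tx_proc_cb()) then walks the Tx queues in softirq context.
 */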
4922static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4923{
4924        ktime_t interval;
4925
4926        if (!port_pcpu->timer_scheduled) {
4927                port_pcpu->timer_scheduled = true;
4928                interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
4929                hrtimer_start(&port_pcpu->tx_done_timer, interval,
4930                              HRTIMER_MODE_REL_PINNED);
4931        }
4932}
4933
4934static void mvpp2_tx_proc_cb(unsigned long data)
4935{
4936        struct net_device *dev = (struct net_device *)data;
4937        struct mvpp2_port *port = netdev_priv(dev);
4938        struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4939        unsigned int tx_todo, cause;
4940
4941        if (!netif_running(dev))
4942                return;
4943        port_pcpu->timer_scheduled = false;
4944
4945        /* Process all the Tx queues */
4946        cause = (1 << txq_number) - 1;
4947        tx_todo = mvpp2_tx_done(port, cause);
4948
4949        /* Set the timer in case not all the packets were processed */
4950        if (tx_todo)
4951                mvpp2_timer_set(port_pcpu);
4952}
4953
4954static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4955{
4956        struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4957                                                         struct mvpp2_port_pcpu,
4958                                                         tx_done_timer);
4959
4960        tasklet_schedule(&port_pcpu->tx_done_tasklet);
4961
4962        return HRTIMER_NORESTART;
4963}
4964
4965/* Main RX/TX processing routines */
4966
4967/* Display more error info */
4968static void mvpp2_rx_error(struct mvpp2_port *port,
4969                           struct mvpp2_rx_desc *rx_desc)
4970{
4971        u32 status = rx_desc->status;
4972
4973        switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4974        case MVPP2_RXD_ERR_CRC:
4975                netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4976                           status, rx_desc->data_size);
4977                break;
4978        case MVPP2_RXD_ERR_OVERRUN:
4979                netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
4980                           status, rx_desc->data_size);
4981                break;
4982        case MVPP2_RXD_ERR_RESOURCE:
4983                netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
4984                           status, rx_desc->data_size);
4985                break;
4986        }
4987}
4988
4989/* Handle RX checksum offload */
4990static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
4991                          struct sk_buff *skb)
4992{
4993        if (((status & MVPP2_RXD_L3_IP4) &&
4994             !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
4995            (status & MVPP2_RXD_L3_IP6))
4996                if (((status & MVPP2_RXD_L4_UDP) ||
4997                     (status & MVPP2_RXD_L4_TCP)) &&
4998                     (status & MVPP2_RXD_L4_CSUM_OK)) {
4999                        skb->csum = 0;
5000                        skb->ip_summed = CHECKSUM_UNNECESSARY;
5001                        return;
5002                }
5003
5004        skb->ip_summed = CHECKSUM_NONE;
5005}
5006
5007/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5008static int mvpp2_rx_refill(struct mvpp2_port *port,
5009                           struct mvpp2_bm_pool *bm_pool,
5010                           u32 bm, int is_recycle)
5011{
5012        struct sk_buff *skb;
5013        dma_addr_t phys_addr;
5014
5015        if (is_recycle &&
5016            (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
5017                return 0;
5018
5019        /* No recycle or too many buffers are in use, so allocate a new skb */
5020        skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
5021        if (!skb)
5022                return -ENOMEM;
5023
5024        mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
5025        atomic_dec(&bm_pool->in_use);
5026        return 0;
5027}
5028
5029/* Handle tx checksum */
5030static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5031{
5032        if (skb->ip_summed == CHECKSUM_PARTIAL) {
5033                int ip_hdr_len = 0;
5034                u8 l4_proto;
5035
5036                if (skb->protocol == htons(ETH_P_IP)) {
5037                        struct iphdr *ip4h = ip_hdr(skb);
5038
5039                        /* Calculate IPv4 checksum and L4 checksum */
5040                        ip_hdr_len = ip4h->ihl;
5041                        l4_proto = ip4h->protocol;
5042                } else if (skb->protocol == htons(ETH_P_IPV6)) {
5043                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
5044
5045                        /* Read l4_protocol from one of IPv6 extra headers */
5046                        if (skb_network_header_len(skb) > 0)
5047                                ip_hdr_len = (skb_network_header_len(skb) >> 2);
5048                        l4_proto = ip6h->nexthdr;
5049                } else {
5050                        return MVPP2_TXD_L4_CSUM_NOT;
5051                }
5052
5053                return mvpp2_txq_desc_csum(skb_network_offset(skb),
5054                                skb->protocol, ip_hdr_len, l4_proto);
5055        }
5056
5057        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5058}
5059
5060static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
5061                              struct mvpp2_rx_desc *rx_desc)
5062{
5063        struct mvpp2_buff_hdr *buff_hdr;
5064        struct sk_buff *skb;
5065        u32 rx_status = rx_desc->status;
5066        u32 buff_phys_addr;
5067        u32 buff_virt_addr;
5068        u32 buff_phys_addr_next;
5069        u32 buff_virt_addr_next;
5070        int mc_id;
5071        int pool_id;
5072
5073        pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5074                   MVPP2_RXD_BM_POOL_ID_OFFS;
5075        buff_phys_addr = rx_desc->buf_phys_addr;
5076        buff_virt_addr = rx_desc->buf_cookie;
5077
5078        do {
5079                skb = (struct sk_buff *)buff_virt_addr;
5080                buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
5081
5082                mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
5083
5084                buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
5085                buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
5086
5087                /* Release buffer */
5088                mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
5089                                     buff_virt_addr, mc_id);
5090
5091                buff_phys_addr = buff_phys_addr_next;
5092                buff_virt_addr = buff_virt_addr_next;
5093
5094        } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5095}
5096
5097/* Main rx processing */
5098static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5099                    struct mvpp2_rx_queue *rxq)
5100{
5101        struct net_device *dev = port->dev;
5102        int rx_received;
5103        int rx_done = 0;
5104        u32 rcvd_pkts = 0;
5105        u32 rcvd_bytes = 0;
5106
5107        /* Get number of received packets and clamp the to-do */
5108        rx_received = mvpp2_rxq_received(port, rxq->id);
5109        if (rx_todo > rx_received)
5110                rx_todo = rx_received;
5111
5112        while (rx_done < rx_todo) {
5113                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5114                struct mvpp2_bm_pool *bm_pool;
5115                struct sk_buff *skb;
5116                dma_addr_t phys_addr;
5117                u32 bm, rx_status;
5118                int pool, rx_bytes, err;
5119
5120                rx_done++;
5121                rx_status = rx_desc->status;
5122                rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5123                phys_addr = rx_desc->buf_phys_addr;
5124
5125                bm = mvpp2_bm_cookie_build(rx_desc);
5126                pool = mvpp2_bm_cookie_pool_get(bm);
5127                bm_pool = &port->priv->bm_pools[pool];
5128                /* Check if buffer header is used */
5129                if (rx_status & MVPP2_RXD_BUF_HDR) {
5130                        mvpp2_buff_hdr_rx(port, rx_desc);
5131                        continue;
5132                }
5133
5134                /* In case of an error, release the requested buffer pointer
5135                 * to the Buffer Manager. This request process is controlled
5136                 * by the hardware, and the information about the buffer is
5137                 * carried in the RX descriptor.
5138                 */
5139                if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5140                err_drop_frame:
5141                        dev->stats.rx_errors++;
5142                        mvpp2_rx_error(port, rx_desc);
5143                        /* Return the buffer to the pool */
5144                        mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5145                                          rx_desc->buf_cookie);
5146                        continue;
5147                }
5148
5149                skb = (struct sk_buff *)rx_desc->buf_cookie;
5150
5151                err = mvpp2_rx_refill(port, bm_pool, bm, 0);
5152                if (err) {
5153                        netdev_err(port->dev, "failed to refill BM pools\n");
5154                        goto err_drop_frame;
5155                }
5156
5157                dma_unmap_single(dev->dev.parent, phys_addr,
5158                                 bm_pool->buf_size, DMA_FROM_DEVICE);
5159
5160                rcvd_pkts++;
5161                rcvd_bytes += rx_bytes;
5162                atomic_inc(&bm_pool->in_use);
5163
5164                skb_reserve(skb, MVPP2_MH_SIZE);
5165                skb_put(skb, rx_bytes);
5166                skb->protocol = eth_type_trans(skb, dev);
5167                mvpp2_rx_csum(port, rx_status, skb);
5168
5169                napi_gro_receive(&port->napi, skb);
5170        }
5171
5172        if (rcvd_pkts) {
5173                struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5174
5175                u64_stats_update_begin(&stats->syncp);
5176                stats->rx_packets += rcvd_pkts;
5177                stats->rx_bytes   += rcvd_bytes;
5178                u64_stats_update_end(&stats->syncp);
5179        }
5180
5181        /* Update Rx queue management counters */
5182        wmb();
5183        mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5184
5185        return rx_todo;
5186}
5187
5188static inline void
5189tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5190                  struct mvpp2_tx_desc *desc)
5191{
5192        dma_unmap_single(dev, desc->buf_phys_addr,
5193                         desc->data_size, DMA_TO_DEVICE);
5194        mvpp2_txq_desc_put(txq);
5195}
5196
5197/* Handle tx fragmentation processing */
5198static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5199                                 struct mvpp2_tx_queue *aggr_txq,
5200                                 struct mvpp2_tx_queue *txq)
5201{
5202        struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5203        struct mvpp2_tx_desc *tx_desc;
5204        int i;
5205        dma_addr_t buf_phys_addr;
5206
5207        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5208                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5209                void *addr = page_address(frag->page.p) + frag->page_offset;
5210
5211                tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5212                tx_desc->phys_txq = txq->id;
5213                tx_desc->data_size = frag->size;
5214
5215                buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
5216                                               tx_desc->data_size,
5217                                               DMA_TO_DEVICE);
5218                if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
5219                        mvpp2_txq_desc_put(txq);
5220                        goto error;
5221                }
5222
5223                tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5224                tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
5225
5226                if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5227                        /* Last descriptor */
5228                        tx_desc->command = MVPP2_TXD_L_DESC;
5229                        mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5230                } else {
5231                        /* Descriptor in the middle: Not First, Not Last */
5232                        tx_desc->command = 0;
5233                        mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5234                }
5235        }
5236
5237        return 0;
5238
5239error:
5240        /* Release all descriptors that were used to map fragments of
5241         * this packet, as well as the corresponding DMA mappings
5242         */
5243        for (i = i - 1; i >= 0; i--) {
5244                tx_desc = txq->descs + i;
5245                tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5246        }
5247
5248        return -ENOMEM;
5249}
5250
5251/* Main tx processing */
5252static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5253{
5254        struct mvpp2_port *port = netdev_priv(dev);
5255        struct mvpp2_tx_queue *txq, *aggr_txq;
5256        struct mvpp2_txq_pcpu *txq_pcpu;
5257        struct mvpp2_tx_desc *tx_desc;
5258        dma_addr_t buf_phys_addr;
5259        int frags = 0;
5260        u16 txq_id;
5261        u32 tx_cmd;
5262
5263        txq_id = skb_get_queue_mapping(skb);
5264        txq = port->txqs[txq_id];
5265        txq_pcpu = this_cpu_ptr(txq->pcpu);
5266        aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5267
5268        frags = skb_shinfo(skb)->nr_frags + 1;
5269
5270        /* Check number of available descriptors */
5271        if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5272            mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5273                                             txq_pcpu, frags)) {
5274                frags = 0;
5275                goto out;
5276        }
5277
5278        /* Get a descriptor for the first part of the packet */
5279        tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5280        tx_desc->phys_txq = txq->id;
5281        tx_desc->data_size = skb_headlen(skb);
5282
5283        buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
5284                                       tx_desc->data_size, DMA_TO_DEVICE);
5285        if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
5286                mvpp2_txq_desc_put(txq);
5287                frags = 0;
5288                goto out;
5289        }
5290        tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5291        tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
5292
5293        tx_cmd = mvpp2_skb_tx_csum(port, skb);
5294
5295        if (frags == 1) {
5296                /* First and Last descriptor */
5297                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5298                tx_desc->command = tx_cmd;
5299                mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5300        } else {
5301                /* First but not Last */
5302                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5303                tx_desc->command = tx_cmd;
5304                mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5305
5306                /* Continue with other skb fragments */
5307                if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5308                        tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5309                        frags = 0;
5310                        goto out;
5311                }
5312        }
5313
5314        txq_pcpu->reserved_num -= frags;
5315        txq_pcpu->count += frags;
5316        aggr_txq->count += frags;
5317
5318        /* Enable transmit */
5319        wmb();
5320        mvpp2_aggr_txq_pend_desc_add(port, frags);
5321
5322        if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5323                struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5324
5325                netif_tx_stop_queue(nq);
5326        }
5327out:
5328        if (frags > 0) {
5329                struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5330
5331                u64_stats_update_begin(&stats->syncp);
5332                stats->tx_packets++;
5333                stats->tx_bytes += skb->len;
5334                u64_stats_update_end(&stats->syncp);
5335        } else {
5336                dev->stats.tx_dropped++;
5337                dev_kfree_skb_any(skb);
5338        }
5339
5340        /* Finalize TX processing */
5341        if (txq_pcpu->count >= txq->done_pkts_coal)
5342                mvpp2_txq_done(port, txq, txq_pcpu);
5343
5344        /* Set the timer in case not all frags were processed */
5345        if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5346                struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5347
5348                mvpp2_timer_set(port_pcpu);
5349        }
5350
5351        return NETDEV_TX_OK;
5352}
5353
5354static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5355{
5356        if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5357                netdev_err(dev, "FCS error\n");
5358        if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5359                netdev_err(dev, "rx fifo overrun error\n");
5360        if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5361                netdev_err(dev, "tx fifo underrun error\n");
5362}
5363
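/* NAPI poll handler: report any miscellaneous error causes, then service
 * the pending Rx queues of this CPU up to @budget packets; interrupts are
 * re-enabled only when the budget is not exhausted and NAPI completes.
 */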
5364static int mvpp2_poll(struct napi_struct *napi, int budget)
5365{
5366        u32 cause_rx_tx, cause_rx, cause_misc;
5367        int rx_done = 0;
5368        struct mvpp2_port *port = netdev_priv(napi->dev);
5369
5370        /* Rx/Tx cause register
5371         *
5372         * Bits 0-15: each bit indicates received packets on the Rx queue
5373         * (bit 0 is for Rx queue 0).
5374         *
5375         * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5376         * (bit 16 is for Tx queue 0).
5377         *
5378         * Each CPU has its own Rx/Tx cause register
5379         */
5380        cause_rx_tx = mvpp2_read(port->priv,
5381                                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5382        cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5383        cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5384
5385        if (cause_misc) {
5386                mvpp2_cause_error(port->dev, cause_misc);
5387
5388                /* Clear the cause register */
5389                mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5390                mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5391                            cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5392        }
5393
5394        cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5395
5396        /* Process RX packets */
5397        cause_rx |= port->pending_cause_rx;
5398        while (cause_rx && budget > 0) {
5399                int count;
5400                struct mvpp2_rx_queue *rxq;
5401
5402                rxq = mvpp2_get_rx_queue(port, cause_rx);
5403                if (!rxq)
5404                        break;
5405
5406                count = mvpp2_rx(port, budget, rxq);
5407                rx_done += count;
5408                budget -= count;
5409                if (budget > 0) {
5410                        /* Clear the bit associated to this Rx queue
5411                         * so that next iteration will continue from
5412                         * the next Rx queue.
5413                         */
5414                        cause_rx &= ~(1 << rxq->logic_rxq);
5415                }
5416        }
5417
5418        if (budget > 0) {
5419                cause_rx = 0;
5420                napi_complete(napi);
5421
5422                mvpp2_interrupts_enable(port);
5423        }
5424        port->pending_cause_rx = cause_rx;
5425        return rx_done;
5426}
5427
5428/* Set hw internals when starting port */
5429static void mvpp2_start_dev(struct mvpp2_port *port)
5430{
5431        mvpp2_gmac_max_rx_size_set(port);
5432        mvpp2_txp_max_tx_size_set(port);
5433
5434        napi_enable(&port->napi);
5435
5436        /* Enable interrupts on all CPUs */
5437        mvpp2_interrupts_enable(port);
5438
5439        mvpp2_port_enable(port);
5440        phy_start(port->phy_dev);
5441        netif_tx_start_all_queues(port->dev);
5442}
5443
5444/* Set hw internals when stopping port */
5445static void mvpp2_stop_dev(struct mvpp2_port *port)
5446{
5447        /* Stop new packets from arriving to RXQs */
5448        mvpp2_ingress_disable(port);
5449
5450        mdelay(10);
5451
5452        /* Disable interrupts on all CPUs */
5453        mvpp2_interrupts_disable(port);
5454
5455        napi_disable(&port->napi);
5456
5457        netif_carrier_off(port->dev);
5458        netif_tx_stop_all_queues(port->dev);
5459
5460        mvpp2_egress_disable(port);
5461        mvpp2_port_disable(port);
5462        phy_stop(port->phy_dev);
5463}
5464
5465/* Return positive if MTU is valid */
5466static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
5467{
5468        if (mtu < 68) {
5469                netdev_err(dev, "cannot change mtu to less than 68\n");
5470                return -EINVAL;
5471        }
5472
5473        /* 9676 == 9700 - 20 and rounding to 8 */
5474        if (mtu > 9676) {
5475                netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
5476                mtu = 9676;
5477        }
5478
5479        if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5480                netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5481                            ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5482                mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5483        }
5484
5485        return mtu;
5486}
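
/* Examples of the clamping above: a request below 68 is rejected with
 * -EINVAL, a jumbo request such as 10000 is reduced to 9676, and any
 * value whose resulting MVPP2_RX_PKT_SIZE() is not 8-byte aligned is
 * rounded up to the next size that is.
 */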
5487
5488static int mvpp2_check_ringparam_valid(struct net_device *dev,
5489                                       struct ethtool_ringparam *ring)
5490{
5491        u16 new_rx_pending = ring->rx_pending;
5492        u16 new_tx_pending = ring->tx_pending;
5493
5494        if (ring->rx_pending == 0 || ring->tx_pending == 0)
5495                return -EINVAL;
5496
5497        if (ring->rx_pending > MVPP2_MAX_RXD)
5498                new_rx_pending = MVPP2_MAX_RXD;
5499        else if (!IS_ALIGNED(ring->rx_pending, 16))
5500                new_rx_pending = ALIGN(ring->rx_pending, 16);
5501
5502        if (ring->tx_pending > MVPP2_MAX_TXD)
5503                new_tx_pending = MVPP2_MAX_TXD;
5504        else if (!IS_ALIGNED(ring->tx_pending, 32))
5505                new_tx_pending = ALIGN(ring->tx_pending, 32);
5506
5507        if (ring->rx_pending != new_rx_pending) {
5508                netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5509                            ring->rx_pending, new_rx_pending);
5510                ring->rx_pending = new_rx_pending;
5511        }
5512
5513        if (ring->tx_pending != new_tx_pending) {
5514                netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5515                            ring->tx_pending, new_tx_pending);
5516                ring->tx_pending = new_tx_pending;
5517        }
5518
5519        return 0;
5520}
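
/* Worked example: asking for 1000 Rx and 1000 Tx descriptors is adjusted
 * to ALIGN(1000, 16) = 1008 and ALIGN(1000, 32) = 1024 respectively, and
 * the user is informed through the netdev_info() messages above.
 */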
5521
5522static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5523{
5524        u32 mac_addr_l, mac_addr_m, mac_addr_h;
5525
5526        mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5527        mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5528        mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5529        addr[0] = (mac_addr_h >> 24) & 0xFF;
5530        addr[1] = (mac_addr_h >> 16) & 0xFF;
5531        addr[2] = (mac_addr_h >> 8) & 0xFF;
5532        addr[3] = mac_addr_h & 0xFF;
5533        addr[4] = mac_addr_m & 0xFF;
5534        addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5535}
5536
5537static int mvpp2_phy_connect(struct mvpp2_port *port)
5538{
5539        struct phy_device *phy_dev;
5540
5541        phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5542                                 port->phy_interface);
5543        if (!phy_dev) {
5544                netdev_err(port->dev, "cannot connect to phy\n");
5545                return -ENODEV;
5546        }
5547        phy_dev->supported &= PHY_GBIT_FEATURES;
5548        phy_dev->advertising = phy_dev->supported;
5549
5550        port->phy_dev = phy_dev;
5551        port->link    = 0;
5552        port->duplex  = 0;
5553        port->speed   = 0;
5554
5555        return 0;
5556}
5557
5558static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5559{
5560        phy_disconnect(port->phy_dev);
5561        port->phy_dev = NULL;
5562}
5563
5564static int mvpp2_open(struct net_device *dev)
5565{
5566        struct mvpp2_port *port = netdev_priv(dev);
5567        unsigned char mac_bcast[ETH_ALEN] = {
5568                        0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5569        int err;
5570
5571        err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5572        if (err) {
5573                netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5574                return err;
5575        }
5576        err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5577                                      dev->dev_addr, true);
5578        if (err) {
5579                netdev_err(dev, "mvpp2_prs_mac_da_accept own address failed\n");
5580                return err;
5581        }
5582        err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5583        if (err) {
5584                netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5585                return err;
5586        }
5587        err = mvpp2_prs_def_flow(port);
5588        if (err) {
5589                netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5590                return err;
5591        }
5592
5593        /* Allocate the Rx/Tx queues */
5594        err = mvpp2_setup_rxqs(port);
5595        if (err) {
5596                netdev_err(port->dev, "cannot allocate Rx queues\n");
5597                return err;
5598        }
5599
5600        err = mvpp2_setup_txqs(port);
5601        if (err) {
5602                netdev_err(port->dev, "cannot allocate Tx queues\n");
5603                goto err_cleanup_rxqs;
5604        }
5605
5606        err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5607        if (err) {
5608                netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5609                goto err_cleanup_txqs;
5610        }
5611
5612        /* Link is down by default */
5613        netif_carrier_off(port->dev);
5614
5615        err = mvpp2_phy_connect(port);
5616        if (err < 0)
5617                goto err_free_irq;
5618
5619        /* Unmask interrupts on all CPUs */
5620        on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5621
5622        mvpp2_start_dev(port);
5623
5624        return 0;
5625
5626err_free_irq:
5627        free_irq(port->irq, port);
5628err_cleanup_txqs:
5629        mvpp2_cleanup_txqs(port);
5630err_cleanup_rxqs:
5631        mvpp2_cleanup_rxqs(port);
5632        return err;
5633}
5634
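    /* ndo_stop: the reverse of mvpp2_open - stop the port, disconnect the
     * PHY, mask interrupts, cancel the per-CPU tx-done timers and tasklets
     * and release the IRQ and the queues.
     */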
5635static int mvpp2_stop(struct net_device *dev)
5636{
5637        struct mvpp2_port *port = netdev_priv(dev);
5638        struct mvpp2_port_pcpu *port_pcpu;
5639        int cpu;
5640
5641        mvpp2_stop_dev(port);
5642        mvpp2_phy_disconnect(port);
5643
5644        /* Mask interrupts on all CPUs */
5645        on_each_cpu(mvpp2_interrupts_mask, port, 1);
5646
5647        free_irq(port->irq, port);
5648        for_each_present_cpu(cpu) {
5649                port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5650
5651                hrtimer_cancel(&port_pcpu->tx_done_timer);
5652                port_pcpu->timer_scheduled = false;
5653                tasklet_kill(&port_pcpu->tx_done_tasklet);
5654        }
5655        mvpp2_cleanup_rxqs(port);
5656        mvpp2_cleanup_txqs(port);
5657
5658        return 0;
5659}
5660
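    /* ndo_set_rx_mode: program the parser promiscuous and all-multicast
     * entries from the interface flags and rebuild the port's multicast
     * address entries.
     */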
5661static void mvpp2_set_rx_mode(struct net_device *dev)
5662{
5663        struct mvpp2_port *port = netdev_priv(dev);
5664        struct mvpp2 *priv = port->priv;
5665        struct netdev_hw_addr *ha;
5666        int id = port->id;
5667        bool allmulti = dev->flags & IFF_ALLMULTI;
5668
5669        mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5670        mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5671        mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5672
5673        /* Remove all port->id's multicast entries */
5674        mvpp2_prs_mcast_del_all(priv, id);
5675
5676        if (allmulti && !netdev_mc_empty(dev)) {
5677                netdev_for_each_mc_addr(ha, dev)
5678                        mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5679        }
5680}
5681
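    /* ndo_set_mac_address: update the parser entry for the new unicast MAC,
     * falling back to the previous address on failure; a running interface
     * is stopped and restarted around the update.
     */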
5682static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5683{
5684        struct mvpp2_port *port = netdev_priv(dev);
5685        const struct sockaddr *addr = p;
5686        int err;
5687
5688        if (!is_valid_ether_addr(addr->sa_data)) {
5689                err = -EADDRNOTAVAIL;
5690                goto error;
5691        }
5692
5693        if (!netif_running(dev)) {
5694                err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5695                if (!err)
5696                        return 0;
5697                /* Reconfigure parser to accept the original MAC address */
5698                err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5699                if (err)
5700                        goto error;
5701        }
5702
5703        mvpp2_stop_dev(port);
5704
5705        err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5706        if (!err)
5707                goto out_start;
5708
5709        /* Reconfigure parser to accept the original MAC address */
5710        err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5711        if (err)
5712                goto error;
5713out_start:
5714        mvpp2_start_dev(port);
5715        mvpp2_egress_enable(port);
5716        mvpp2_ingress_enable(port);
5717        return 0;
5718
5719error:
5720        netdev_err(dev, "failed to change MAC address\n");
5721        return err;
5722}
5723
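    /* ndo_change_mtu: validate the requested MTU, resize the BM pools
     * accordingly and, for a running interface, restart the port with the
     * new packet size.
     */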
5724static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5725{
5726        struct mvpp2_port *port = netdev_priv(dev);
5727        int err;
5728
5729        mtu = mvpp2_check_mtu_valid(dev, mtu);
5730        if (mtu < 0) {
5731                err = mtu;
5732                goto error;
5733        }
5734
5735        if (!netif_running(dev)) {
5736                err = mvpp2_bm_update_mtu(dev, mtu);
5737                if (!err) {
5738                        port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5739                        return 0;
5740                }
5741
5742                /* Reconfigure BM to the original MTU */
5743                err = mvpp2_bm_update_mtu(dev, dev->mtu);
5744                if (err)
5745                        goto error;
5746        }
5747
5748        mvpp2_stop_dev(port);
5749
5750        err = mvpp2_bm_update_mtu(dev, mtu);
5751        if (!err) {
5752                port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5753                goto out_start;
5754        }
5755
5756        /* Reconfigure BM to the original MTU */
5757        err = mvpp2_bm_update_mtu(dev, dev->mtu);
5758        if (err)
5759                goto error;
5760
5761out_start:
5762        mvpp2_start_dev(port);
5763        mvpp2_egress_enable(port);
5764        mvpp2_ingress_enable(port);
5765
5766        return 0;
5767
5768error:
5769        netdev_err(dev, "failed to change MTU\n");
5770        return err;
5771}
5772
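    /* ndo_get_stats64: sum the per-CPU Rx/Tx counters under the u64_stats
     * seqcount and add the error/drop counters kept in dev->stats.
     */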
5773static struct rtnl_link_stats64 *
5774mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5775{
5776        struct mvpp2_port *port = netdev_priv(dev);
5777        unsigned int start;
5778        int cpu;
5779
5780        for_each_possible_cpu(cpu) {
5781                struct mvpp2_pcpu_stats *cpu_stats;
5782                u64 rx_packets;
5783                u64 rx_bytes;
5784                u64 tx_packets;
5785                u64 tx_bytes;
5786
5787                cpu_stats = per_cpu_ptr(port->stats, cpu);
5788                do {
5789                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5790                        rx_packets = cpu_stats->rx_packets;
5791                        rx_bytes   = cpu_stats->rx_bytes;
5792                        tx_packets = cpu_stats->tx_packets;
5793                        tx_bytes   = cpu_stats->tx_bytes;
5794                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5795
5796                stats->rx_packets += rx_packets;
5797                stats->rx_bytes   += rx_bytes;
5798                stats->tx_packets += tx_packets;
5799                stats->tx_bytes   += tx_bytes;
5800        }
5801
5802        stats->rx_errors        = dev->stats.rx_errors;
5803        stats->rx_dropped       = dev->stats.rx_dropped;
5804        stats->tx_dropped       = dev->stats.tx_dropped;
5805
5806        return stats;
5807}
5808
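    /* ndo_do_ioctl: forward MII ioctls to the PHY and refresh the link
     * state when the ioctl succeeds.
     */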
5809static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5810{
5811        struct mvpp2_port *port = netdev_priv(dev);
5812        int ret;
5813
5814        if (!port->phy_dev)
5815                return -ENOTSUPP;
5816
5817        ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
5818        if (!ret)
5819                mvpp2_link_event(dev);
5820
5821        return ret;
5822}
5823
5824/* Ethtool methods */
5825
5826/* Get settings (phy address, speed) for ethtool */
5827static int mvpp2_ethtool_get_settings(struct net_device *dev,
5828                                      struct ethtool_cmd *cmd)
5829{
5830        struct mvpp2_port *port = netdev_priv(dev);
5831
5832        if (!port->phy_dev)
5833                return -ENODEV;
5834        return phy_ethtool_gset(port->phy_dev, cmd);
5835}
5836
5837/* Set settings (phy address, speed) for ethtool */
5838static int mvpp2_ethtool_set_settings(struct net_device *dev,
5839                                      struct ethtool_cmd *cmd)
5840{
5841        struct mvpp2_port *port = netdev_priv(dev);
5842
5843        if (!port->phy_dev)
5844                return -ENODEV;
5845        return phy_ethtool_sset(port->phy_dev, cmd);
5846}
5847
5848/* Set interrupt coalescing for ethtool */
5849static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5850                                      struct ethtool_coalesce *c)
5851{
5852        struct mvpp2_port *port = netdev_priv(dev);
5853        int queue;
5854
5855        for (queue = 0; queue < rxq_number; queue++) {
5856                struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5857
5858                rxq->time_coal = c->rx_coalesce_usecs;
5859                rxq->pkts_coal = c->rx_max_coalesced_frames;
5860                mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
5861                mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
5862        }
5863
5864        for (queue = 0; queue < txq_number; queue++) {
5865                struct mvpp2_tx_queue *txq = port->txqs[queue];
5866
5867                txq->done_pkts_coal = c->tx_max_coalesced_frames;
5868        }
5869
5870        return 0;
5871}
5872
5873/* Get interrupt coalescing for ethtool */
5874static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5875                                      struct ethtool_coalesce *c)
5876{
5877        struct mvpp2_port *port = netdev_priv(dev);
5878
5879        c->rx_coalesce_usecs        = port->rxqs[0]->time_coal;
5880        c->rx_max_coalesced_frames  = port->rxqs[0]->pkts_coal;
5881        c->tx_max_coalesced_frames  = port->txqs[0]->done_pkts_coal;
5882        return 0;
5883}
5884
5885static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5886                                      struct ethtool_drvinfo *drvinfo)
5887{
5888        strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5889                sizeof(drvinfo->driver));
5890        strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5891                sizeof(drvinfo->version));
5892        strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5893                sizeof(drvinfo->bus_info));
5894}
5895
5896static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5897                                        struct ethtool_ringparam *ring)
5898{
5899        struct mvpp2_port *port = netdev_priv(dev);
5900
5901        ring->rx_max_pending = MVPP2_MAX_RXD;
5902        ring->tx_max_pending = MVPP2_MAX_TXD;
5903        ring->rx_pending = port->rx_ring_size;
5904        ring->tx_pending = port->tx_ring_size;
5905}
5906
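    /* Set ring sizes for ethtool; a running interface has its queues torn
     * down and reallocated, reverting to the previous sizes on failure.
     */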
5907static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5908                                       struct ethtool_ringparam *ring)
5909{
5910        struct mvpp2_port *port = netdev_priv(dev);
5911        u16 prev_rx_ring_size = port->rx_ring_size;
5912        u16 prev_tx_ring_size = port->tx_ring_size;
5913        int err;
5914
5915        err = mvpp2_check_ringparam_valid(dev, ring);
5916        if (err)
5917                return err;
5918
5919        if (!netif_running(dev)) {
5920                port->rx_ring_size = ring->rx_pending;
5921                port->tx_ring_size = ring->tx_pending;
5922                return 0;
5923        }
5924
5925        /* The interface is running, so we have to force a
5926         * reallocation of the queues
5927         */
5928        mvpp2_stop_dev(port);
5929        mvpp2_cleanup_rxqs(port);
5930        mvpp2_cleanup_txqs(port);
5931
5932        port->rx_ring_size = ring->rx_pending;
5933        port->tx_ring_size = ring->tx_pending;
5934
5935        err = mvpp2_setup_rxqs(port);
5936        if (err) {
5937                /* Reallocate Rx queues with the original ring size */
5938                port->rx_ring_size = prev_rx_ring_size;
5939                ring->rx_pending = prev_rx_ring_size;
5940                err = mvpp2_setup_rxqs(port);
5941                if (err)
5942                        goto err_out;
5943        }
5944        err = mvpp2_setup_txqs(port);
5945        if (err) {
5946                /* Reallocate Tx queues with the original ring size */
5947                port->tx_ring_size = prev_tx_ring_size;
5948                ring->tx_pending = prev_tx_ring_size;
5949                err = mvpp2_setup_txqs(port);
5950                if (err)
5951                        goto err_clean_rxqs;
5952        }
5953
5954        mvpp2_start_dev(port);
5955        mvpp2_egress_enable(port);
5956        mvpp2_ingress_enable(port);
5957
5958        return 0;
5959
5960err_clean_rxqs:
5961        mvpp2_cleanup_rxqs(port);
5962err_out:
5963        netdev_err(dev, "failed to change ring parameters\n");
5964        return err;
5965}
5966
5967/* Device ops */
5968
5969static const struct net_device_ops mvpp2_netdev_ops = {
5970        .ndo_open               = mvpp2_open,
5971        .ndo_stop               = mvpp2_stop,
5972        .ndo_start_xmit         = mvpp2_tx,
5973        .ndo_set_rx_mode        = mvpp2_set_rx_mode,
5974        .ndo_set_mac_address    = mvpp2_set_mac_address,
5975        .ndo_change_mtu         = mvpp2_change_mtu,
5976        .ndo_get_stats64        = mvpp2_get_stats64,
5977        .ndo_do_ioctl           = mvpp2_ioctl,
5978};
5979
5980static const struct ethtool_ops mvpp2_eth_tool_ops = {
5981        .get_link       = ethtool_op_get_link,
5982        .get_settings   = mvpp2_ethtool_get_settings,
5983        .set_settings   = mvpp2_ethtool_set_settings,
5984        .set_coalesce   = mvpp2_ethtool_set_coalesce,
5985        .get_coalesce   = mvpp2_ethtool_get_coalesce,
5986        .get_drvinfo    = mvpp2_ethtool_get_drvinfo,
5987        .get_ringparam  = mvpp2_ethtool_get_ringparam,
5988        .set_ringparam  = mvpp2_ethtool_set_ringparam,
5989};
5990
5991/* Driver initialization */
5992
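    /* Basic MAC setup for a port: configure the MII mode, disable periodic
     * XON, enable flow-control advertisement and reset the port.
     */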
5993static void mvpp2_port_power_up(struct mvpp2_port *port)
5994{
5995        mvpp2_port_mii_set(port);
5996        mvpp2_port_periodic_xon_disable(port);
5997        mvpp2_port_fc_adv_enable(port);
5998        mvpp2_port_reset(port);
5999}
6000
6001/* Initialize port HW */
6002static int mvpp2_port_init(struct mvpp2_port *port)
6003{
6004        struct device *dev = port->dev->dev.parent;
6005        struct mvpp2 *priv = port->priv;
6006        struct mvpp2_txq_pcpu *txq_pcpu;
6007        int queue, cpu, err;
6008
6009        if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
6010                return -EINVAL;
6011
6012        /* Disable port */
6013        mvpp2_egress_disable(port);
6014        mvpp2_port_disable(port);
6015
6016        port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6017                                  GFP_KERNEL);
6018        if (!port->txqs)
6019                return -ENOMEM;
6020
6021        /* Associate physical Tx queues with this port and initialize them.
6022         * The mapping is predefined.
6023         */
6024        for (queue = 0; queue < txq_number; queue++) {
6025                int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6026                struct mvpp2_tx_queue *txq;
6027
6028                txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6029                if (!txq)
6030                        return -ENOMEM;
6031
6032                txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6033                if (!txq->pcpu) {
6034                        err = -ENOMEM;
6035                        goto err_free_percpu;
6036                }
6037
6038                txq->id = queue_phy_id;
6039                txq->log_id = queue;
6040                txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6041                for_each_present_cpu(cpu) {
6042                        txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6043                        txq_pcpu->cpu = cpu;
6044                }
6045
6046                port->txqs[queue] = txq;
6047        }
6048
6049        port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6050                                  GFP_KERNEL);
6051        if (!port->rxqs) {
6052                err = -ENOMEM;
6053                goto err_free_percpu;
6054        }
6055
6056        /* Allocate and initialize Rx queue for this port */
6057        for (queue = 0; queue < rxq_number; queue++) {
6058                struct mvpp2_rx_queue *rxq;
6059
6060                /* Map physical Rx queue to port's logical Rx queue */
6061                rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6062                if (!rxq) {
                            err = -ENOMEM;
6063                            goto err_free_percpu;
                    }
6064                /* Map this Rx queue to a physical queue */
6065                rxq->id = port->first_rxq + queue;
6066                rxq->port = port->id;
6067                rxq->logic_rxq = queue;
6068
6069                port->rxqs[queue] = rxq;
6070        }
6071
6072        /* Configure Rx queue group interrupt for this port */
6073        mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6074
6075        /* Create Rx descriptor rings */
6076        for (queue = 0; queue < rxq_number; queue++) {
6077                struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6078
6079                rxq->size = port->rx_ring_size;
6080                rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6081                rxq->time_coal = MVPP2_RX_COAL_USEC;
6082        }
6083
6084        mvpp2_ingress_disable(port);
6085
6086        /* Port default configuration */
6087        mvpp2_defaults_set(port);
6088
6089        /* Port's classifier configuration */
6090        mvpp2_cls_oversize_rxq_set(port);
6091        mvpp2_cls_port_config(port);
6092
6093        /* Provide an initial Rx packet size */
6094        port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6095
6096        /* Initialize BM pools for software forwarding (swf) */
6097        err = mvpp2_swf_bm_pool_init(port);
6098        if (err)
6099                goto err_free_percpu;
6100
6101        return 0;
6102
6103err_free_percpu:
6104        for (queue = 0; queue < txq_number; queue++) {
6105                if (!port->txqs[queue])
6106                        continue;
6107                free_percpu(port->txqs[queue]->pcpu);
6108        }
6109        return err;
6110}
6111
6112/* Ports initialization */
6113static int mvpp2_port_probe(struct platform_device *pdev,
6114                            struct device_node *port_node,
6115                            struct mvpp2 *priv,
6116                            int *next_first_rxq)
6117{
6118        struct device_node *phy_node;
6119        struct mvpp2_port *port;
6120        struct mvpp2_port_pcpu *port_pcpu;
6121        struct net_device *dev;
6122        struct resource *res;
6123        const char *dt_mac_addr;
6124        const char *mac_from;
6125        char hw_mac_addr[ETH_ALEN];
6126        u32 id;
6127        int features;
6128        int phy_mode;
6129        int priv_common_regs_num = 2;
6130        int err, i, cpu;
6131
6132        dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6133                                 rxq_number);
6134        if (!dev)
6135                return -ENOMEM;
6136
6137        phy_node = of_parse_phandle(port_node, "phy", 0);
6138        if (!phy_node) {
6139                dev_err(&pdev->dev, "missing phy\n");
6140                err = -ENODEV;
6141                goto err_free_netdev;
6142        }
6143
6144        phy_mode = of_get_phy_mode(port_node);
6145        if (phy_mode < 0) {
6146                dev_err(&pdev->dev, "incorrect phy mode\n");
6147                err = phy_mode;
6148                goto err_free_netdev;
6149        }
6150
6151        if (of_property_read_u32(port_node, "port-id", &id)) {
6152                err = -EINVAL;
6153                dev_err(&pdev->dev, "missing port-id value\n");
6154                goto err_free_netdev;
6155        }
6156
6157        dev->tx_queue_len = MVPP2_MAX_TXD;
6158        dev->watchdog_timeo = 5 * HZ;
6159        dev->netdev_ops = &mvpp2_netdev_ops;
6160        dev->ethtool_ops = &mvpp2_eth_tool_ops;
6161
6162        port = netdev_priv(dev);
6163
6164        port->irq = irq_of_parse_and_map(port_node, 0);
6165        if (port->irq <= 0) {
6166                err = -EINVAL;
6167                goto err_free_netdev;
6168        }
6169
6170        if (of_property_read_bool(port_node, "marvell,loopback"))
6171                port->flags |= MVPP2_F_LOOPBACK;
6172
6173        port->priv = priv;
6174        port->id = id;
6175        port->first_rxq = *next_first_rxq;
6176        port->phy_node = phy_node;
6177        port->phy_interface = phy_mode;
6178
6179        res = platform_get_resource(pdev, IORESOURCE_MEM,
6180                                    priv_common_regs_num + id);
6181        port->base = devm_ioremap_resource(&pdev->dev, res);
6182        if (IS_ERR(port->base)) {
6183                err = PTR_ERR(port->base);
6184                goto err_free_irq;
6185        }
6186
6187        /* Alloc per-cpu stats */
6188        port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6189        if (!port->stats) {
6190                err = -ENOMEM;
6191                goto err_free_irq;
6192        }
6193
6194        dt_mac_addr = of_get_mac_address(port_node);
6195        if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6196                mac_from = "device tree";
6197                ether_addr_copy(dev->dev_addr, dt_mac_addr);
6198        } else {
6199                mvpp2_get_mac_address(port, hw_mac_addr);
6200                if (is_valid_ether_addr(hw_mac_addr)) {
6201                        mac_from = "hardware";
6202                        ether_addr_copy(dev->dev_addr, hw_mac_addr);
6203                } else {
6204                        mac_from = "random";
6205                        eth_hw_addr_random(dev);
6206                }
6207        }
6208
6209        port->tx_ring_size = MVPP2_MAX_TXD;
6210        port->rx_ring_size = MVPP2_MAX_RXD;
6211        port->dev = dev;
6212        SET_NETDEV_DEV(dev, &pdev->dev);
6213
6214        err = mvpp2_port_init(port);
6215        if (err < 0) {
6216                dev_err(&pdev->dev, "failed to init port %d\n", id);
6217                goto err_free_stats;
6218        }
6219        mvpp2_port_power_up(port);
6220
6221        port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6222        if (!port->pcpu) {
6223                err = -ENOMEM;
6224                goto err_free_txq_pcpu;
6225        }
6226
6227        for_each_present_cpu(cpu) {
6228                port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6229
6230                hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6231                             HRTIMER_MODE_REL_PINNED);
6232                port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6233                port_pcpu->timer_scheduled = false;
6234
6235                tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6236                             (unsigned long)dev);
6237        }
6238
6239        netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6240        features = NETIF_F_SG | NETIF_F_IP_CSUM;
6241        dev->features = features | NETIF_F_RXCSUM;
6242        dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6243        dev->vlan_features |= features;
6244
6245        err = register_netdev(dev);
6246        if (err < 0) {
6247                dev_err(&pdev->dev, "failed to register netdev\n");
6248                goto err_free_port_pcpu;
6249        }
6250        netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);
6251
6252        /* Increment the first Rx queue number to be used by the next port */
6253        *next_first_rxq += rxq_number;
6254        priv->port_list[id] = port;
6255        return 0;
6256
6257err_free_port_pcpu:
6258        free_percpu(port->pcpu);
6259err_free_txq_pcpu:
6260        for (i = 0; i < txq_number; i++)
6261                free_percpu(port->txqs[i]->pcpu);
6262err_free_stats:
6263        free_percpu(port->stats);
6264err_free_irq:
6265        irq_dispose_mapping(port->irq);
6266err_free_netdev:
6267        free_netdev(dev);
6268        return err;
6269}
6270
6271/* Ports removal routine */
6272static void mvpp2_port_remove(struct mvpp2_port *port)
6273{
6274        int i;
6275
6276        unregister_netdev(port->dev);
6277        free_percpu(port->pcpu);
6278        free_percpu(port->stats);
6279        for (i = 0; i < txq_number; i++)
6280                free_percpu(port->txqs[i]->pcpu);
6281        irq_dispose_mapping(port->irq);
6282        free_netdev(port->dev);
6283}
6284
6285/* Initialize decoding windows */
6286static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6287                                    struct mvpp2 *priv)
6288{
6289        u32 win_enable;
6290        int i;
6291
6292        for (i = 0; i < 6; i++) {
6293                mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6294                mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6295
6296                if (i < 4)
6297                        mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6298        }
6299
6300        win_enable = 0;
6301
6302        for (i = 0; i < dram->num_cs; i++) {
6303                const struct mbus_dram_window *cs = dram->cs + i;
6304
6305                mvpp2_write(priv, MVPP2_WIN_BASE(i),
6306                            (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6307                            dram->mbus_dram_target_id);
6308
6309                mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6310                            (cs->size - 1) & 0xffff0000);
6311
6312                win_enable |= (1 << i);
6313        }
6314
6315        mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6316}
6317
6318/* Initialize Rx FIFOs */
6319static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6320{
6321        int port;
6322
6323        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6324                mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6325                            MVPP2_RX_FIFO_PORT_DATA_SIZE);
6326                mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6327                            MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6328        }
6329
6330        mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6331                    MVPP2_RX_FIFO_PORT_MIN_PKT);
6332        mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6333}
6334
6335/* Initialize network controller common part HW */
6336static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6337{
6338        const struct mbus_dram_target_info *dram_target_info;
6339        int err, i;
6340        u32 val;
6341
6342        /* Checks for hardware constraints */
6343        if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6344            (txq_number > MVPP2_MAX_TXQ)) {
6345                dev_err(&pdev->dev, "invalid queue size parameter\n");
6346                return -EINVAL;
6347        }
6348
6349        /* MBUS windows configuration */
6350        dram_target_info = mv_mbus_dram_info();
6351        if (dram_target_info)
6352                mvpp2_conf_mbus_windows(dram_target_info, priv);
6353
6354        /* Disable HW PHY polling */
6355        val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6356        val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6357        writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6358
6359        /* Allocate and initialize aggregated TXQs */
6360        priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6361                                       sizeof(struct mvpp2_tx_queue),
6362                                       GFP_KERNEL);
6363        if (!priv->aggr_txqs)
6364                return -ENOMEM;
6365
6366        for_each_present_cpu(i) {
6367                priv->aggr_txqs[i].id = i;
6368                priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6369                err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6370                                          MVPP2_AGGR_TXQ_SIZE, i, priv);
6371                if (err < 0)
6372                        return err;
6373        }
6374
6375        /* Rx Fifo Init */
6376        mvpp2_rx_fifo_init(priv);
6377
6378        /* Reset Rx queue group interrupt configuration */
6379        for (i = 0; i < MVPP2_MAX_PORTS; i++)
6380                mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6381
6382        writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6383               priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6384
6385        /* Allow cache snoop when transmitting packets */
6386        mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6387
6388        /* Buffer Manager initialization */
6389        err = mvpp2_bm_init(pdev, priv);
6390        if (err < 0)
6391                return err;
6392
6393        /* Parser default initialization */
6394        err = mvpp2_prs_default_init(pdev, priv);
6395        if (err < 0)
6396                return err;
6397
6398        /* Classifier default initialization */
6399        mvpp2_cls_init(priv);
6400
6401        return 0;
6402}
6403
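    /* Platform driver probe: map the packet processor and LMS register
     * ranges, enable the pp/gop clocks, initialize the common hardware and
     * register one network device per enabled port node.
     */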
6404static int mvpp2_probe(struct platform_device *pdev)
6405{
6406        struct device_node *dn = pdev->dev.of_node;
6407        struct device_node *port_node;
6408        struct mvpp2 *priv;
6409        struct resource *res;
6410        int port_count, first_rxq;
6411        int err;
6412
6413        priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6414        if (!priv)
6415                return -ENOMEM;
6416
6417        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6418        priv->base = devm_ioremap_resource(&pdev->dev, res);
6419        if (IS_ERR(priv->base))
6420                return PTR_ERR(priv->base);
6421
6422        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6423        priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6424        if (IS_ERR(priv->lms_base))
6425                return PTR_ERR(priv->lms_base);
6426
6427        priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6428        if (IS_ERR(priv->pp_clk))
6429                return PTR_ERR(priv->pp_clk);
6430        err = clk_prepare_enable(priv->pp_clk);
6431        if (err < 0)
6432                return err;
6433
6434        priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6435        if (IS_ERR(priv->gop_clk)) {
6436                err = PTR_ERR(priv->gop_clk);
6437                goto err_pp_clk;
6438        }
6439        err = clk_prepare_enable(priv->gop_clk);
6440        if (err < 0)
6441                goto err_pp_clk;
6442
6443        /* Get system's tclk rate */
6444        priv->tclk = clk_get_rate(priv->pp_clk);
6445
6446        /* Initialize network controller */
6447        err = mvpp2_init(pdev, priv);
6448        if (err < 0) {
6449                dev_err(&pdev->dev, "failed to initialize controller\n");
6450                goto err_gop_clk;
6451        }
6452
6453        port_count = of_get_available_child_count(dn);
6454        if (port_count == 0) {
6455                dev_err(&pdev->dev, "no ports enabled\n");
6456                err = -ENODEV;
6457                goto err_gop_clk;
6458        }
6459
6460        priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6461                                      sizeof(struct mvpp2_port *),
6462                                      GFP_KERNEL);
6463        if (!priv->port_list) {
6464                err = -ENOMEM;
6465                goto err_gop_clk;
6466        }
6467
6468        /* Initialize ports */
6469        first_rxq = 0;
6470        for_each_available_child_of_node(dn, port_node) {
6471                err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6472                if (err < 0)
6473                        goto err_gop_clk;
6474        }
6475
6476        platform_set_drvdata(pdev, priv);
6477        return 0;
6478
6479err_gop_clk:
6480        clk_disable_unprepare(priv->gop_clk);
6481err_pp_clk:
6482        clk_disable_unprepare(priv->pp_clk);
6483        return err;
6484}
6485
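    /* Platform driver remove: unregister and free the ports, destroy the BM
     * pools, free the aggregated Tx queue descriptors and disable the
     * clocks.
     */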
6486static int mvpp2_remove(struct platform_device *pdev)
6487{
6488        struct mvpp2 *priv = platform_get_drvdata(pdev);
6489        struct device_node *dn = pdev->dev.of_node;
6490        struct device_node *port_node;
6491        int i = 0;
6492
6493        for_each_available_child_of_node(dn, port_node) {
6494                if (priv->port_list[i])
6495                        mvpp2_port_remove(priv->port_list[i]);
6496                i++;
6497        }
6498
6499        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6500                struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6501
6502                mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6503        }
6504
6505        for_each_present_cpu(i) {
6506                struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6507
6508                dma_free_coherent(&pdev->dev,
6509                                  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6510                                  aggr_txq->descs,
6511                                  aggr_txq->descs_phys);
6512        }
6513
6514        clk_disable_unprepare(priv->pp_clk);
6515        clk_disable_unprepare(priv->gop_clk);
6516
6517        return 0;
6518}
6519
6520static const struct of_device_id mvpp2_match[] = {
6521        { .compatible = "marvell,armada-375-pp2" },
6522        { }
6523};
6524MODULE_DEVICE_TABLE(of, mvpp2_match);
6525
6526static struct platform_driver mvpp2_driver = {
6527        .probe = mvpp2_probe,
6528        .remove = mvpp2_remove,
6529        .driver = {
6530                .name = MVPP2_DRIVER_NAME,
6531                .of_match_table = mvpp2_match,
6532        },
6533};
6534
6535module_platform_driver(mvpp2_driver);
6536
6537MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6538MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6539MODULE_LICENSE("GPL v2");
6540