linux/drivers/net/ethernet/marvell/mvpp2.c
   1/*
   2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
   3 *
   4 * Copyright (C) 2014 Marvell
   5 *
   6 * Marcin Wojtas <mw@semihalf.com>
   7 *
   8 * This file is licensed under the terms of the GNU General Public
   9 * License version 2. This program is licensed "as is" without any
  10 * warranty of any kind, whether express or implied.
  11 */
  12
  13#include <linux/kernel.h>
  14#include <linux/netdevice.h>
  15#include <linux/etherdevice.h>
  16#include <linux/platform_device.h>
  17#include <linux/skbuff.h>
  18#include <linux/inetdevice.h>
  19#include <linux/mbus.h>
  20#include <linux/module.h>
  21#include <linux/interrupt.h>
  22#include <linux/cpumask.h>
  23#include <linux/of.h>
  24#include <linux/of_irq.h>
  25#include <linux/of_mdio.h>
  26#include <linux/of_net.h>
  27#include <linux/of_address.h>
  28#include <linux/of_device.h>
  29#include <linux/phy.h>
  30#include <linux/clk.h>
  31#include <linux/hrtimer.h>
  32#include <linux/ktime.h>
  33#include <uapi/linux/ppp_defs.h>
  34#include <net/ip.h>
  35#include <net/ipv6.h>
  36
  37/* RX Fifo Registers */
  38#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)       (0x00 + 4 * (port))
  39#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)       (0x20 + 4 * (port))
  40#define MVPP2_RX_MIN_PKT_SIZE_REG               0x60
  41#define MVPP2_RX_FIFO_INIT_REG                  0x64
  42
  43/* RX DMA Top Registers */
  44#define MVPP2_RX_CTRL_REG(port)                 (0x140 + 4 * (port))
  45#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)    (((s) & 0xfff) << 16)
  46#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK   BIT(31)
  47#define MVPP2_POOL_BUF_SIZE_REG(pool)           (0x180 + 4 * (pool))
  48#define     MVPP2_POOL_BUF_SIZE_OFFSET          5
  49#define MVPP2_RXQ_CONFIG_REG(rxq)               (0x800 + 4 * (rxq))
  50#define     MVPP2_SNOOP_PKT_SIZE_MASK           0x1ff
  51#define     MVPP2_SNOOP_BUF_HDR_MASK            BIT(9)
  52#define     MVPP2_RXQ_POOL_SHORT_OFFS           20
  53#define     MVPP21_RXQ_POOL_SHORT_MASK          0x700000
  54#define     MVPP22_RXQ_POOL_SHORT_MASK          0xf00000
  55#define     MVPP2_RXQ_POOL_LONG_OFFS            24
  56#define     MVPP21_RXQ_POOL_LONG_MASK           0x7000000
  57#define     MVPP22_RXQ_POOL_LONG_MASK           0xf000000
  58#define     MVPP2_RXQ_PACKET_OFFSET_OFFS        28
  59#define     MVPP2_RXQ_PACKET_OFFSET_MASK        0x70000000
  60#define     MVPP2_RXQ_DISABLE_MASK              BIT(31)
  61
  62/* Parser Registers */
  63#define MVPP2_PRS_INIT_LOOKUP_REG               0x1000
  64#define     MVPP2_PRS_PORT_LU_MAX               0xf
  65#define     MVPP2_PRS_PORT_LU_MASK(port)        (0xff << ((port) * 4))
  66#define     MVPP2_PRS_PORT_LU_VAL(port, val)    ((val) << ((port) * 4))
  67#define MVPP2_PRS_INIT_OFFS_REG(port)           (0x1004 + ((port) & 4))
  68#define     MVPP2_PRS_INIT_OFF_MASK(port)       (0x3f << (((port) % 4) * 8))
  69#define     MVPP2_PRS_INIT_OFF_VAL(port, val)   ((val) << (((port) % 4) * 8))
  70#define MVPP2_PRS_MAX_LOOP_REG(port)            (0x100c + ((port) & 4))
  71#define     MVPP2_PRS_MAX_LOOP_MASK(port)       (0xff << (((port) % 4) * 8))
  72#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)   ((val) << (((port) % 4) * 8))
  73#define MVPP2_PRS_TCAM_IDX_REG                  0x1100
  74#define MVPP2_PRS_TCAM_DATA_REG(idx)            (0x1104 + (idx) * 4)
  75#define     MVPP2_PRS_TCAM_INV_MASK             BIT(31)
  76#define MVPP2_PRS_SRAM_IDX_REG                  0x1200
  77#define MVPP2_PRS_SRAM_DATA_REG(idx)            (0x1204 + (idx) * 4)
  78#define MVPP2_PRS_TCAM_CTRL_REG                 0x1230
  79#define     MVPP2_PRS_TCAM_EN_MASK              BIT(0)
  80
  81/* Classifier Registers */
  82#define MVPP2_CLS_MODE_REG                      0x1800
  83#define     MVPP2_CLS_MODE_ACTIVE_MASK          BIT(0)
  84#define MVPP2_CLS_PORT_WAY_REG                  0x1810
  85#define     MVPP2_CLS_PORT_WAY_MASK(port)       (1 << (port))
  86#define MVPP2_CLS_LKP_INDEX_REG                 0x1814
  87#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS        6
  88#define MVPP2_CLS_LKP_TBL_REG                   0x1818
  89#define     MVPP2_CLS_LKP_TBL_RXQ_MASK          0xff
  90#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK    BIT(25)
  91#define MVPP2_CLS_FLOW_INDEX_REG                0x1820
  92#define MVPP2_CLS_FLOW_TBL0_REG                 0x1824
  93#define MVPP2_CLS_FLOW_TBL1_REG                 0x1828
  94#define MVPP2_CLS_FLOW_TBL2_REG                 0x182c
  95#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)    (0x1980 + ((port) * 4))
  96#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS     3
  97#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK     0x7
  98#define MVPP2_CLS_SWFWD_P2HQ_REG(port)          (0x19b0 + ((port) * 4))
  99#define MVPP2_CLS_SWFWD_PCTRL_REG               0x19d0
 100#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)    (1 << (port))
 101
 102/* Descriptor Manager Top Registers */
 103#define MVPP2_RXQ_NUM_REG                       0x2040
 104#define MVPP2_RXQ_DESC_ADDR_REG                 0x2044
 105#define     MVPP22_DESC_ADDR_OFFS               8
 106#define MVPP2_RXQ_DESC_SIZE_REG                 0x2048
 107#define     MVPP2_RXQ_DESC_SIZE_MASK            0x3ff0
 108#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)        (0x3000 + 4 * (rxq))
 109#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET      0
 110#define     MVPP2_RXQ_NUM_NEW_OFFSET            16
 111#define MVPP2_RXQ_STATUS_REG(rxq)               (0x3400 + 4 * (rxq))
 112#define     MVPP2_RXQ_OCCUPIED_MASK             0x3fff
 113#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET       16
 114#define     MVPP2_RXQ_NON_OCCUPIED_MASK         0x3fff0000
 115#define MVPP2_RXQ_THRESH_REG                    0x204c
 116#define     MVPP2_OCCUPIED_THRESH_OFFSET        0
 117#define     MVPP2_OCCUPIED_THRESH_MASK          0x3fff
 118#define MVPP2_RXQ_INDEX_REG                     0x2050
 119#define MVPP2_TXQ_NUM_REG                       0x2080
 120#define MVPP2_TXQ_DESC_ADDR_REG                 0x2084
 121#define MVPP2_TXQ_DESC_SIZE_REG                 0x2088
 122#define     MVPP2_TXQ_DESC_SIZE_MASK            0x3ff0
 123#define MVPP2_AGGR_TXQ_UPDATE_REG               0x2090
 124#define MVPP2_TXQ_INDEX_REG                     0x2098
 125#define MVPP2_TXQ_PREF_BUF_REG                  0x209c
 126#define     MVPP2_PREF_BUF_PTR(desc)            ((desc) & 0xfff)
 127#define     MVPP2_PREF_BUF_SIZE_4               (BIT(12) | BIT(13))
 128#define     MVPP2_PREF_BUF_SIZE_16              (BIT(12) | BIT(14))
 129#define     MVPP2_PREF_BUF_THRESH(val)          ((val) << 17)
 130#define     MVPP2_TXQ_DRAIN_EN_MASK             BIT(31)
 131#define MVPP2_TXQ_PENDING_REG                   0x20a0
 132#define     MVPP2_TXQ_PENDING_MASK              0x3fff
 133#define MVPP2_TXQ_INT_STATUS_REG                0x20a4
 134#define MVPP2_TXQ_SENT_REG(txq)                 (0x3c00 + 4 * (txq))
 135#define     MVPP2_TRANSMITTED_COUNT_OFFSET      16
 136#define     MVPP2_TRANSMITTED_COUNT_MASK        0x3fff0000
 137#define MVPP2_TXQ_RSVD_REQ_REG                  0x20b0
 138#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET         16
 139#define MVPP2_TXQ_RSVD_RSLT_REG                 0x20b4
 140#define     MVPP2_TXQ_RSVD_RSLT_MASK            0x3fff
 141#define MVPP2_TXQ_RSVD_CLR_REG                  0x20b8
 142#define     MVPP2_TXQ_RSVD_CLR_OFFSET           16
 143#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)       (0x2100 + 4 * (cpu))
 144#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS      8
 145#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)       (0x2140 + 4 * (cpu))
 146#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK       0x3ff0
 147#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)          (0x2180 + 4 * (cpu))
 148#define     MVPP2_AGGR_TXQ_PENDING_MASK         0x3fff
 149#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)           (0x21c0 + 4 * (cpu))
 150
 151/* MBUS bridge registers */
 152#define MVPP2_WIN_BASE(w)                       (0x4000 + ((w) << 2))
 153#define MVPP2_WIN_SIZE(w)                       (0x4020 + ((w) << 2))
 154#define MVPP2_WIN_REMAP(w)                      (0x4040 + ((w) << 2))
 155#define MVPP2_BASE_ADDR_ENABLE                  0x4060
 156
 157/* AXI Bridge Registers */
 158#define MVPP22_AXI_BM_WR_ATTR_REG               0x4100
 159#define MVPP22_AXI_BM_RD_ATTR_REG               0x4104
 160#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG      0x4110
 161#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG        0x4114
 162#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG        0x4118
 163#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG        0x411c
 164#define MVPP22_AXI_RX_DATA_WR_ATTR_REG          0x4120
 165#define MVPP22_AXI_TX_DATA_RD_ATTR_REG          0x4130
 166#define MVPP22_AXI_RD_NORMAL_CODE_REG           0x4150
 167#define MVPP22_AXI_RD_SNOOP_CODE_REG            0x4154
 168#define MVPP22_AXI_WR_NORMAL_CODE_REG           0x4160
 169#define MVPP22_AXI_WR_SNOOP_CODE_REG            0x4164
 170
 171/* Values for AXI Bridge registers */
 172#define MVPP22_AXI_ATTR_CACHE_OFFS              0
 173#define MVPP22_AXI_ATTR_DOMAIN_OFFS             12
 174
 175#define MVPP22_AXI_CODE_CACHE_OFFS              0
 176#define MVPP22_AXI_CODE_DOMAIN_OFFS             4
 177
 178#define MVPP22_AXI_CODE_CACHE_NON_CACHE         0x3
 179#define MVPP22_AXI_CODE_CACHE_WR_CACHE          0x7
 180#define MVPP22_AXI_CODE_CACHE_RD_CACHE          0xb
 181
 182#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM        2
 183#define MVPP22_AXI_CODE_DOMAIN_SYSTEM           3
 184
 185/* Interrupt Cause and Mask registers */
 186#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)         (0x5200 + 4 * (rxq))
 187#define     MVPP2_MAX_ISR_RX_THRESHOLD          0xfffff0
 188#define MVPP21_ISR_RXQ_GROUP_REG(rxq)           (0x5400 + 4 * (rxq))
 189
 190#define MVPP22_ISR_RXQ_GROUP_INDEX_REG          0x5400
 191#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
 192#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
 193#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
 197
 198#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
 199#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
 200#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
 201#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8
 202
 203#define MVPP2_ISR_ENABLE_REG(port)              (0x5420 + 4 * (port))
 204#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)    ((mask) & 0xffff)
 205#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)   (((mask) << 16) & 0xffff0000)
 206#define MVPP2_ISR_RX_TX_CAUSE_REG(port)         (0x5480 + 4 * (port))
 207#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
 208#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
 209#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK    BIT(24)
 210#define     MVPP2_CAUSE_FCS_ERR_MASK            BIT(25)
 211#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK   BIT(26)
 212#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK   BIT(29)
 213#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK   BIT(30)
 214#define     MVPP2_CAUSE_MISC_SUM_MASK           BIT(31)
 215#define MVPP2_ISR_RX_TX_MASK_REG(port)          (0x54a0 + 4 * (port))
 216#define MVPP2_ISR_PON_RX_TX_MASK_REG            0x54bc
 217#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK     0xffff
 218#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK     0x3fc00000
 219#define     MVPP2_PON_CAUSE_MISC_SUM_MASK               BIT(31)
 220#define MVPP2_ISR_MISC_CAUSE_REG                0x55b0
 221
 222/* Buffer Manager registers */
 223#define MVPP2_BM_POOL_BASE_REG(pool)            (0x6000 + ((pool) * 4))
 224#define     MVPP2_BM_POOL_BASE_ADDR_MASK        0xfffff80
 225#define MVPP2_BM_POOL_SIZE_REG(pool)            (0x6040 + ((pool) * 4))
 226#define     MVPP2_BM_POOL_SIZE_MASK             0xfff0
 227#define MVPP2_BM_POOL_READ_PTR_REG(pool)        (0x6080 + ((pool) * 4))
 228#define     MVPP2_BM_POOL_GET_READ_PTR_MASK     0xfff0
 229#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)        (0x60c0 + ((pool) * 4))
 230#define     MVPP2_BM_POOL_PTRS_NUM_MASK         0xfff0
 231#define MVPP2_BM_BPPI_READ_PTR_REG(pool)        (0x6100 + ((pool) * 4))
 232#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)        (0x6140 + ((pool) * 4))
 233#define     MVPP2_BM_BPPI_PTR_NUM_MASK          0x7ff
 234#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK    BIT(16)
 235#define MVPP2_BM_POOL_CTRL_REG(pool)            (0x6200 + ((pool) * 4))
 236#define     MVPP2_BM_START_MASK                 BIT(0)
 237#define     MVPP2_BM_STOP_MASK                  BIT(1)
 238#define     MVPP2_BM_STATE_MASK                 BIT(4)
 239#define     MVPP2_BM_LOW_THRESH_OFFS            8
 240#define     MVPP2_BM_LOW_THRESH_MASK            0x7f00
 241#define     MVPP2_BM_LOW_THRESH_VALUE(val)      ((val) << \
 242                                                MVPP2_BM_LOW_THRESH_OFFS)
 243#define     MVPP2_BM_HIGH_THRESH_OFFS           16
 244#define     MVPP2_BM_HIGH_THRESH_MASK           0x7f0000
 245#define     MVPP2_BM_HIGH_THRESH_VALUE(val)     ((val) << \
 246                                                MVPP2_BM_HIGH_THRESH_OFFS)
 247#define MVPP2_BM_INTR_CAUSE_REG(pool)           (0x6240 + ((pool) * 4))
 248#define     MVPP2_BM_RELEASED_DELAY_MASK        BIT(0)
 249#define     MVPP2_BM_ALLOC_FAILED_MASK          BIT(1)
 250#define     MVPP2_BM_BPPE_EMPTY_MASK            BIT(2)
 251#define     MVPP2_BM_BPPE_FULL_MASK             BIT(3)
 252#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK      BIT(4)
 253#define MVPP2_BM_INTR_MASK_REG(pool)            (0x6280 + ((pool) * 4))
 254#define MVPP2_BM_PHY_ALLOC_REG(pool)            (0x6400 + ((pool) * 4))
 255#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK       BIT(0)
 256#define MVPP2_BM_VIRT_ALLOC_REG                 0x6440
 257#define MVPP22_BM_ADDR_HIGH_ALLOC               0x6444
 258#define     MVPP22_BM_ADDR_HIGH_PHYS_MASK       0xff
 259#define     MVPP22_BM_ADDR_HIGH_VIRT_MASK       0xff00
 260#define     MVPP22_BM_ADDR_HIGH_VIRT_SHIFT      8
 261#define MVPP2_BM_PHY_RLS_REG(pool)              (0x6480 + ((pool) * 4))
 262#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK       BIT(0)
 263#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK       BIT(1)
 264#define     MVPP2_BM_PHY_RLS_GRNTD_MASK         BIT(2)
 265#define MVPP2_BM_VIRT_RLS_REG                   0x64c0
 266#define MVPP22_BM_ADDR_HIGH_RLS_REG             0x64c4
 267#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK   0xff
 268#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK   0xff00
 269#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT  8
 270
 271/* TX Scheduler registers */
 272#define MVPP2_TXP_SCHED_PORT_INDEX_REG          0x8000
 273#define MVPP2_TXP_SCHED_Q_CMD_REG               0x8004
 274#define     MVPP2_TXP_SCHED_ENQ_MASK            0xff
 275#define     MVPP2_TXP_SCHED_DISQ_OFFSET         8
 276#define MVPP2_TXP_SCHED_CMD_1_REG               0x8010
 277#define MVPP2_TXP_SCHED_PERIOD_REG              0x8018
 278#define MVPP2_TXP_SCHED_MTU_REG                 0x801c
 279#define     MVPP2_TXP_MTU_MAX                   0x7FFFF
 280#define MVPP2_TXP_SCHED_REFILL_REG              0x8020
 281#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK    0x7ffff
 282#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK    0x3ff00000
 283#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)     ((v) << 20)
 284#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG          0x8024
 285#define     MVPP2_TXP_TOKEN_SIZE_MAX            0xffffffff
 286#define MVPP2_TXQ_SCHED_REFILL_REG(q)           (0x8040 + ((q) << 2))
 287#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK    0x7ffff
 288#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK    0x3ff00000
 289#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)     ((v) << 20)
 290#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)       (0x8060 + ((q) << 2))
 291#define     MVPP2_TXQ_TOKEN_SIZE_MAX            0x7fffffff
 292#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)       (0x8080 + ((q) << 2))
 293#define     MVPP2_TXQ_TOKEN_CNTR_MAX            0xffffffff
 294
 295/* TX general registers */
 296#define MVPP2_TX_SNOOP_REG                      0x8800
 297#define MVPP2_TX_PORT_FLUSH_REG                 0x8810
 298#define     MVPP2_TX_PORT_FLUSH_MASK(port)      (1 << (port))
 299
 300/* LMS registers */
 301#define MVPP2_SRC_ADDR_MIDDLE                   0x24
 302#define MVPP2_SRC_ADDR_HIGH                     0x28
 303#define MVPP2_PHY_AN_CFG0_REG                   0x34
 304#define     MVPP2_PHY_AN_STOP_SMI0_MASK         BIT(7)
 305#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG      0x305c
 306#define     MVPP2_EXT_GLOBAL_CTRL_DEFAULT       0x27
 307
 308/* Per-port registers */
 309#define MVPP2_GMAC_CTRL_0_REG                   0x0
 310#define      MVPP2_GMAC_PORT_EN_MASK            BIT(0)
 311#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS        2
 312#define      MVPP2_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 313#define      MVPP2_GMAC_MIB_CNTR_EN_MASK        BIT(15)
 314#define MVPP2_GMAC_CTRL_1_REG                   0x4
 315#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK    BIT(1)
 316#define      MVPP2_GMAC_GMII_LB_EN_MASK         BIT(5)
 317#define      MVPP2_GMAC_PCS_LB_EN_BIT           6
 318#define      MVPP2_GMAC_PCS_LB_EN_MASK          BIT(6)
 319#define      MVPP2_GMAC_SA_LOW_OFFS             7
 320#define MVPP2_GMAC_CTRL_2_REG                   0x8
 321#define      MVPP2_GMAC_INBAND_AN_MASK          BIT(0)
 322#define      MVPP2_GMAC_PCS_ENABLE_MASK         BIT(3)
 323#define      MVPP2_GMAC_PORT_RGMII_MASK         BIT(4)
 324#define      MVPP2_GMAC_PORT_RESET_MASK         BIT(6)
 325#define MVPP2_GMAC_AUTONEG_CONFIG               0xc
 326#define      MVPP2_GMAC_FORCE_LINK_DOWN         BIT(0)
 327#define      MVPP2_GMAC_FORCE_LINK_PASS         BIT(1)
 328#define      MVPP2_GMAC_CONFIG_MII_SPEED        BIT(5)
 329#define      MVPP2_GMAC_CONFIG_GMII_SPEED       BIT(6)
 330#define      MVPP2_GMAC_AN_SPEED_EN             BIT(7)
 331#define      MVPP2_GMAC_FC_ADV_EN               BIT(9)
 332#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 333#define      MVPP2_GMAC_AN_DUPLEX_EN            BIT(13)
 334#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG          0x1c
 335#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS     6
 336#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
 337#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)  (((v) << 6) & \
 338                                        MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
 339#define MVPP22_GMAC_CTRL_4_REG                  0x90
 340#define      MVPP22_CTRL4_EXT_PIN_GMII_SEL      BIT(0)
 341#define      MVPP22_CTRL4_DP_CLK_SEL            BIT(5)
 342#define      MVPP22_CTRL4_SYNC_BYPASS           BIT(6)
 343#define      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE  BIT(7)
 344
 345/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 346 * relative to port->base.
 347 */
 348#define MVPP22_XLG_CTRL3_REG                    0x11c
 349#define      MVPP22_XLG_CTRL3_MACMODESELECT_MASK        (7 << 13)
 350#define      MVPP22_XLG_CTRL3_MACMODESELECT_GMAC        (0 << 13)
 351
 352/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
 353#define MVPP22_SMI_MISC_CFG_REG                 0x1204
 354#define      MVPP22_SMI_POLLING_EN              BIT(10)
 355
 356#define MVPP22_GMAC_BASE(port)          (0x7000 + (port) * 0x1000 + 0xe00)
 357
 358#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK      0xff
 359
 360/* Descriptor ring Macros */
 361#define MVPP2_QUEUE_NEXT_DESC(q, index) \
 362        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
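
     /* For example, with a ring of MVPP2_MAX_TXD (1024) descriptors, last_desc
      * is 1023: MVPP2_QUEUE_NEXT_DESC(q, 1022) yields 1023, while
      * MVPP2_QUEUE_NEXT_DESC(q, 1023) wraps back to descriptor 0.
      */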
 363
 364/* Various constants */
 365
 366/* Coalescing */
 367#define MVPP2_TXDONE_COAL_PKTS_THRESH   15
 368#define MVPP2_TXDONE_HRTIMER_PERIOD_NS  1000000UL
 369#define MVPP2_RX_COAL_PKTS              32
 370#define MVPP2_RX_COAL_USEC              100
 371
  372/* The two-byte Marvell header. It either contains a special value used
  373 * by Marvell switches when a specific hardware mode is enabled (not
  374 * supported by this driver) or is filled with zeroes automatically on
  375 * the RX side. Since those two bytes sit at the front of the Ethernet
  376 * header, they automatically align the IP header on a 4-byte
  377 * boundary: the hardware skips those two bytes on its
  378 * own.
  379 */
 380#define MVPP2_MH_SIZE                   2
 381#define MVPP2_ETH_TYPE_LEN              2
 382#define MVPP2_PPPOE_HDR_SIZE            8
 383#define MVPP2_VLAN_TAG_LEN              4
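
     /* Worked example of the alignment above: a received frame starts with the
      * two-byte Marvell header, so the 14-byte Ethernet header begins at offset
      * MVPP2_MH_SIZE (2) and the IP header at offset 2 + ETH_HLEN = 16, which
      * is 4-byte aligned (20 with a single VLAN tag, still aligned).
      */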
 384
 385/* Lbtd 802.3 type */
 386#define MVPP2_IP_LBDT_TYPE              0xfffa
 387
 388#define MVPP2_TX_CSUM_MAX_SIZE          9800
 389
 390/* Timeout constants */
 391#define MVPP2_TX_DISABLE_TIMEOUT_MSEC   1000
 392#define MVPP2_TX_PENDING_TIMEOUT_MSEC   1000
 393
 394#define MVPP2_TX_MTU_MAX                0x7ffff
 395
 396/* Maximum number of T-CONTs of PON port */
 397#define MVPP2_MAX_TCONT                 16
 398
 399/* Maximum number of supported ports */
 400#define MVPP2_MAX_PORTS                 4
 401
 402/* Maximum number of TXQs used by single port */
 403#define MVPP2_MAX_TXQ                   8
 404
  405/* Default number of RXQs in use */
 406#define MVPP2_DEFAULT_RXQ               4
 407
 408/* Max number of Rx descriptors */
 409#define MVPP2_MAX_RXD                   128
 410
 411/* Max number of Tx descriptors */
 412#define MVPP2_MAX_TXD                   1024
 413
 414/* Amount of Tx descriptors that can be reserved at once by CPU */
 415#define MVPP2_CPU_DESC_CHUNK            64
 416
 417/* Max number of Tx descriptors in each aggregated queue */
 418#define MVPP2_AGGR_TXQ_SIZE             256
 419
 420/* Descriptor aligned size */
 421#define MVPP2_DESC_ALIGNED_SIZE         32
 422
 423/* Descriptor alignment mask */
 424#define MVPP2_TX_DESC_ALIGN             (MVPP2_DESC_ALIGNED_SIZE - 1)
 425
 426/* RX FIFO constants */
 427#define MVPP2_RX_FIFO_PORT_DATA_SIZE    0x2000
 428#define MVPP2_RX_FIFO_PORT_ATTR_SIZE    0x80
 429#define MVPP2_RX_FIFO_PORT_MIN_PKT      0x80
 430
 431/* RX buffer constants */
 432#define MVPP2_SKB_SHINFO_SIZE \
 433        SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 434
 435#define MVPP2_RX_PKT_SIZE(mtu) \
 436        ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
 437              ETH_HLEN + ETH_FCS_LEN, cache_line_size())
 438
 439#define MVPP2_RX_BUF_SIZE(pkt_size)     ((pkt_size) + NET_SKB_PAD)
 440#define MVPP2_RX_TOTAL_SIZE(buf_size)   ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
 441#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
 442        ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
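
     /* Rough worked example (NET_SKB_PAD and the skb_shared_info size depend
      * on the architecture and kernel configuration): for an MTU of 1500 and a
      * 64-byte cache line, MVPP2_RX_PKT_SIZE(1500) =
      * ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536; MVPP2_RX_BUF_SIZE() then adds
      * NET_SKB_PAD and MVPP2_RX_TOTAL_SIZE() adds MVPP2_SKB_SHINFO_SIZE on top.
      */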
 443
 444#define MVPP2_BIT_TO_BYTE(bit)          ((bit) / 8)
 445
 446/* IPv6 max L3 address size */
 447#define MVPP2_MAX_L3_ADDR_SIZE          16
 448
 449/* Port flags */
 450#define MVPP2_F_LOOPBACK                BIT(0)
 451
 452/* Marvell tag types */
 453enum mvpp2_tag_type {
 454        MVPP2_TAG_TYPE_NONE = 0,
 455        MVPP2_TAG_TYPE_MH   = 1,
 456        MVPP2_TAG_TYPE_DSA  = 2,
 457        MVPP2_TAG_TYPE_EDSA = 3,
 458        MVPP2_TAG_TYPE_VLAN = 4,
 459        MVPP2_TAG_TYPE_LAST = 5
 460};
 461
 462/* Parser constants */
 463#define MVPP2_PRS_TCAM_SRAM_SIZE        256
 464#define MVPP2_PRS_TCAM_WORDS            6
 465#define MVPP2_PRS_SRAM_WORDS            4
 466#define MVPP2_PRS_FLOW_ID_SIZE          64
 467#define MVPP2_PRS_FLOW_ID_MASK          0x3f
 468#define MVPP2_PRS_TCAM_ENTRY_INVALID    1
 469#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT   BIT(5)
 470#define MVPP2_PRS_IPV4_HEAD             0x40
 471#define MVPP2_PRS_IPV4_HEAD_MASK        0xf0
 472#define MVPP2_PRS_IPV4_MC               0xe0
 473#define MVPP2_PRS_IPV4_MC_MASK          0xf0
 474#define MVPP2_PRS_IPV4_BC_MASK          0xff
 475#define MVPP2_PRS_IPV4_IHL              0x5
 476#define MVPP2_PRS_IPV4_IHL_MASK         0xf
 477#define MVPP2_PRS_IPV6_MC               0xff
 478#define MVPP2_PRS_IPV6_MC_MASK          0xff
 479#define MVPP2_PRS_IPV6_HOP_MASK         0xff
 480#define MVPP2_PRS_TCAM_PROTO_MASK       0xff
 481#define MVPP2_PRS_TCAM_PROTO_MASK_L     0x3f
 482#define MVPP2_PRS_DBL_VLANS_MAX         100
 483
 484/* Tcam structure:
 485 * - lookup ID - 4 bits
 486 * - port ID - 1 byte
 487 * - additional information - 1 byte
 488 * - header data - 8 bytes
 489 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 490 */
 491#define MVPP2_PRS_AI_BITS                       8
 492#define MVPP2_PRS_PORT_MASK                     0xff
 493#define MVPP2_PRS_LU_MASK                       0xf
 494#define MVPP2_PRS_TCAM_DATA_BYTE(offs)          \
 495                                    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
 496#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)       \
 497                                              (((offs) * 2) - ((offs) % 2)  + 2)
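     /* Illustration of the byte swizzling above: data bytes and their enable
      * bytes share TCAM words in pairs.  For offs = 0..3,
      * MVPP2_PRS_TCAM_DATA_BYTE() selects bytes 0, 1, 4, 5 and
      * MVPP2_PRS_TCAM_DATA_BYTE_EN() selects bytes 2, 3, 6, 7, i.e. each
      * 32-bit word holds two data bytes followed by their two enable bytes.
      */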
 498#define MVPP2_PRS_TCAM_AI_BYTE                  16
 499#define MVPP2_PRS_TCAM_PORT_BYTE                17
 500#define MVPP2_PRS_TCAM_LU_BYTE                  20
 501#define MVPP2_PRS_TCAM_EN_OFFS(offs)            ((offs) + 2)
 502#define MVPP2_PRS_TCAM_INV_WORD                 5
 503/* Tcam entries ID */
 504#define MVPP2_PE_DROP_ALL               0
 505#define MVPP2_PE_FIRST_FREE_TID         1
 506#define MVPP2_PE_LAST_FREE_TID          (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
 507#define MVPP2_PE_IP6_EXT_PROTO_UN       (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
 508#define MVPP2_PE_MAC_MC_IP6             (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
 509#define MVPP2_PE_IP6_ADDR_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
 510#define MVPP2_PE_IP4_ADDR_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
 511#define MVPP2_PE_LAST_DEFAULT_FLOW      (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
 512#define MVPP2_PE_FIRST_DEFAULT_FLOW     (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
 513#define MVPP2_PE_EDSA_TAGGED            (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
 514#define MVPP2_PE_EDSA_UNTAGGED          (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
 515#define MVPP2_PE_DSA_TAGGED             (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
 516#define MVPP2_PE_DSA_UNTAGGED           (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
 517#define MVPP2_PE_ETYPE_EDSA_TAGGED      (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
 518#define MVPP2_PE_ETYPE_EDSA_UNTAGGED    (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
 519#define MVPP2_PE_ETYPE_DSA_TAGGED       (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
 520#define MVPP2_PE_ETYPE_DSA_UNTAGGED     (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
 521#define MVPP2_PE_MH_DEFAULT             (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
 522#define MVPP2_PE_DSA_DEFAULT            (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
 523#define MVPP2_PE_IP6_PROTO_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
 524#define MVPP2_PE_IP4_PROTO_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
 525#define MVPP2_PE_ETH_TYPE_UN            (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
 526#define MVPP2_PE_VLAN_DBL               (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
 527#define MVPP2_PE_VLAN_NONE              (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
 528#define MVPP2_PE_MAC_MC_ALL             (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
 529#define MVPP2_PE_MAC_PROMISCUOUS        (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
 530#define MVPP2_PE_MAC_NON_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
 531
 532/* Sram structure
  533 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 534 */
 535#define MVPP2_PRS_SRAM_RI_OFFS                  0
 536#define MVPP2_PRS_SRAM_RI_WORD                  0
 537#define MVPP2_PRS_SRAM_RI_CTRL_OFFS             32
 538#define MVPP2_PRS_SRAM_RI_CTRL_WORD             1
 539#define MVPP2_PRS_SRAM_RI_CTRL_BITS             32
 540#define MVPP2_PRS_SRAM_SHIFT_OFFS               64
 541#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT           72
 542#define MVPP2_PRS_SRAM_UDF_OFFS                 73
 543#define MVPP2_PRS_SRAM_UDF_BITS                 8
 544#define MVPP2_PRS_SRAM_UDF_MASK                 0xff
 545#define MVPP2_PRS_SRAM_UDF_SIGN_BIT             81
 546#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS            82
 547#define MVPP2_PRS_SRAM_UDF_TYPE_MASK            0x7
 548#define MVPP2_PRS_SRAM_UDF_TYPE_L3              1
 549#define MVPP2_PRS_SRAM_UDF_TYPE_L4              4
 550#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS        85
 551#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK        0x3
 552#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD         1
 553#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD     2
 554#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD     3
 555#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS          87
 556#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS          2
 557#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK          0x3
 558#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD           0
 559#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD       2
 560#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD       3
 561#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS         89
 562#define MVPP2_PRS_SRAM_AI_OFFS                  90
 563#define MVPP2_PRS_SRAM_AI_CTRL_OFFS             98
 564#define MVPP2_PRS_SRAM_AI_CTRL_BITS             8
 565#define MVPP2_PRS_SRAM_AI_MASK                  0xff
 566#define MVPP2_PRS_SRAM_NEXT_LU_OFFS             106
 567#define MVPP2_PRS_SRAM_NEXT_LU_MASK             0xf
 568#define MVPP2_PRS_SRAM_LU_DONE_BIT              110
 569#define MVPP2_PRS_SRAM_LU_GEN_BIT               111
 570
 571/* Sram result info bits assignment */
 572#define MVPP2_PRS_RI_MAC_ME_MASK                0x1
 573#define MVPP2_PRS_RI_DSA_MASK                   0x2
 574#define MVPP2_PRS_RI_VLAN_MASK                  (BIT(2) | BIT(3))
 575#define MVPP2_PRS_RI_VLAN_NONE                  0x0
 576#define MVPP2_PRS_RI_VLAN_SINGLE                BIT(2)
 577#define MVPP2_PRS_RI_VLAN_DOUBLE                BIT(3)
 578#define MVPP2_PRS_RI_VLAN_TRIPLE                (BIT(2) | BIT(3))
 579#define MVPP2_PRS_RI_CPU_CODE_MASK              0x70
 580#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC           BIT(4)
 581#define MVPP2_PRS_RI_L2_CAST_MASK               (BIT(9) | BIT(10))
 582#define MVPP2_PRS_RI_L2_UCAST                   0x0
 583#define MVPP2_PRS_RI_L2_MCAST                   BIT(9)
 584#define MVPP2_PRS_RI_L2_BCAST                   BIT(10)
 585#define MVPP2_PRS_RI_PPPOE_MASK                 0x800
 586#define MVPP2_PRS_RI_L3_PROTO_MASK              (BIT(12) | BIT(13) | BIT(14))
 587#define MVPP2_PRS_RI_L3_UN                      0x0
 588#define MVPP2_PRS_RI_L3_IP4                     BIT(12)
 589#define MVPP2_PRS_RI_L3_IP4_OPT                 BIT(13)
 590#define MVPP2_PRS_RI_L3_IP4_OTHER               (BIT(12) | BIT(13))
 591#define MVPP2_PRS_RI_L3_IP6                     BIT(14)
 592#define MVPP2_PRS_RI_L3_IP6_EXT                 (BIT(12) | BIT(14))
 593#define MVPP2_PRS_RI_L3_ARP                     (BIT(13) | BIT(14))
 594#define MVPP2_PRS_RI_L3_ADDR_MASK               (BIT(15) | BIT(16))
 595#define MVPP2_PRS_RI_L3_UCAST                   0x0
 596#define MVPP2_PRS_RI_L3_MCAST                   BIT(15)
 597#define MVPP2_PRS_RI_L3_BCAST                   (BIT(15) | BIT(16))
 598#define MVPP2_PRS_RI_IP_FRAG_MASK               0x20000
 599#define MVPP2_PRS_RI_UDF3_MASK                  0x300000
 600#define MVPP2_PRS_RI_UDF3_RX_SPECIAL            BIT(21)
 601#define MVPP2_PRS_RI_L4_PROTO_MASK              0x1c00000
 602#define MVPP2_PRS_RI_L4_TCP                     BIT(22)
 603#define MVPP2_PRS_RI_L4_UDP                     BIT(23)
 604#define MVPP2_PRS_RI_L4_OTHER                   (BIT(22) | BIT(23))
 605#define MVPP2_PRS_RI_UDF7_MASK                  0x60000000
 606#define MVPP2_PRS_RI_UDF7_IP6_LITE              BIT(29)
 607#define MVPP2_PRS_RI_DROP_MASK                  0x80000000
 608
 609/* Sram additional info bits assignment */
 610#define MVPP2_PRS_IPV4_DIP_AI_BIT               BIT(0)
 611#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT            BIT(0)
 612#define MVPP2_PRS_IPV6_EXT_AI_BIT               BIT(1)
 613#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT            BIT(2)
 614#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT        BIT(3)
 615#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT         BIT(4)
 616#define MVPP2_PRS_SINGLE_VLAN_AI                0
 617#define MVPP2_PRS_DBL_VLAN_AI_BIT               BIT(7)
 618
 619/* DSA/EDSA type */
 620#define MVPP2_PRS_TAGGED                true
 621#define MVPP2_PRS_UNTAGGED              false
 622#define MVPP2_PRS_EDSA                  true
 623#define MVPP2_PRS_DSA                   false
 624
 625/* MAC entries, shadow udf */
 626enum mvpp2_prs_udf {
 627        MVPP2_PRS_UDF_MAC_DEF,
 628        MVPP2_PRS_UDF_MAC_RANGE,
 629        MVPP2_PRS_UDF_L2_DEF,
 630        MVPP2_PRS_UDF_L2_DEF_COPY,
 631        MVPP2_PRS_UDF_L2_USER,
 632};
 633
 634/* Lookup ID */
 635enum mvpp2_prs_lookup {
 636        MVPP2_PRS_LU_MH,
 637        MVPP2_PRS_LU_MAC,
 638        MVPP2_PRS_LU_DSA,
 639        MVPP2_PRS_LU_VLAN,
 640        MVPP2_PRS_LU_L2,
 641        MVPP2_PRS_LU_PPPOE,
 642        MVPP2_PRS_LU_IP4,
 643        MVPP2_PRS_LU_IP6,
 644        MVPP2_PRS_LU_FLOWS,
 645        MVPP2_PRS_LU_LAST,
 646};
 647
 648/* L3 cast enum */
 649enum mvpp2_prs_l3_cast {
 650        MVPP2_PRS_L3_UNI_CAST,
 651        MVPP2_PRS_L3_MULTI_CAST,
 652        MVPP2_PRS_L3_BROAD_CAST
 653};
 654
 655/* Classifier constants */
 656#define MVPP2_CLS_FLOWS_TBL_SIZE        512
 657#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS  3
 658#define MVPP2_CLS_LKP_TBL_SIZE          64
 659
 660/* BM constants */
 661#define MVPP2_BM_POOLS_NUM              8
 662#define MVPP2_BM_LONG_BUF_NUM           1024
 663#define MVPP2_BM_SHORT_BUF_NUM          2048
 664#define MVPP2_BM_POOL_SIZE_MAX          (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
 665#define MVPP2_BM_POOL_PTR_ALIGN         128
  666#define MVPP2_BM_SWF_LONG_POOL(port)    (((port) > 2) ? 2 : (port))
 667#define MVPP2_BM_SWF_SHORT_POOL         3
 668
 669/* BM cookie (32 bits) definition */
 670#define MVPP2_BM_COOKIE_POOL_OFFS       8
 671#define MVPP2_BM_COOKIE_CPU_OFFS        24
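
     /* Example layout implied by the offsets above: the pool id starts at
      * bit 8 and the allocating CPU at bit 24, so a buffer from pool 3
      * allocated by CPU 1 would carry the cookie
      * (3 << MVPP2_BM_COOKIE_POOL_OFFS) | (1 << MVPP2_BM_COOKIE_CPU_OFFS)
      * = 0x01000300.
      */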
 672
 673/* BM short pool packet size
  674 * This value ensures that for SWF the total number
  675 * of bytes allocated for each buffer will be 512.
 676 */
 677#define MVPP2_BM_SHORT_PKT_SIZE         MVPP2_RX_MAX_PKT_SIZE(512)
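
     /* Sketch of that 512-byte budget (the exact split depends on NET_SKB_PAD
      * and sizeof(struct skb_shared_info) for the build): the short pool
      * packet size is 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE, so
      * MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(MVPP2_BM_SHORT_PKT_SIZE)) adds
      * both overheads back and lands on 512 bytes per buffer.
      */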
 678
 679#define MVPP21_ADDR_SPACE_SZ            0
 680#define MVPP22_ADDR_SPACE_SZ            SZ_64K
 681
 682#define MVPP2_MAX_CPUS                  4
 683
 684enum mvpp2_bm_type {
 685        MVPP2_BM_FREE,
 686        MVPP2_BM_SWF_LONG,
 687        MVPP2_BM_SWF_SHORT
 688};
 689
 690/* Definitions */
 691
 692/* Shared Packet Processor resources */
 693struct mvpp2 {
 694        /* Shared registers' base addresses */
 695        void __iomem *lms_base;
 696        void __iomem *iface_base;
 697
  698        /* On PPv2.2, each CPU can access the registers through its
  699         * own address space, each one 64 KB apart from the
  700         * others.
 701         */
 702        void __iomem *cpu_base[MVPP2_MAX_CPUS];
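
             /* A minimal sketch of how these windows are laid out (an
              * assumption, not copied from the probe code): on PPv2.2 one
              * would expect
              *
              *      for (cpu = 0; cpu < MVPP2_MAX_CPUS; cpu++)
              *              cpu_base[cpu] = base + cpu * MVPP22_ADDR_SPACE_SZ;
              *
              * while on PPv2.1 the address space size is 0, so every CPU
              * shares the same window.
              */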
 703
 704        /* Common clocks */
 705        struct clk *pp_clk;
 706        struct clk *gop_clk;
 707        struct clk *mg_clk;
 708
 709        /* List of pointers to port structures */
 710        struct mvpp2_port **port_list;
 711
 712        /* Aggregated TXQs */
 713        struct mvpp2_tx_queue *aggr_txqs;
 714
 715        /* BM pools */
 716        struct mvpp2_bm_pool *bm_pools;
 717
 718        /* PRS shadow table */
 719        struct mvpp2_prs_shadow *prs_shadow;
 720        /* PRS auxiliary table for double vlan entries control */
 721        bool *prs_double_vlans;
 722
 723        /* Tclk value */
 724        u32 tclk;
 725
 726        /* HW version */
 727        enum { MVPP21, MVPP22 } hw_version;
 728
 729        /* Maximum number of RXQs per port */
 730        unsigned int max_port_rxqs;
 731};
 732
 733struct mvpp2_pcpu_stats {
 734        struct  u64_stats_sync syncp;
 735        u64     rx_packets;
 736        u64     rx_bytes;
 737        u64     tx_packets;
 738        u64     tx_bytes;
 739};
 740
 741/* Per-CPU port control */
 742struct mvpp2_port_pcpu {
 743        struct hrtimer tx_done_timer;
 744        bool timer_scheduled;
 745        /* Tasklet for egress finalization */
 746        struct tasklet_struct tx_done_tasklet;
 747};
 748
 749struct mvpp2_port {
 750        u8 id;
 751
 752        /* Index of the port from the "group of ports" complex point
 753         * of view
 754         */
 755        int gop_id;
 756
 757        int irq;
 758
 759        struct mvpp2 *priv;
 760
 761        /* Per-port registers' base address */
 762        void __iomem *base;
 763
 764        struct mvpp2_rx_queue **rxqs;
 765        struct mvpp2_tx_queue **txqs;
 766        struct net_device *dev;
 767
 768        int pkt_size;
 769
 770        u32 pending_cause_rx;
 771        struct napi_struct napi;
 772
 773        /* Per-CPU port control */
 774        struct mvpp2_port_pcpu __percpu *pcpu;
 775
 776        /* Flags */
 777        unsigned long flags;
 778
 779        u16 tx_ring_size;
 780        u16 rx_ring_size;
 781        struct mvpp2_pcpu_stats __percpu *stats;
 782
 783        phy_interface_t phy_interface;
 784        struct device_node *phy_node;
 785        unsigned int link;
 786        unsigned int duplex;
 787        unsigned int speed;
 788
 789        struct mvpp2_bm_pool *pool_long;
 790        struct mvpp2_bm_pool *pool_short;
 791
 792        /* Index of first port's physical RXQ */
 793        u8 first_rxq;
 794};
 795
  796/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
  797 * layout of the transmit and reception DMA descriptors. Their
  798 * layout is therefore defined by the hardware design.
  799 */
 800
 801#define MVPP2_TXD_L3_OFF_SHIFT          0
 802#define MVPP2_TXD_IP_HLEN_SHIFT         8
 803#define MVPP2_TXD_L4_CSUM_FRAG          BIT(13)
 804#define MVPP2_TXD_L4_CSUM_NOT           BIT(14)
 805#define MVPP2_TXD_IP_CSUM_DISABLE       BIT(15)
 806#define MVPP2_TXD_PADDING_DISABLE       BIT(23)
 807#define MVPP2_TXD_L4_UDP                BIT(24)
 808#define MVPP2_TXD_L3_IP6                BIT(26)
 809#define MVPP2_TXD_L_DESC                BIT(28)
 810#define MVPP2_TXD_F_DESC                BIT(29)
 811
 812#define MVPP2_RXD_ERR_SUMMARY           BIT(15)
 813#define MVPP2_RXD_ERR_CODE_MASK         (BIT(13) | BIT(14))
 814#define MVPP2_RXD_ERR_CRC               0x0
 815#define MVPP2_RXD_ERR_OVERRUN           BIT(13)
 816#define MVPP2_RXD_ERR_RESOURCE          (BIT(13) | BIT(14))
 817#define MVPP2_RXD_BM_POOL_ID_OFFS       16
 818#define MVPP2_RXD_BM_POOL_ID_MASK       (BIT(16) | BIT(17) | BIT(18))
 819#define MVPP2_RXD_HWF_SYNC              BIT(21)
 820#define MVPP2_RXD_L4_CSUM_OK            BIT(22)
 821#define MVPP2_RXD_IP4_HEADER_ERR        BIT(24)
 822#define MVPP2_RXD_L4_TCP                BIT(25)
 823#define MVPP2_RXD_L4_UDP                BIT(26)
 824#define MVPP2_RXD_L3_IP4                BIT(28)
 825#define MVPP2_RXD_L3_IP6                BIT(30)
 826#define MVPP2_RXD_BUF_HDR               BIT(31)
 827
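     /* Illustrative decode of the RX status bits above (a sketch, not lifted
      * from the receive path): if MVPP2_RXD_ERR_SUMMARY is set, the error type
      * is (status & MVPP2_RXD_ERR_CODE_MASK), i.e. CRC, overrun or resource;
      * otherwise MVPP2_RXD_L4_CSUM_OK reports that the hardware already
      * validated the L4 checksum of a TCP/UDP packet.
      */
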
 828/* HW TX descriptor for PPv2.1 */
 829struct mvpp21_tx_desc {
 830        u32 command;            /* Options used by HW for packet transmitting.*/
 831        u8  packet_offset;      /* the offset from the buffer beginning */
 832        u8  phys_txq;           /* destination queue ID                 */
 833        u16 data_size;          /* data size of transmitted packet in bytes */
 834        u32 buf_dma_addr;       /* physical addr of transmitted buffer  */
 835        u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
 836        u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
 837        u32 reserved2;          /* reserved (for future use)            */
 838};
 839
 840/* HW RX descriptor for PPv2.1 */
 841struct mvpp21_rx_desc {
 842        u32 status;             /* info about received packet           */
 843        u16 reserved1;          /* parser_info (for future use, PnC)    */
 844        u16 data_size;          /* size of received packet in bytes     */
 845        u32 buf_dma_addr;       /* physical address of the buffer       */
 846        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
 847        u16 reserved2;          /* gem_port_id (for future use, PON)    */
 848        u16 reserved3;          /* csum_l4 (for future use, PnC)        */
 849        u8  reserved4;          /* bm_qset (for future use, BM)         */
 850        u8  reserved5;
 851        u16 reserved6;          /* classify_info (for future use, PnC)  */
 852        u32 reserved7;          /* flow_id (for future use, PnC) */
 853        u32 reserved8;
 854};
 855
 856/* HW TX descriptor for PPv2.2 */
 857struct mvpp22_tx_desc {
 858        u32 command;
 859        u8  packet_offset;
 860        u8  phys_txq;
 861        u16 data_size;
 862        u64 reserved1;
 863        u64 buf_dma_addr_ptp;
 864        u64 buf_cookie_misc;
 865};
 866
 867/* HW RX descriptor for PPv2.2 */
 868struct mvpp22_rx_desc {
 869        u32 status;
 870        u16 reserved1;
 871        u16 data_size;
 872        u32 reserved2;
 873        u32 reserved3;
 874        u64 buf_dma_addr_key_hash;
 875        u64 buf_cookie_misc;
 876};
 877
 878/* Opaque type used by the driver to manipulate the HW TX and RX
 879 * descriptors
 880 */
 881struct mvpp2_tx_desc {
 882        union {
 883                struct mvpp21_tx_desc pp21;
 884                struct mvpp22_tx_desc pp22;
 885        };
 886};
 887
 888struct mvpp2_rx_desc {
 889        union {
 890                struct mvpp21_rx_desc pp21;
 891                struct mvpp22_rx_desc pp22;
 892        };
 893};
 894
 895struct mvpp2_txq_pcpu_buf {
 896        /* Transmitted SKB */
 897        struct sk_buff *skb;
 898
 899        /* Physical address of transmitted buffer */
 900        dma_addr_t dma;
 901
 902        /* Size transmitted */
 903        size_t size;
 904};
 905
 906/* Per-CPU Tx queue control */
 907struct mvpp2_txq_pcpu {
 908        int cpu;
 909
 910        /* Number of Tx DMA descriptors in the descriptor ring */
 911        int size;
 912
  913        /* Number of currently used Tx DMA descriptors in the
 914         * descriptor ring
 915         */
 916        int count;
 917
 918        /* Number of Tx DMA descriptors reserved for each CPU */
 919        int reserved_num;
 920
 921        /* Infos about transmitted buffers */
 922        struct mvpp2_txq_pcpu_buf *buffs;
 923
 924        /* Index of last TX DMA descriptor that was inserted */
 925        int txq_put_index;
 926
 927        /* Index of the TX DMA descriptor to be cleaned up */
 928        int txq_get_index;
 929};
 930
 931struct mvpp2_tx_queue {
 932        /* Physical number of this Tx queue */
 933        u8 id;
 934
 935        /* Logical number of this Tx queue */
 936        u8 log_id;
 937
 938        /* Number of Tx DMA descriptors in the descriptor ring */
 939        int size;
 940
  941        /* Number of currently used Tx DMA descriptors in the descriptor ring */
 942        int count;
 943
 944        /* Per-CPU control of physical Tx queues */
 945        struct mvpp2_txq_pcpu __percpu *pcpu;
 946
 947        u32 done_pkts_coal;
 948
  949        /* Virtual address of the Tx DMA descriptors array */
 950        struct mvpp2_tx_desc *descs;
 951
 952        /* DMA address of the Tx DMA descriptors array */
 953        dma_addr_t descs_dma;
 954
 955        /* Index of the last Tx DMA descriptor */
 956        int last_desc;
 957
 958        /* Index of the next Tx DMA descriptor to process */
 959        int next_desc_to_proc;
 960};
 961
 962struct mvpp2_rx_queue {
 963        /* RX queue number, in the range 0-31 for physical RXQs */
 964        u8 id;
 965
 966        /* Num of rx descriptors in the rx descriptor ring */
 967        int size;
 968
 969        u32 pkts_coal;
 970        u32 time_coal;
 971
 972        /* Virtual address of the RX DMA descriptors array */
 973        struct mvpp2_rx_desc *descs;
 974
 975        /* DMA address of the RX DMA descriptors array */
 976        dma_addr_t descs_dma;
 977
 978        /* Index of the last RX DMA descriptor */
 979        int last_desc;
 980
 981        /* Index of the next RX DMA descriptor to process */
 982        int next_desc_to_proc;
 983
 984        /* ID of port to which physical RXQ is mapped */
 985        int port;
 986
 987        /* Port's logic RXQ number to which physical RXQ is mapped */
 988        int logic_rxq;
 989};
 990
 991union mvpp2_prs_tcam_entry {
 992        u32 word[MVPP2_PRS_TCAM_WORDS];
 993        u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
 994};
 995
 996union mvpp2_prs_sram_entry {
 997        u32 word[MVPP2_PRS_SRAM_WORDS];
 998        u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
 999};
1000
1001struct mvpp2_prs_entry {
1002        u32 index;
1003        union mvpp2_prs_tcam_entry tcam;
1004        union mvpp2_prs_sram_entry sram;
1005};
1006
1007struct mvpp2_prs_shadow {
1008        bool valid;
1009        bool finish;
1010
1011        /* Lookup ID */
1012        int lu;
1013
1014        /* User defined offset */
1015        int udf;
1016
1017        /* Result info */
1018        u32 ri;
1019        u32 ri_mask;
1020};
1021
1022struct mvpp2_cls_flow_entry {
1023        u32 index;
1024        u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1025};
1026
1027struct mvpp2_cls_lookup_entry {
1028        u32 lkpid;
1029        u32 way;
1030        u32 data;
1031};
1032
1033struct mvpp2_bm_pool {
1034        /* Pool number in the range 0-7 */
1035        int id;
1036        enum mvpp2_bm_type type;
1037
1038        /* Buffer Pointers Pool External (BPPE) size */
1039        int size;
1040        /* BPPE size in bytes */
1041        int size_bytes;
1042        /* Number of buffers for this pool */
1043        int buf_num;
1044        /* Pool buffer size */
1045        int buf_size;
1046        /* Packet size */
1047        int pkt_size;
1048        int frag_size;
1049
1050        /* BPPE virtual base address */
1051        u32 *virt_addr;
1052        /* BPPE DMA base address */
1053        dma_addr_t dma_addr;
1054
1055        /* Ports using BM pool */
1056        u32 port_map;
1057};
1058
 1059/* Static declarations */
1060
1061/* Number of RXQs used by single port */
1062static int rxq_number = MVPP2_DEFAULT_RXQ;
1063/* Number of TXQs used by single port */
1064static int txq_number = MVPP2_MAX_TXQ;
1065
1066#define MVPP2_DRIVER_NAME "mvpp2"
1067#define MVPP2_DRIVER_VERSION "1.0"
1068
1069/* Utility/helper methods */
1070
1071static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1072{
1073        writel(data, priv->cpu_base[0] + offset);
1074}
1075
1076static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1077{
1078        return readl(priv->cpu_base[0] + offset);
1079}
1080
1081/* These accessors should be used to access:
1082 *
1083 * - per-CPU registers, where each CPU has its own copy of the
1084 *   register.
1085 *
1086 *   MVPP2_BM_VIRT_ALLOC_REG
 1087 *   MVPP22_BM_ADDR_HIGH_ALLOC
1088 *   MVPP22_BM_ADDR_HIGH_RLS_REG
1089 *   MVPP2_BM_VIRT_RLS_REG
1090 *   MVPP2_ISR_RX_TX_CAUSE_REG
1091 *   MVPP2_ISR_RX_TX_MASK_REG
1092 *   MVPP2_TXQ_NUM_REG
1093 *   MVPP2_AGGR_TXQ_UPDATE_REG
1094 *   MVPP2_TXQ_RSVD_REQ_REG
1095 *   MVPP2_TXQ_RSVD_RSLT_REG
1096 *   MVPP2_TXQ_SENT_REG
1097 *   MVPP2_RXQ_NUM_REG
1098 *
1099 * - global registers that must be accessed through a specific CPU
1100 *   window, because they are related to an access to a per-CPU
1101 *   register
1102 *
1103 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
1104 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
1105 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
1106 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
1107 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
1108 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
1109 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
1110 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
1111 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
1112 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 1114 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
1116 */
1117static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1118                               u32 offset, u32 data)
1119{
1120        writel(data, priv->cpu_base[cpu] + offset);
1121}
1122
1123static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1124                             u32 offset)
1125{
1126        return readl(priv->cpu_base[cpu] + offset);
1127}
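
     /* Illustrative use of the accessors above (a sketch, not lifted from the
      * queue code): checking the pending descriptors of a physical TXQ from a
      * given CPU goes through the same per-CPU window that selected the queue:
      *
      *      mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_NUM_REG, txq_id);
      *      pending = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_PENDING_REG) &
      *                MVPP2_TXQ_PENDING_MASK;
      */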
1128
1129static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1130                                            struct mvpp2_tx_desc *tx_desc)
1131{
1132        if (port->priv->hw_version == MVPP21)
1133                return tx_desc->pp21.buf_dma_addr;
1134        else
1135                return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
1136}
1137
1138static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1139                                      struct mvpp2_tx_desc *tx_desc,
1140                                      dma_addr_t dma_addr)
1141{
1142        if (port->priv->hw_version == MVPP21) {
1143                tx_desc->pp21.buf_dma_addr = dma_addr;
1144        } else {
1145                u64 val = (u64)dma_addr;
1146
1147                tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1148                tx_desc->pp22.buf_dma_addr_ptp |= val;
1149        }
1150}
1151
1152static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1153                                    struct mvpp2_tx_desc *tx_desc)
1154{
1155        if (port->priv->hw_version == MVPP21)
1156                return tx_desc->pp21.data_size;
1157        else
1158                return tx_desc->pp22.data_size;
1159}
1160
1161static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1162                                  struct mvpp2_tx_desc *tx_desc,
1163                                  size_t size)
1164{
1165        if (port->priv->hw_version == MVPP21)
1166                tx_desc->pp21.data_size = size;
1167        else
1168                tx_desc->pp22.data_size = size;
1169}
1170
1171static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1172                                 struct mvpp2_tx_desc *tx_desc,
1173                                 unsigned int txq)
1174{
1175        if (port->priv->hw_version == MVPP21)
1176                tx_desc->pp21.phys_txq = txq;
1177        else
1178                tx_desc->pp22.phys_txq = txq;
1179}
1180
1181static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1182                                 struct mvpp2_tx_desc *tx_desc,
1183                                 unsigned int command)
1184{
1185        if (port->priv->hw_version == MVPP21)
1186                tx_desc->pp21.command = command;
1187        else
1188                tx_desc->pp22.command = command;
1189}
1190
1191static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1192                                    struct mvpp2_tx_desc *tx_desc,
1193                                    unsigned int offset)
1194{
1195        if (port->priv->hw_version == MVPP21)
1196                tx_desc->pp21.packet_offset = offset;
1197        else
1198                tx_desc->pp22.packet_offset = offset;
1199}
1200
1201static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1202                                            struct mvpp2_tx_desc *tx_desc)
1203{
1204        if (port->priv->hw_version == MVPP21)
1205                return tx_desc->pp21.packet_offset;
1206        else
1207                return tx_desc->pp22.packet_offset;
1208}
1209
1210static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1211                                            struct mvpp2_rx_desc *rx_desc)
1212{
1213        if (port->priv->hw_version == MVPP21)
1214                return rx_desc->pp21.buf_dma_addr;
1215        else
1216                return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
1217}
1218
1219static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1220                                             struct mvpp2_rx_desc *rx_desc)
1221{
1222        if (port->priv->hw_version == MVPP21)
1223                return rx_desc->pp21.buf_cookie;
1224        else
1225                return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
1226}
1227
1228static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1229                                    struct mvpp2_rx_desc *rx_desc)
1230{
1231        if (port->priv->hw_version == MVPP21)
1232                return rx_desc->pp21.data_size;
1233        else
1234                return rx_desc->pp22.data_size;
1235}
1236
1237static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1238                                   struct mvpp2_rx_desc *rx_desc)
1239{
1240        if (port->priv->hw_version == MVPP21)
1241                return rx_desc->pp21.status;
1242        else
1243                return rx_desc->pp22.status;
1244}
1245
1246static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1247{
1248        txq_pcpu->txq_get_index++;
1249        if (txq_pcpu->txq_get_index == txq_pcpu->size)
1250                txq_pcpu->txq_get_index = 0;
1251}
1252
1253static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1254                              struct mvpp2_txq_pcpu *txq_pcpu,
1255                              struct sk_buff *skb,
1256                              struct mvpp2_tx_desc *tx_desc)
1257{
1258        struct mvpp2_txq_pcpu_buf *tx_buf =
1259                txq_pcpu->buffs + txq_pcpu->txq_put_index;
1260        tx_buf->skb = skb;
1261        tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1262        tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1263                mvpp2_txdesc_offset_get(port, tx_desc);
1264        txq_pcpu->txq_put_index++;
1265        if (txq_pcpu->txq_put_index == txq_pcpu->size)
1266                txq_pcpu->txq_put_index = 0;
1267}
1268
1269/* Get number of physical egress port */
1270static inline int mvpp2_egress_port(struct mvpp2_port *port)
1271{
1272        return MVPP2_MAX_TCONT + port->id;
1273}
1274
1275/* Get number of physical TXQ */
1276static inline int mvpp2_txq_phys(int port, int txq)
1277{
1278        return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1279}
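
     /* For example, logical TXQ 2 of port 1 maps to physical TXQ
      * (MVPP2_MAX_TCONT + 1) * MVPP2_MAX_TXQ + 2 = (16 + 1) * 8 + 2 = 138.
      */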
1280
1281/* Parser configuration routines */
1282
1283/* Update parser tcam and sram hw entries */
1284static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1285{
1286        int i;
1287
1288        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1289                return -EINVAL;
1290
1291        /* Clear entry invalidation bit */
1292        pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1293
1294        /* Write tcam index - indirect access */
1295        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1296        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1297                mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1298
1299        /* Write sram index - indirect access */
1300        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1301        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1302                mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1303
1304        return 0;
1305}
1306
1307/* Read tcam entry from hw */
1308static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1309{
1310        int i;
1311
1312        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1313                return -EINVAL;
1314
1315        /* Write tcam index - indirect access */
1316        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1317
1318        pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1319                              MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1320        if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1321                return MVPP2_PRS_TCAM_ENTRY_INVALID;
1322
1323        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1324                pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1325
1326        /* Write sram index - indirect access */
1327        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1328        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1329                pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1330
1331        return 0;
1332}
1333
1334/* Invalidate tcam hw entry */
1335static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1336{
1337        /* Write index - indirect access */
1338        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1339        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1340                    MVPP2_PRS_TCAM_INV_MASK);
1341}
1342
1343/* Enable shadow table entry and set its lookup ID */
1344static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1345{
1346        priv->prs_shadow[index].valid = true;
1347        priv->prs_shadow[index].lu = lu;
1348}
1349
1350/* Update ri fields in shadow table entry */
1351static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1352                                    unsigned int ri, unsigned int ri_mask)
1353{
1354        priv->prs_shadow[index].ri_mask = ri_mask;
1355        priv->prs_shadow[index].ri = ri;
1356}
1357
1358/* Update lookup field in tcam sw entry */
1359static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1360{
1361        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1362
1363        pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1364        pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1365}
1366
1367/* Update mask for single port in tcam sw entry */
1368static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1369                                    unsigned int port, bool add)
1370{
1371        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1372
1373        if (add)
1374                pe->tcam.byte[enable_off] &= ~(1 << port);
1375        else
1376                pe->tcam.byte[enable_off] |= 1 << port;
1377}
1378
1379/* Update port map in tcam sw entry */
1380static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1381                                        unsigned int ports)
1382{
1383        unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1384        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1385
1386        pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1387        pe->tcam.byte[enable_off] &= ~port_mask;
1388        pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1389}
1390
1391/* Obtain port map from tcam sw entry */
1392static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1393{
1394        int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1395
1396        return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1397}
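/* Editor's note (illustrative only): the port map is stored inverted in the
 * enable byte, so a cleared bit means "match this port". For example, with
 * ports = BIT(0) | BIT(1), mvpp2_prs_tcam_port_map_set() zeroes the data
 * byte and writes ~0x03 (masked by MVPP2_PRS_PORT_MASK) into the enable
 * byte; mvpp2_prs_tcam_port_map_get() inverts it again and returns 0x03.
 */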
1398
1399/* Set byte of data and its enable bits in tcam sw entry */
1400static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1401                                         unsigned int offs, unsigned char byte,
1402                                         unsigned char enable)
1403{
1404        pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1405        pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1406}
1407
1408/* Get byte of data and its enable bits from tcam sw entry */
1409static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1410                                         unsigned int offs, unsigned char *byte,
1411                                         unsigned char *enable)
1412{
1413        *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1414        *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1415}
1416
1417/* Compare tcam data bytes with a pattern */
1418static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1419                                    u16 data)
1420{
1421        int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1422        u16 tcam_data;
1423
1424        tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1425        if (tcam_data != data)
1426                return false;
1427        return true;
1428}
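/* Editor's note (illustrative only): callers below pass swab16(tpid)
 * because of how mvpp2_prs_match_etype() lays the value out. For a TPID of
 * 0x8100, byte[off] = 0x81 and byte[off + 1] = 0x00, so the value
 * reassembled above is 0x0081 == swab16(0x8100).
 */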
1429
1430/* Update ai bits in tcam sw entry */
1431static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1432                                     unsigned int bits, unsigned int enable)
1433{
1434        int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1435
1436        for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1437
1438                if (!(enable & BIT(i)))
1439                        continue;
1440
1441                if (bits & BIT(i))
1442                        pe->tcam.byte[ai_idx] |= 1 << i;
1443                else
1444                        pe->tcam.byte[ai_idx] &= ~(1 << i);
1445        }
1446
1447        pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1448}
1449
1450/* Get ai bits from tcam sw entry */
1451static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1452{
1453        return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1454}
1455
1456/* Set ethertype in tcam sw entry */
1457static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1458                                  unsigned short ethertype)
1459{
1460        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1461        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1462}
1463
1464/* Set bits in sram sw entry */
1465static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1466                                    int val)
1467{
1468        pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1469}
1470
1471/* Clear bits in sram sw entry */
1472static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1473                                      int val)
1474{
1475        pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1476}
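/* Editor's note: a worked example of the bit addressing used by the two
 * helpers above, assuming MVPP2_BIT_TO_BYTE() is the usual bit/8
 * conversion: mvpp2_prs_sram_bits_set(pe, 17, 1) lands in sram.byte[2] and
 * sets bit 17 % 8 = 1 of that byte. A multi-bit value is not spread across
 * byte boundaries here, which is why mvpp2_prs_sram_offset_set() below
 * handles the spill into the following byte by hand.
 */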
1477
1478/* Update ri bits in sram sw entry */
1479static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1480                                     unsigned int bits, unsigned int mask)
1481{
1482        unsigned int i;
1483
1484        for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1485                int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1486
1487                if (!(mask & BIT(i)))
1488                        continue;
1489
1490                if (bits & BIT(i))
1491                        mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1492                else
1493                        mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1494
1495                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1496        }
1497}
1498
1499/* Obtain ri bits from sram sw entry */
1500static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1501{
1502        return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1503}
1504
1505/* Update ai bits in sram sw entry */
1506static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1507                                     unsigned int bits, unsigned int mask)
1508{
1509        unsigned int i;
1510        int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1511
1512        for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1513
1514                if (!(mask & BIT(i)))
1515                        continue;
1516
1517                if (bits & BIT(i))
1518                        mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1519                else
1520                        mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1521
1522                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1523        }
1524}
1525
1526/* Read ai bits from sram sw entry */
1527static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1528{
1529        u8 bits;
1530        int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1531        int ai_en_off = ai_off + 1;
1532        int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1533
1534        bits = (pe->sram.byte[ai_off] >> ai_shift) |
1535               (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1536
1537        return bits;
1538}
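/* Editor's note (illustrative only): the AI field is not byte aligned, so
 * the getter above stitches it together from two adjacent bytes. If
 * MVPP2_PRS_SRAM_AI_OFFS were 90 (a made-up value) and MVPP2_BIT_TO_BYTE()
 * divides by 8, then ai_off = 11, ai_shift = 2, and the 8 AI bits come
 * from bits [7:2] of byte 11 plus bits [1:0] of byte 12.
 */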
1539
1540/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1541 * lookup iteration
1542 */
1543static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1544                                       unsigned int lu)
1545{
1546        int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1547
1548        mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1549                                  MVPP2_PRS_SRAM_NEXT_LU_MASK);
1550        mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1551}
1552
1553/* In the sram sw entry set sign and value of the next lookup offset
1554 * and the offset value generated to the classifier
1555 */
1556static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1557                                     unsigned int op)
1558{
1559        /* Set sign */
1560        if (shift < 0) {
1561                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1562                shift = 0 - shift;
1563        } else {
1564                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1565        }
1566
1567        /* Set value */
1568        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1569                                                           (unsigned char)shift;
1570
1571        /* Reset and set operation */
1572        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1573                                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1574        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1575
1576        /* Set base offset as current */
1577        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1578}
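/* Editor's note (illustrative only): shifts are stored as sign + magnitude.
 * The IPv6 multicast entry further down calls
 *
 *	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 *
 * which sets the sign bit and stores a magnitude of 18, stepping the parser
 * back to the IPv6 next-header field.
 */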
1579
1580/* In the sram sw entry set sign and value of the user defined offset
1581 * generated to the classifier
1582 */
1583static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1584                                      unsigned int type, int offset,
1585                                      unsigned int op)
1586{
1587        /* Set sign */
1588        if (offset < 0) {
1589                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1590                offset = 0 - offset;
1591        } else {
1592                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1593        }
1594
1595        /* Set value */
1596        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1597                                  MVPP2_PRS_SRAM_UDF_MASK);
1598        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1599        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1600                                        MVPP2_PRS_SRAM_UDF_BITS)] &=
1601              ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1602        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1603                                        MVPP2_PRS_SRAM_UDF_BITS)] |=
1604                                (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1605
1606        /* Set offset type */
1607        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1608                                  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1609        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1610
1611        /* Set offset operation */
1612        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1613                                  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1614        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1615
1616        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1617                                        MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1618                                             ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1619                                    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1620
1621        pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1622                                        MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1623                             (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1624
1625        /* Set base offset as current */
1626        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1627}
1628
1629/* Find parser flow entry */
1630static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1631{
1632        struct mvpp2_prs_entry *pe;
1633        int tid;
1634
1635        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1636        if (!pe)
1637                return NULL;
1638        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1639
1640        /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1641        for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1642                u8 bits;
1643
1644                if (!priv->prs_shadow[tid].valid ||
1645                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1646                        continue;
1647
1648                pe->index = tid;
1649                mvpp2_prs_hw_read(priv, pe);
1650                bits = mvpp2_prs_sram_ai_get(pe);
1651
1652                /* Sram stores classification lookup ID in AI bits [5:0] */
1653                if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1654                        return pe;
1655        }
1656        kfree(pe);
1657
1658        return NULL;
1659}
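/* Editor's note: mvpp2_prs_flow_find() returns a kmalloc'ed entry or NULL,
 * so the caller owns the memory. A minimal usage sketch (hypothetical
 * caller):
 *
 *	pe = mvpp2_prs_flow_find(priv, flow);
 *	if (pe) {
 *		... reuse the existing flow entry ...
 *		kfree(pe);
 *	}
 */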
1660
1661/* Return first free tcam index, seeking from start to end */
1662static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1663                                     unsigned char end)
1664{
1665        int tid;
1666
1667        if (start > end)
1668                swap(start, end);
1669
1670        if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1671                end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1672
1673        for (tid = start; tid <= end; tid++) {
1674                if (!priv->prs_shadow[tid].valid)
1675                        return tid;
1676        }
1677
1678        return -EINVAL;
1679}
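/* Editor's note (illustrative only): both call shapes seen later in this
 * file are valid. The common forward search is
 *
 *	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *					MVPP2_PE_LAST_FREE_TID);
 *
 * and the reversed arguments used by mvpp2_prs_vlan_add() are normalized by
 * swap(), so the scan always runs from the lower tid upwards.
 */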
1680
1681/* Enable/disable dropping all mac da's */
1682static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1683{
1684        struct mvpp2_prs_entry pe;
1685
1686        if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1687                /* Entry exists - update port only */
1688                pe.index = MVPP2_PE_DROP_ALL;
1689                mvpp2_prs_hw_read(priv, &pe);
1690        } else {
1691                /* Entry doesn't exist - create new */
1692                memset(&pe, 0, sizeof(pe));
1693                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1694                pe.index = MVPP2_PE_DROP_ALL;
1695
1696                /* Non-promiscuous mode for all ports - DROP unknown packets */
1697                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1698                                         MVPP2_PRS_RI_DROP_MASK);
1699
1700                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1701                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1702
1703                /* Update shadow table */
1704                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1705
1706                /* Mask all ports */
1707                mvpp2_prs_tcam_port_map_set(&pe, 0);
1708        }
1709
1710        /* Update port mask */
1711        mvpp2_prs_tcam_port_set(&pe, port, add);
1712
1713        mvpp2_prs_hw_write(priv, &pe);
1714}
1715
1716/* Set port to promiscuous mode */
1717static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1718{
1719        struct mvpp2_prs_entry pe;
1720
1721        /* Promiscuous mode - Accept unknown packets */
1722
1723        if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1724                /* Entry exists - update port only */
1725                pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1726                mvpp2_prs_hw_read(priv, &pe);
1727        } else {
1728                /* Entry doesn't exist - create new */
1729                memset(&pe, 0, sizeof(pe));
1730                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1731                pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1732
1733                /* Continue - set next lookup */
1734                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1735
1736                /* Set result info bits */
1737                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1738                                         MVPP2_PRS_RI_L2_CAST_MASK);
1739
1740                /* Shift to ethertype */
1741                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1742                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1743
1744                /* Mask all ports */
1745                mvpp2_prs_tcam_port_map_set(&pe, 0);
1746
1747                /* Update shadow table */
1748                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1749        }
1750
1751        /* Update port mask */
1752        mvpp2_prs_tcam_port_set(&pe, port, add);
1753
1754        mvpp2_prs_hw_write(priv, &pe);
1755}
1756
1757/* Accept multicast */
1758static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1759                                    bool add)
1760{
1761        struct mvpp2_prs_entry pe;
1762        unsigned char da_mc;
1763
1764        /* Ethernet multicast address first byte is
1765         * 0x01 for IPv4 and 0x33 for IPv6
1766         */
1767        da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1768
1769        if (priv->prs_shadow[index].valid) {
1770                /* Entry exists - update port only */
1771                pe.index = index;
1772                mvpp2_prs_hw_read(priv, &pe);
1773        } else {
1774                /* Entry doesn't exist - create new */
1775                memset(&pe, 0, sizeof(pe));
1776                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1777                pe.index = index;
1778
1779                /* Continue - set next lookup */
1780                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1781
1782                /* Set result info bits */
1783                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1784                                         MVPP2_PRS_RI_L2_CAST_MASK);
1785
1786                /* Update tcam entry data first byte */
1787                mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1788
1789                /* Shift to ethertype */
1790                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1791                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1792
1793                /* Mask all ports */
1794                mvpp2_prs_tcam_port_map_set(&pe, 0);
1795
1796                /* Update shadow table */
1797                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1798        }
1799
1800        /* Update port mask */
1801        mvpp2_prs_tcam_port_set(&pe, port, add);
1802
1803        mvpp2_prs_hw_write(priv, &pe);
1804}
1805
1806/* Set entry for dsa packets */
1807static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1808                                  bool tagged, bool extend)
1809{
1810        struct mvpp2_prs_entry pe;
1811        int tid, shift;
1812
1813        if (extend) {
1814                tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1815                shift = 8;
1816        } else {
1817                tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1818                shift = 4;
1819        }
1820
1821        if (priv->prs_shadow[tid].valid) {
1822                /* Entry exists - update port only */
1823                pe.index = tid;
1824                mvpp2_prs_hw_read(priv, &pe);
1825        } else {
1826                /* Entry doesn't exist - create new */
1827                memset(&pe, 0, sizeof(pe));
1828                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1829                pe.index = tid;
1830
1831                /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1832                mvpp2_prs_sram_shift_set(&pe, shift,
1833                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1834
1835                /* Update shadow table */
1836                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1837
1838                if (tagged) {
1839                        /* Set tagged bit in DSA tag */
1840                        mvpp2_prs_tcam_data_byte_set(&pe, 0,
1841                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1842                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1843                        /* Clear all ai bits for next iteration */
1844                        mvpp2_prs_sram_ai_update(&pe, 0,
1845                                                 MVPP2_PRS_SRAM_AI_MASK);
1846                        /* If packet is tagged continue check vlans */
1847                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1848                } else {
1849                        /* Set result info bits to 'no vlans' */
1850                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1851                                                 MVPP2_PRS_RI_VLAN_MASK);
1852                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1853                }
1854
1855                /* Mask all ports */
1856                mvpp2_prs_tcam_port_map_set(&pe, 0);
1857        }
1858
1859        /* Update port mask */
1860        mvpp2_prs_tcam_port_set(&pe, port, add);
1861
1862        mvpp2_prs_hw_write(priv, &pe);
1863}
1864
1865/* Set entry for dsa ethertype */
1866static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1867                                            bool add, bool tagged, bool extend)
1868{
1869        struct mvpp2_prs_entry pe;
1870        int tid, shift, port_mask;
1871
1872        if (extend) {
1873                tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1874                      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1875                port_mask = 0;
1876                shift = 8;
1877        } else {
1878                tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1879                      MVPP2_PE_ETYPE_DSA_UNTAGGED;
1880                port_mask = MVPP2_PRS_PORT_MASK;
1881                shift = 4;
1882        }
1883
1884        if (priv->prs_shadow[tid].valid) {
1885                /* Entry exists - update port only */
1886                pe.index = tid;
1887                mvpp2_prs_hw_read(priv, &pe);
1888        } else {
1889                /* Entry doesn't exist - create new */
1890                memset(&pe, 0, sizeof(pe));
1891                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1892                pe.index = tid;
1893
1894                /* Set ethertype */
1895                mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1896                mvpp2_prs_match_etype(&pe, 2, 0);
1897
1898                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1899                                         MVPP2_PRS_RI_DSA_MASK);
1900                /* Shift ethertype + 2 reserved bytes + tag */
1901                mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1902                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1903
1904                /* Update shadow table */
1905                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1906
1907                if (tagged) {
1908                        /* Set tagged bit in DSA tag */
1909                        mvpp2_prs_tcam_data_byte_set(&pe,
1910                                                     MVPP2_ETH_TYPE_LEN + 2 + 3,
1911                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1912                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1913                        /* Clear all ai bits for next iteration */
1914                        mvpp2_prs_sram_ai_update(&pe, 0,
1915                                                 MVPP2_PRS_SRAM_AI_MASK);
1916                        /* If packet is tagged continue check vlans */
1917                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1918                } else {
1919                        /* Set result info bits to 'no vlans' */
1920                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1921                                                 MVPP2_PRS_RI_VLAN_MASK);
1922                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1923                }
1924                /* Mask/unmask all ports, depending on dsa type */
1925                mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1926        }
1927
1928        /* Update port mask */
1929        mvpp2_prs_tcam_port_set(&pe, port, add);
1930
1931        mvpp2_prs_hw_write(priv, &pe);
1932}
1933
1934/* Search for existing single/triple vlan entry */
1935static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1936                                                   unsigned short tpid, int ai)
1937{
1938        struct mvpp2_prs_entry *pe;
1939        int tid;
1940
1941        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1942        if (!pe)
1943                return NULL;
1944        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1945
1946        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
1947        for (tid = MVPP2_PE_FIRST_FREE_TID;
1948             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1949                unsigned int ri_bits, ai_bits;
1950                bool match;
1951
1952                if (!priv->prs_shadow[tid].valid ||
1953                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1954                        continue;
1955
1956                pe->index = tid;
1957
1958                mvpp2_prs_hw_read(priv, pe);
1959                match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1960                if (!match)
1961                        continue;
1962
1963                /* Get vlan type */
1964                ri_bits = mvpp2_prs_sram_ri_get(pe);
1965                ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1966
1967                /* Get current ai value from tcam */
1968                ai_bits = mvpp2_prs_tcam_ai_get(pe);
1969                /* Clear double vlan bit */
1970                ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1971
1972                if (ai != ai_bits)
1973                        continue;
1974
1975                if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1976                    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1977                        return pe;
1978        }
1979        kfree(pe);
1980
1981        return NULL;
1982}
1983
1984/* Add/update single/triple vlan entry */
1985static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1986                              unsigned int port_map)
1987{
1988        struct mvpp2_prs_entry *pe;
1989        int tid_aux, tid;
1990        int ret = 0;
1991
1992        pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1993
1994        if (!pe) {
1995                /* Create new tcam entry */
1996                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1997                                                MVPP2_PE_FIRST_FREE_TID);
1998                if (tid < 0)
1999                        return tid;
2000
2001                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2002                if (!pe)
2003                        return -ENOMEM;
2004
2005                /* Get last double vlan tid */
2006                for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2007                     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2008                        unsigned int ri_bits;
2009
2010                        if (!priv->prs_shadow[tid_aux].valid ||
2011                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2012                                continue;
2013
2014                        pe->index = tid_aux;
2015                        mvpp2_prs_hw_read(priv, pe);
2016                        ri_bits = mvpp2_prs_sram_ri_get(pe);
2017                        if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2018                            MVPP2_PRS_RI_VLAN_DOUBLE)
2019                                break;
2020                }
2021
2022                if (tid <= tid_aux) {
2023                        ret = -EINVAL;
2024                        goto free_pe;
2025                }
2026
2027                memset(pe, 0, sizeof(*pe));
2028                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2029                pe->index = tid;
2030
2031                mvpp2_prs_match_etype(pe, 0, tpid);
2032
2033                mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
2034                /* Shift 4 bytes - skip 1 vlan tag */
2035                mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
2036                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2037                /* Clear all ai bits for next iteration */
2038                mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2039
2040                if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2041                        mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2042                                                 MVPP2_PRS_RI_VLAN_MASK);
2043                } else {
2044                        ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2045                        mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2046                                                 MVPP2_PRS_RI_VLAN_MASK);
2047                }
2048                mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2049
2050                mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2051        }
2052        /* Update ports' mask */
2053        mvpp2_prs_tcam_port_map_set(pe, port_map);
2054
2055        mvpp2_prs_hw_write(priv, pe);
2056free_pe:
2057        kfree(pe);
2058
2059        return ret;
2060}
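/* Editor's note: a hypothetical call mirroring what the vlan init code
 * further down does for single-tagged frames (identifiers as used elsewhere
 * in this driver):
 *
 *	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
 *				 MVPP2_PRS_PORT_MASK);
 *	if (err)
 *		return err;
 */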
2061
2062/* Get first free double vlan ai number */
2063static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2064{
2065        int i;
2066
2067        for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2068                if (!priv->prs_double_vlans[i])
2069                        return i;
2070        }
2071
2072        return -EINVAL;
2073}
2074
2075/* Search for existing double vlan entry */
2076static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2077                                                          unsigned short tpid1,
2078                                                          unsigned short tpid2)
2079{
2080        struct mvpp2_prs_entry *pe;
2081        int tid;
2082
2083        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2084        if (!pe)
2085                return NULL;
2086        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2087
2088        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
2089        for (tid = MVPP2_PE_FIRST_FREE_TID;
2090             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2091                unsigned int ri_mask;
2092                bool match;
2093
2094                if (!priv->prs_shadow[tid].valid ||
2095                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2096                        continue;
2097
2098                pe->index = tid;
2099                mvpp2_prs_hw_read(priv, pe);
2100
2101                match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
2102                        mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2103
2104                if (!match)
2105                        continue;
2106
2107                ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2108                if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2109                        return pe;
2110        }
2111        kfree(pe);
2112
2113        return NULL;
2114}
2115
2116/* Add or update double vlan entry */
2117static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2118                                     unsigned short tpid2,
2119                                     unsigned int port_map)
2120{
2121        struct mvpp2_prs_entry *pe;
2122        int tid_aux, tid, ai, ret = 0;
2123
2124        pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2125
2126        if (!pe) {
2127                /* Create new tcam entry */
2128                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2129                                MVPP2_PE_LAST_FREE_TID);
2130                if (tid < 0)
2131                        return tid;
2132
2133                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2134                if (!pe)
2135                        return -ENOMEM;
2136
2137                /* Set ai value for new double vlan entry */
2138                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2139                if (ai < 0) {
2140                        ret = ai;
2141                        goto free_pe;
2142                }
2143
2144                /* Get first single/triple vlan tid */
2145                for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2146                     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2147                        unsigned int ri_bits;
2148
2149                        if (!priv->prs_shadow[tid_aux].valid ||
2150                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2151                                continue;
2152
2153                        pe->index = tid_aux;
2154                        mvpp2_prs_hw_read(priv, pe);
2155                        ri_bits = mvpp2_prs_sram_ri_get(pe);
2156                        ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2157                        if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2158                            ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2159                                break;
2160                }
2161
2162                if (tid >= tid_aux) {
2163                        ret = -ERANGE;
2164                        goto free_pe;
2165                }
2166
2167                memset(pe, 0, sizeof(*pe));
2168                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2169                pe->index = tid;
2170
2171                priv->prs_double_vlans[ai] = true;
2172
2173                mvpp2_prs_match_etype(pe, 0, tpid1);
2174                mvpp2_prs_match_etype(pe, 4, tpid2);
2175
2176                mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2177                /* Shift 8 bytes - skip 2 vlan tags */
2178                mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2179                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2180                mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2181                                         MVPP2_PRS_RI_VLAN_MASK);
2182                mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2183                                         MVPP2_PRS_SRAM_AI_MASK);
2184
2185                mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2186        }
2187
2188        /* Update ports' mask */
2189        mvpp2_prs_tcam_port_map_set(pe, port_map);
2190        mvpp2_prs_hw_write(priv, pe);
2191free_pe:
2192        kfree(pe);
2193        return ret;
2194}
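/* Editor's note: a matching hypothetical call for double-tagged frames.
 * Note the ordering constraint enforced above: a double vlan entry must end
 * up at a lower tid than any single/triple vlan entry, or -ERANGE is
 * returned.
 *
 *	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
 *					MVPP2_PRS_PORT_MASK);
 */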
2195
2196/* IPv4 header parsing for fragmentation and L4 offset */
2197static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2198                               unsigned int ri, unsigned int ri_mask)
2199{
2200        struct mvpp2_prs_entry pe;
2201        int tid;
2202
2203        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2204            (proto != IPPROTO_IGMP))
2205                return -EINVAL;
2206
2207        /* Fragmented packet */
2208        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2209                                        MVPP2_PE_LAST_FREE_TID);
2210        if (tid < 0)
2211                return tid;
2212
2213        memset(&pe, 0, sizeof(pe));
2214        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2215        pe.index = tid;
2216
2217        /* Set next lu to IPv4 */
2218        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2219        mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2220        /* Set L4 offset */
2221        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2222                                  sizeof(struct iphdr) - 4,
2223                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2224        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2225                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
2226        mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2227                                 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2228
2229        mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2230        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2231        /* Unmask all ports */
2232        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2233
2234        /* Update shadow table and hw entry */
2235        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2236        mvpp2_prs_hw_write(priv, &pe);
2237
2238        /* Not fragmented packet */
2239        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2240                                        MVPP2_PE_LAST_FREE_TID);
2241        if (tid < 0)
2242                return tid;
2243
2244        pe.index = tid;
2245        /* Clear ri before updating */
2246        pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2247        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2248        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2249
2250        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2251        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2252
2253        /* Update shadow table and hw entry */
2254        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2255        mvpp2_prs_hw_write(priv, &pe);
2256
2257        return 0;
2258}
2259
2260/* IPv4 L3 multicast or broadcast */
2261static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2262{
2263        struct mvpp2_prs_entry pe;
2264        int mask, tid;
2265
2266        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2267                                        MVPP2_PE_LAST_FREE_TID);
2268        if (tid < 0)
2269                return tid;
2270
2271        memset(&pe, 0, sizeof(pe));
2272        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2273        pe.index = tid;
2274
2275        switch (l3_cast) {
2276        case MVPP2_PRS_L3_MULTI_CAST:
2277                mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2278                                             MVPP2_PRS_IPV4_MC_MASK);
2279                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2280                                         MVPP2_PRS_RI_L3_ADDR_MASK);
2281                break;
2282        case MVPP2_PRS_L3_BROAD_CAST:
2283                mask = MVPP2_PRS_IPV4_BC_MASK;
2284                mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2285                mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2286                mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2287                mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2288                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2289                                         MVPP2_PRS_RI_L3_ADDR_MASK);
2290                break;
2291        default:
2292                return -EINVAL;
2293        }
2294
2295        /* Finished: go to flowid generation */
2296        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2297        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2298
2299        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2300                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
2301        /* Unmask all ports */
2302        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2303
2304        /* Update shadow table and hw entry */
2305        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2306        mvpp2_prs_hw_write(priv, &pe);
2307
2308        return 0;
2309}
2310
2311/* Set entries for protocols over IPv6 */
2312static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2313                               unsigned int ri, unsigned int ri_mask)
2314{
2315        struct mvpp2_prs_entry pe;
2316        int tid;
2317
2318        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2319            (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2320                return -EINVAL;
2321
2322        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2323                                        MVPP2_PE_LAST_FREE_TID);
2324        if (tid < 0)
2325                return tid;
2326
2327        memset(&pe, 0, sizeof(pe));
2328        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2329        pe.index = tid;
2330
2331        /* Finished: go to flowid generation */
2332        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2333        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2334        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2335        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2336                                  sizeof(struct ipv6hdr) - 6,
2337                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2338
2339        mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2340        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2341                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2342        /* Unmask all ports */
2343        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2344
2345        /* Write HW */
2346        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2347        mvpp2_prs_hw_write(priv, &pe);
2348
2349        return 0;
2350}
2351
2352/* IPv6 L3 multicast entry */
2353static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2354{
2355        struct mvpp2_prs_entry pe;
2356        int tid;
2357
2358        if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2359                return -EINVAL;
2360
2361        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2362                                        MVPP2_PE_LAST_FREE_TID);
2363        if (tid < 0)
2364                return tid;
2365
2366        memset(&pe, 0, sizeof(pe));
2367        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2368        pe.index = tid;
2369
2370        /* Continue IPv6 lookup to resolve the next header */
2371        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2372        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2373                                 MVPP2_PRS_RI_L3_ADDR_MASK);
2374        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2375                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2376        /* Shift back to IPv6 NH */
2377        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2378
2379        mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2380                                     MVPP2_PRS_IPV6_MC_MASK);
2381        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2382        /* Unmask all ports */
2383        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2384
2385        /* Update shadow table and hw entry */
2386        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2387        mvpp2_prs_hw_write(priv, &pe);
2388
2389        return 0;
2390}
2391
2392/* Parser per-port initialization */
2393static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2394                                   int lu_max, int offset)
2395{
2396        u32 val;
2397
2398        /* Set lookup ID */
2399        val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2400        val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2401        val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2402        mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2403
2404        /* Set maximum number of loops for packet received from port */
2405        val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2406        val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2407        val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2408        mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2409
2410        /* Set initial offset for packet header extraction for the first
2411         * searching loop
2412         */
2413        val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2414        val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2415        val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2416        mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2417}
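/* Editor's note: an illustrative per-port setup call - start each packet at
 * the Marvell Header lookup with a zero initial offset. The lu_max argument
 * is a plausible choice, not verified against this file's init code:
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 */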
2418
2419/* Default flow entries initialization for all ports */
2420static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2421{
2422        struct mvpp2_prs_entry pe;
2423        int port;
2424
2425        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2426                memset(&pe, 0, sizeof(pe));
2427                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2428                pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2429
2430                /* Mask all ports */
2431                mvpp2_prs_tcam_port_map_set(&pe, 0);
2432
2433                /* Set flow ID */
2434                mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2435                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2436
2437                /* Update shadow table and hw entry */
2438                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2439                mvpp2_prs_hw_write(priv, &pe);
2440        }
2441}
2442
2443/* Set default entry for Marvell Header field */
2444static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2445{
2446        struct mvpp2_prs_entry pe;
2447
2448        memset(&pe, 0, sizeof(pe));
2449
2450        pe.index = MVPP2_PE_MH_DEFAULT;
2451        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2452        mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2453                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2454        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2455
2456        /* Unmask all ports */
2457        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2458
2459        /* Update shadow table and hw entry */
2460        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2461        mvpp2_prs_hw_write(priv, &pe);
2462}
2463
2464/* Set default entries (placeholders) for promiscuous, non-promiscuous and
2465 * multicast MAC addresses
2466 */
2467static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2468{
2469        struct mvpp2_prs_entry pe;
2470
2471        memset(&pe, 0, sizeof(pe));
2472
2473        /* Non-promiscuous mode for all ports - DROP unknown packets */
2474        pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2475        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2476
2477        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2478                                 MVPP2_PRS_RI_DROP_MASK);
2479        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2480        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2481
2482        /* Unmask all ports */
2483        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2484
2485        /* Update shadow table and hw entry */
2486        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2487        mvpp2_prs_hw_write(priv, &pe);
2488
2489        /* placeholders only - no ports */
2490        mvpp2_prs_mac_drop_all_set(priv, 0, false);
2491        mvpp2_prs_mac_promisc_set(priv, 0, false);
2492        mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2493        mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2494}
2495
2496/* Set default entries for various types of dsa packets */
2497static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2498{
2499        struct mvpp2_prs_entry pe;
2500
2501        /* Untagged EDSA entry - placeholder */
2502        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2503                              MVPP2_PRS_EDSA);
2504
2505        /* Tagged EDSA entry - placeholder */
2506        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2507
2508        /* Untagged DSA entry - placeholder */
2509        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2510                              MVPP2_PRS_DSA);
2511
2512        /* Tagged DSA entry - placeholder */
2513        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2514
2515        /* Untagged EDSA ethertype entry - placeholder */
2516        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2517                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2518
2519        /* Tagged EDSA ethertype entry - placeholder */
2520        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2521                                        MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2522
2523        /* Untagged DSA ethertype entry */
2524        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2525                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2526
2527        /* Tagged DSA ethertype entry */
2528        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2529                                        MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2530
2531        /* Set default entry, in case DSA or EDSA tag not found */
2532        memset(&pe, 0, sizeof(pe));
2533        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2534        pe.index = MVPP2_PE_DSA_DEFAULT;
2535        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2536
2537        /* Shift 0 bytes */
2538        mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2539        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2540
2541        /* Clear all sram ai bits for next iteration */
2542        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2543
2544        /* Unmask all ports */
2545        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2546
2547        mvpp2_prs_hw_write(priv, &pe);
2548}
2549
2550/* Match basic ethertypes */
2551static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2552{
2553        struct mvpp2_prs_entry pe;
2554        int tid;
2555
2556        /* Ethertype: PPPoE */
2557        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2558                                        MVPP2_PE_LAST_FREE_TID);
2559        if (tid < 0)
2560                return tid;
2561
2562        memset(&pe, 0, sizeof(pe));
2563        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2564        pe.index = tid;
2565
2566        mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2567
2568        mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2569                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2570        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2571        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2572                                 MVPP2_PRS_RI_PPPOE_MASK);
2573
2574        /* Update shadow table and hw entry */
2575        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2576        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2577        priv->prs_shadow[pe.index].finish = false;
2578        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2579                                MVPP2_PRS_RI_PPPOE_MASK);
2580        mvpp2_prs_hw_write(priv, &pe);
2581
2582        /* Ethertype: ARP */
2583        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2584                                        MVPP2_PE_LAST_FREE_TID);
2585        if (tid < 0)
2586                return tid;
2587
2588        memset(&pe, 0, sizeof(pe));
2589        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2590        pe.index = tid;
2591
2592        mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2593
2594        /* Generate flow in the next iteration */
2595        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2596        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2597        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2598                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2599        /* Set L3 offset */
2600        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2601                                  MVPP2_ETH_TYPE_LEN,
2602                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2603
2604        /* Update shadow table and hw entry */
2605        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2606        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2607        priv->prs_shadow[pe.index].finish = true;
2608        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2609                                MVPP2_PRS_RI_L3_PROTO_MASK);
2610        mvpp2_prs_hw_write(priv, &pe);
2611
2612        /* Ethertype: LBTD */
2613        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2614                                        MVPP2_PE_LAST_FREE_TID);
2615        if (tid < 0)
2616                return tid;
2617
2618        memset(&pe, 0, sizeof(pe));
2619        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2620        pe.index = tid;
2621
2622        mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2623
2624        /* Generate flow in the next iteration */
2625        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2626        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2627        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2628                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2629                                 MVPP2_PRS_RI_CPU_CODE_MASK |
2630                                 MVPP2_PRS_RI_UDF3_MASK);
2631        /* Set L3 offset */
2632        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2633                                  MVPP2_ETH_TYPE_LEN,
2634                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2635
2636        /* Update shadow table and hw entry */
2637        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2638        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2639        priv->prs_shadow[pe.index].finish = true;
2640        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2641                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2642                                MVPP2_PRS_RI_CPU_CODE_MASK |
2643                                MVPP2_PRS_RI_UDF3_MASK);
2644        mvpp2_prs_hw_write(priv, &pe);
2645
2646        /* Ethertype: IPv4 without options */
2647        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2648                                        MVPP2_PE_LAST_FREE_TID);
2649        if (tid < 0)
2650                return tid;
2651
2652        memset(&pe, 0, sizeof(pe));
2653        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2654        pe.index = tid;
2655
2656        mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2657        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2658                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2659                                     MVPP2_PRS_IPV4_HEAD_MASK |
2660                                     MVPP2_PRS_IPV4_IHL_MASK);
2661
2662        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2663        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2664                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2665        /* Skip eth_type + 4 bytes of IP header */
2666        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2667                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2668        /* Set L3 offset */
2669        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2670                                  MVPP2_ETH_TYPE_LEN,
2671                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2672
2673        /* Update shadow table and hw entry */
2674        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2675        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2676        priv->prs_shadow[pe.index].finish = false;
2677        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2678                                MVPP2_PRS_RI_L3_PROTO_MASK);
2679        mvpp2_prs_hw_write(priv, &pe);
2680
2681        /* Ethertype: IPv4 with options */
2682        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2683                                        MVPP2_PE_LAST_FREE_TID);
2684        if (tid < 0)
2685                return tid;
2686
2687        pe.index = tid;
2688
2689        /* Clear tcam data before updating */
2690        pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2691        pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2692
2693        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2694                                     MVPP2_PRS_IPV4_HEAD,
2695                                     MVPP2_PRS_IPV4_HEAD_MASK);
2696
2697        /* Clear ri before updating */
2698        pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2699        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2700        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2701                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2702
2703        /* Update shadow table and hw entry */
2704        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2705        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2706        priv->prs_shadow[pe.index].finish = false;
2707        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2708                                MVPP2_PRS_RI_L3_PROTO_MASK);
2709        mvpp2_prs_hw_write(priv, &pe);
2710
2711        /* Ethertype: IPv6 without options */
2712        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2713                                        MVPP2_PE_LAST_FREE_TID);
2714        if (tid < 0)
2715                return tid;
2716
2717        memset(&pe, 0, sizeof(pe));
2718        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2719        pe.index = tid;
2720
2721        mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2722
2723        /* Skip DIP of IPV6 header */
2724        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2725                                 MVPP2_MAX_L3_ADDR_SIZE,
2726                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2727        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2728        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2729                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2730        /* Set L3 offset */
2731        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2732                                  MVPP2_ETH_TYPE_LEN,
2733                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2734
2735        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2736        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2737        priv->prs_shadow[pe.index].finish = false;
2738        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2739                                MVPP2_PRS_RI_L3_PROTO_MASK);
2740        mvpp2_prs_hw_write(priv, &pe);
2741
2742        /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2743        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2744        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2745        pe.index = MVPP2_PE_ETH_TYPE_UN;
2746
2747        /* Unmask all ports */
2748        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2749
2750        /* Generate flow in the next iteration */
2751        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2752        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2753        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2754                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2755        /* Set L3 offset even if it's an unknown L3 */
2756        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2757                                  MVPP2_ETH_TYPE_LEN,
2758                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2759
2760        /* Update shadow table and hw entry */
2761        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2762        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2763        priv->prs_shadow[pe.index].finish = true;
2764        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2765                                MVPP2_PRS_RI_L3_PROTO_MASK);
2766        mvpp2_prs_hw_write(priv, &pe);
2767
2768        return 0;
2769}
2770
2771/* Configure vlan entries and detect up to 2 successive VLAN tags.
2772 * Possible options:
2773 * 0x8100, 0x88A8
2774 * 0x8100, 0x8100
2775 * 0x8100
2776 * 0x88A8
2777 */
2778static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2779{
2780        struct mvpp2_prs_entry pe;
2781        int err;
2782
2783        priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2784                                              MVPP2_PRS_DBL_VLANS_MAX,
2785                                              sizeof(bool), GFP_KERNEL);
2786        if (!priv->prs_double_vlans)
2787                return -ENOMEM;
2788
2789        /* Double VLAN: 0x8100, 0x88A8 */
2790        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2791                                        MVPP2_PRS_PORT_MASK);
2792        if (err)
2793                return err;
2794
2795        /* Double VLAN: 0x8100, 0x8100 */
2796        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2797                                        MVPP2_PRS_PORT_MASK);
2798        if (err)
2799                return err;
2800
2801        /* Single VLAN: 0x88a8 */
2802        err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2803                                 MVPP2_PRS_PORT_MASK);
2804        if (err)
2805                return err;
2806
2807        /* Single VLAN: 0x8100 */
2808        err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2809                                 MVPP2_PRS_PORT_MASK);
2810        if (err)
2811                return err;
2812
2813        /* Set default double vlan entry */
2814        memset(&pe, 0, sizeof(pe));
2815        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2816        pe.index = MVPP2_PE_VLAN_DBL;
2817
2818        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2819        /* Clear ai for next iterations */
2820        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2821        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2822                                 MVPP2_PRS_RI_VLAN_MASK);
2823
2824        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2825                                 MVPP2_PRS_DBL_VLAN_AI_BIT);
2826        /* Unmask all ports */
2827        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2828
2829        /* Update shadow table and hw entry */
2830        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2831        mvpp2_prs_hw_write(priv, &pe);
2832
2833        /* Set default vlan none entry */
2834        memset(&pe, 0, sizeof(pe));
2835        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2836        pe.index = MVPP2_PE_VLAN_NONE;
2837
2838        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2839        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2840                                 MVPP2_PRS_RI_VLAN_MASK);
2841
2842        /* Unmask all ports */
2843        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2844
2845        /* Update shadow table and hw entry */
2846        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2847        mvpp2_prs_hw_write(priv, &pe);
2848
2849        return 0;
2850}
2851
2852/* Set entries for PPPoE ethertype */
2853static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2854{
2855        struct mvpp2_prs_entry pe;
2856        int tid;
2857
2858        /* IPv4 over PPPoE with options */
2859        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2860                                        MVPP2_PE_LAST_FREE_TID);
2861        if (tid < 0)
2862                return tid;
2863
2864        memset(&pe, 0, sizeof(pe));
2865        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2866        pe.index = tid;
2867
2868        mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2869
2870        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2871        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2872                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2873        /* Skip eth_type + 4 bytes of IP header */
2874        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2875                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2876        /* Set L3 offset */
2877        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2878                                  MVPP2_ETH_TYPE_LEN,
2879                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2880
2881        /* Update shadow table and hw entry */
2882        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2883        mvpp2_prs_hw_write(priv, &pe);
2884
2885        /* IPv4 over PPPoE without options */
2886        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2887                                        MVPP2_PE_LAST_FREE_TID);
2888        if (tid < 0)
2889                return tid;
2890
2891        pe.index = tid;
2892
2893        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2894                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2895                                     MVPP2_PRS_IPV4_HEAD_MASK |
2896                                     MVPP2_PRS_IPV4_IHL_MASK);
2897
2898        /* Clear ri before updating */
2899        pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2900        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2901        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2902                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2903
2904        /* Update shadow table and hw entry */
2905        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2906        mvpp2_prs_hw_write(priv, &pe);
2907
2908        /* IPv6 over PPPoE */
2909        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2910                                        MVPP2_PE_LAST_FREE_TID);
2911        if (tid < 0)
2912                return tid;
2913
2914        memset(&pe, 0, sizeof(pe));
2915        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2916        pe.index = tid;
2917
2918        mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2919
2920        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2921        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2922                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2923        /* Skip eth_type + 4 bytes of IPv6 header */
2924        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2925                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2926        /* Set L3 offset */
2927        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2928                                  MVPP2_ETH_TYPE_LEN,
2929                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2930
2931        /* Update shadow table and hw entry */
2932        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2933        mvpp2_prs_hw_write(priv, &pe);
2934
2935        /* Non-IP over PPPoE */
2936        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2937                                        MVPP2_PE_LAST_FREE_TID);
2938        if (tid < 0)
2939                return tid;
2940
2941        memset(&pe, 0, sizeof(pe));
2942        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2943        pe.index = tid;
2944
2945        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2946                                 MVPP2_PRS_RI_L3_PROTO_MASK);
2947
2948        /* Finished: go to flowid generation */
2949        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2950        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2951        /* Set L3 offset even if it's unknown L3 */
2952        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2953                                  MVPP2_ETH_TYPE_LEN,
2954                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2955
2956        /* Update shadow table and hw entry */
2957        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2958        mvpp2_prs_hw_write(priv, &pe);
2959
2960        return 0;
2961}
2962
2963/* Initialize entries for IPv4 */
2964static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2965{
2966        struct mvpp2_prs_entry pe;
2967        int err;
2968
2969        /* Set entries for TCP, UDP and IGMP over IPv4 */
2970        err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2971                                  MVPP2_PRS_RI_L4_PROTO_MASK);
2972        if (err)
2973                return err;
2974
2975        err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2976                                  MVPP2_PRS_RI_L4_PROTO_MASK);
2977        if (err)
2978                return err;
2979
2980        err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2981                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2982                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2983                                  MVPP2_PRS_RI_CPU_CODE_MASK |
2984                                  MVPP2_PRS_RI_UDF3_MASK);
2985        if (err)
2986                return err;
2987
2988        /* IPv4 Broadcast */
2989        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2990        if (err)
2991                return err;
2992
2993        /* IPv4 Multicast */
2994        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2995        if (err)
2996                return err;
2997
2998        /* Default IPv4 entry for unknown protocols */
2999        memset(&pe, 0, sizeof(pe));
3000        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3001        pe.index = MVPP2_PE_IP4_PROTO_UN;
3002
3003        /* Set next lu to IPv4 */
3004        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
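            /* Jump 12 bytes ahead: the L2 ethertype entry already skipped 4 bytes
             * of the IPv4 header, so this lands on the destination address, which
             * the next lookup (flagged by MVPP2_PRS_IPV4_DIP_AI_BIT below) checks.
             */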
3005        mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3006        /* Set L4 offset */
3007        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3008                                  sizeof(struct iphdr) - 4,
3009                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3010        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3011                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
3012        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3013                                 MVPP2_PRS_RI_L4_PROTO_MASK);
3014
3015        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3016        /* Unmask all ports */
3017        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3018
3019        /* Update shadow table and hw entry */
3020        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3021        mvpp2_prs_hw_write(priv, &pe);
3022
3023        /* Default IPv4 entry for unicast address */
3024        memset(&pe, 0, sizeof(pe));
3025        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3026        pe.index = MVPP2_PE_IP4_ADDR_UN;
3027
3028        /* Finished: go to flowid generation */
3029        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3030        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3031        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3032                                 MVPP2_PRS_RI_L3_ADDR_MASK);
3033
3034        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3035                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
3036        /* Unmask all ports */
3037        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3038
3039        /* Update shadow table and hw entry */
3040        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3041        mvpp2_prs_hw_write(priv, &pe);
3042
3043        return 0;
3044}
3045
3046/* Initialize entries for IPv6 */
3047static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3048{
3049        struct mvpp2_prs_entry pe;
3050        int tid, err;
3051
3052        /* Set entries for TCP, UDP and ICMP over IPv6 */
3053        err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3054                                  MVPP2_PRS_RI_L4_TCP,
3055                                  MVPP2_PRS_RI_L4_PROTO_MASK);
3056        if (err)
3057                return err;
3058
3059        err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3060                                  MVPP2_PRS_RI_L4_UDP,
3061                                  MVPP2_PRS_RI_L4_PROTO_MASK);
3062        if (err)
3063                return err;
3064
3065        err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3066                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3067                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3068                                  MVPP2_PRS_RI_CPU_CODE_MASK |
3069                                  MVPP2_PRS_RI_UDF3_MASK);
3070        if (err)
3071                return err;
3072
3073        /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
3074        /* Result Info: UDF7=1, DS lite */
3075        err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3076                                  MVPP2_PRS_RI_UDF7_IP6_LITE,
3077                                  MVPP2_PRS_RI_UDF7_MASK);
3078        if (err)
3079                return err;
3080
3081        /* IPv6 multicast */
3082        err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3083        if (err)
3084                return err;
3085
3086        /* Entry for checking hop limit */
3087        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3088                                        MVPP2_PE_LAST_FREE_TID);
3089        if (tid < 0)
3090                return tid;
3091
3092        memset(&pe, 0, sizeof(pe));
3093        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3094        pe.index = tid;
3095
3096        /* Finished: go to flowid generation */
3097        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3098        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3099        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3100                                 MVPP2_PRS_RI_DROP_MASK,
3101                                 MVPP2_PRS_RI_L3_PROTO_MASK |
3102                                 MVPP2_PRS_RI_DROP_MASK);
3103
3104        mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3105        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3106                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3107
3108        /* Update shadow table and hw entry */
3109        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3110        mvpp2_prs_hw_write(priv, &pe);
3111
3112        /* Default IPv6 entry for unknown protocols */
3113        memset(&pe, 0, sizeof(pe));
3114        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3115        pe.index = MVPP2_PE_IP6_PROTO_UN;
3116
3117        /* Finished: go to flowid generation */
3118        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3119        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3120        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3121                                 MVPP2_PRS_RI_L4_PROTO_MASK);
3122        /* Set L4 offset relative to our current place */
3123        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3124                                  sizeof(struct ipv6hdr) - 4,
3125                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3126
3127        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3128                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3129        /* Unmask all ports */
3130        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3131
3132        /* Update shadow table and hw entry */
3133        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3134        mvpp2_prs_hw_write(priv, &pe);
3135
3136        /* Default IPv6 entry for unknown ext protocols */
3137        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3138        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3139        pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3140
3141        /* Finished: go to flowid generation */
3142        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3143        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3144        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3145                                 MVPP2_PRS_RI_L4_PROTO_MASK);
3146
3147        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3148                                 MVPP2_PRS_IPV6_EXT_AI_BIT);
3149        /* Unmask all ports */
3150        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3151
3152        /* Update shadow table and hw entry */
3153        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3154        mvpp2_prs_hw_write(priv, &pe);
3155
3156        /* Default IPv6 entry for unicast address */
3157        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3158        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3159        pe.index = MVPP2_PE_IP6_ADDR_UN;
3160
3161        /* Finished: go to IPv6 again */
3162        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3163        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3164                                 MVPP2_PRS_RI_L3_ADDR_MASK);
3165        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3166                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3167        /* Shift back to IPV6 NH */
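            /* (18 = next header + hop limit + 16-byte source address) */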
3168        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3169
3170        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3171        /* Unmask all ports */
3172        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3173
3174        /* Update shadow table and hw entry */
3175        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3176        mvpp2_prs_hw_write(priv, &pe);
3177
3178        return 0;
3179}
3180
3181/* Parser default initialization */
3182static int mvpp2_prs_default_init(struct platform_device *pdev,
3183                                  struct mvpp2 *priv)
3184{
3185        int err, index, i;
3186
3187        /* Enable tcam table */
3188        mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3189
3190        /* Clear all tcam and sram entries */
3191        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3192                mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3193                for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3194                        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3195
3196                mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3197                for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3198                        mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3199        }
3200
3201        /* Invalidate all tcam entries */
3202        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3203                mvpp2_prs_hw_inv(priv, index);
3204
3205        priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3206                                        sizeof(*priv->prs_shadow),
3207                                        GFP_KERNEL);
3208        if (!priv->prs_shadow)
3209                return -ENOMEM;
3210
3211        /* Always start from lookup = 0 */
3212        for (index = 0; index < MVPP2_MAX_PORTS; index++)
3213                mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3214                                       MVPP2_PRS_PORT_LU_MAX, 0);
3215
3216        mvpp2_prs_def_flow_init(priv);
3217
3218        mvpp2_prs_mh_init(priv);
3219
3220        mvpp2_prs_mac_init(priv);
3221
3222        mvpp2_prs_dsa_init(priv);
3223
3224        err = mvpp2_prs_etype_init(priv);
3225        if (err)
3226                return err;
3227
3228        err = mvpp2_prs_vlan_init(pdev, priv);
3229        if (err)
3230                return err;
3231
3232        err = mvpp2_prs_pppoe_init(priv);
3233        if (err)
3234                return err;
3235
3236        err = mvpp2_prs_ip6_init(priv);
3237        if (err)
3238                return err;
3239
3240        err = mvpp2_prs_ip4_init(priv);
3241        if (err)
3242                return err;
3243
3244        return 0;
3245}
3246
3247/* Compare MAC DA with tcam entry data */
3248static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3249                                       const u8 *da, unsigned char *mask)
3250{
3251        unsigned char tcam_byte, tcam_mask;
3252        int index;
3253
3254        for (index = 0; index < ETH_ALEN; index++) {
3255                mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3256                if (tcam_mask != mask[index])
3257                        return false;
3258
3259                if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3260                        return false;
3261        }
3262
3263        return true;
3264}
3265
3266/* Find tcam entry with matched pair <MAC DA, port> */
3267static struct mvpp2_prs_entry *
3268mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3269                            unsigned char *mask, int udf_type)
3270{
3271        struct mvpp2_prs_entry *pe;
3272        int tid;
3273
3274        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3275        if (!pe)
3276                return NULL;
3277        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3278
3279        /* Go through all entries with MVPP2_PRS_LU_MAC */
3280        for (tid = MVPP2_PE_FIRST_FREE_TID;
3281             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3282                unsigned int entry_pmap;
3283
3284                if (!priv->prs_shadow[tid].valid ||
3285                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3286                    (priv->prs_shadow[tid].udf != udf_type))
3287                        continue;
3288
3289                pe->index = tid;
3290                mvpp2_prs_hw_read(priv, pe);
3291                entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3292
3293                if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3294                    entry_pmap == pmap)
3295                        return pe;
3296        }
3297        kfree(pe);
3298
3299        return NULL;
3300}
3301
3302/* Update parser's mac da entry */
3303static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3304                                   const u8 *da, bool add)
3305{
3306        struct mvpp2_prs_entry *pe;
3307        unsigned int pmap, len, ri;
3308        unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3309        int tid;
3310
3311        /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3312        pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3313                                         MVPP2_PRS_UDF_MAC_DEF);
3314
3315        /* No such entry */
3316        if (!pe) {
3317                if (!add)
3318                        return 0;
3319
3320                /* Create new TCAM entry */
3321                /* Find the first range mac entry */
3322                for (tid = MVPP2_PE_FIRST_FREE_TID;
3323                     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3324                        if (priv->prs_shadow[tid].valid &&
3325                            (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3326                            (priv->prs_shadow[tid].udf ==
3327                                                       MVPP2_PRS_UDF_MAC_RANGE))
3328                                break;
3329
3330                /* Go through all entries from first to last */
3331                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3332                                                tid - 1);
3333                if (tid < 0)
3334                        return tid;
3335
3336                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3337                if (!pe)
3338                        return -ENOMEM;
3339                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3340                pe->index = tid;
3341
3342                /* Mask all ports */
3343                mvpp2_prs_tcam_port_map_set(pe, 0);
3344        }
3345
3346        /* Update port mask */
3347        mvpp2_prs_tcam_port_set(pe, port, add);
3348
3349        /* Invalidate the entry if no ports are left enabled */
3350        pmap = mvpp2_prs_tcam_port_map_get(pe);
3351        if (pmap == 0) {
3352                if (add) {
3353                        kfree(pe);
3354                        return -EINVAL;
3355                }
3356                mvpp2_prs_hw_inv(priv, pe->index);
3357                priv->prs_shadow[pe->index].valid = false;
3358                kfree(pe);
3359                return 0;
3360        }
3361
3362        /* Continue - set next lookup */
3363        mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3364
3365        /* Set match on DA */
3366        len = ETH_ALEN;
3367        while (len--)
3368                mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3369
3370        /* Set result info bits */
3371        if (is_broadcast_ether_addr(da))
3372                ri = MVPP2_PRS_RI_L2_BCAST;
3373        else if (is_multicast_ether_addr(da))
3374                ri = MVPP2_PRS_RI_L2_MCAST;
3375        else
3376                ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3377
3378        mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3379                                 MVPP2_PRS_RI_MAC_ME_MASK);
3380        mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3381                                MVPP2_PRS_RI_MAC_ME_MASK);
3382
3383        /* Shift to ethertype */
3384        mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3385                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3386
3387        /* Update shadow table and hw entry */
3388        priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3389        mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3390        mvpp2_prs_hw_write(priv, pe);
3391
3392        kfree(pe);
3393
3394        return 0;
3395}
3396
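    /* Change the unicast MAC address the parser accepts for this port */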
3397static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3398{
3399        struct mvpp2_port *port = netdev_priv(dev);
3400        int err;
3401
3402        /* Remove old parser entry */
3403        err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3404                                      false);
3405        if (err)
3406                return err;
3407
3408        /* Add new parser entry */
3409        err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3410        if (err)
3411                return err;
3412
3413        /* Set addr in the device */
3414        ether_addr_copy(dev->dev_addr, da);
3415
3416        return 0;
3417}
3418
3419/* Delete all port's multicast simple (not range) entries */
3420static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3421{
3422        struct mvpp2_prs_entry pe;
3423        int index, tid;
3424
3425        for (tid = MVPP2_PE_FIRST_FREE_TID;
3426             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3427                unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3428
3429                if (!priv->prs_shadow[tid].valid ||
3430                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3431                    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3432                        continue;
3433
3434                /* Only simple mac entries */
3435                pe.index = tid;
3436                mvpp2_prs_hw_read(priv, &pe);
3437
3438                /* Read mac addr from entry */
3439                for (index = 0; index < ETH_ALEN; index++)
3440                        mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3441                                                     &da_mask[index]);
3442
3443                if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3444                        /* Delete this entry */
3445                        mvpp2_prs_mac_da_accept(priv, port, da, false);
3446        }
3447}
3448
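    /* Enable the parser DSA/EDSA entries that match the port's tagging mode
     * and remove the port from the entries of the other mode.
     */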
3449static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3450{
3451        switch (type) {
3452        case MVPP2_TAG_TYPE_EDSA:
3453                /* Add port to EDSA entries */
3454                mvpp2_prs_dsa_tag_set(priv, port, true,
3455                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3456                mvpp2_prs_dsa_tag_set(priv, port, true,
3457                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3458                /* Remove port from DSA entries */
3459                mvpp2_prs_dsa_tag_set(priv, port, false,
3460                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3461                mvpp2_prs_dsa_tag_set(priv, port, false,
3462                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3463                break;
3464
3465        case MVPP2_TAG_TYPE_DSA:
3466                /* Add port to DSA entries */
3467                mvpp2_prs_dsa_tag_set(priv, port, true,
3468                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3469                mvpp2_prs_dsa_tag_set(priv, port, true,
3470                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3471                /* Remove port from EDSA entries */
3472                mvpp2_prs_dsa_tag_set(priv, port, false,
3473                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3474                mvpp2_prs_dsa_tag_set(priv, port, false,
3475                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3476                break;
3477
3478        case MVPP2_TAG_TYPE_MH:
3479        case MVPP2_TAG_TYPE_NONE:
3480                /* Remove port from EDSA and DSA entries */
3481                mvpp2_prs_dsa_tag_set(priv, port, false,
3482                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3483                mvpp2_prs_dsa_tag_set(priv, port, false,
3484                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3485                mvpp2_prs_dsa_tag_set(priv, port, false,
3486                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3487                mvpp2_prs_dsa_tag_set(priv, port, false,
3488                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3489                break;
3490
3491        default:
3492                if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3493                        return -EINVAL;
3494        }
3495
3496        return 0;
3497}
3498
3499/* Set prs flow for the port */
3500static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3501{
3502        struct mvpp2_prs_entry *pe;
3503        int tid;
3504
3505        pe = mvpp2_prs_flow_find(port->priv, port->id);
3506
3507        /* No such entry exists */
3508        if (!pe) {
3509                /* Go through all entries from last to first */
3510                tid = mvpp2_prs_tcam_first_free(port->priv,
3511                                                MVPP2_PE_LAST_FREE_TID,
3512                                                MVPP2_PE_FIRST_FREE_TID);
3513                if (tid < 0)
3514                        return tid;
3515
3516                pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3517                if (!pe)
3518                        return -ENOMEM;
3519
3520                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3521                pe->index = tid;
3522
3523                /* Set flow ID */
3524                mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3525                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3526
3527                /* Update shadow table */
3528                mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3529        }
3530
3531        mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3532        mvpp2_prs_hw_write(port->priv, pe);
3533        kfree(pe);
3534
3535        return 0;
3536}
3537
3538/* Classifier configuration routines */
3539
3540/* Update classification flow table registers */
3541static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3542                                 struct mvpp2_cls_flow_entry *fe)
3543{
3544        mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3545        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
3546        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
3547        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
3548}
3549
3550/* Update classification lookup table register */
3551static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3552                                   struct mvpp2_cls_lookup_entry *le)
3553{
3554        u32 val;
3555
3556        val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3557        mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3558        mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3559}
3560
3561/* Classifier default initialization */
3562static void mvpp2_cls_init(struct mvpp2 *priv)
3563{
3564        struct mvpp2_cls_lookup_entry le;
3565        struct mvpp2_cls_flow_entry fe;
3566        int index;
3567
3568        /* Enable classifier */
3569        mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3570
3571        /* Clear classifier flow table */
3572        memset(&fe.data, 0, sizeof(fe.data));
3573        for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3574                fe.index = index;
3575                mvpp2_cls_flow_write(priv, &fe);
3576        }
3577
3578        /* Clear classifier lookup table */
3579        le.data = 0;
3580        for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3581                le.lkpid = index;
3582                le.way = 0;
3583                mvpp2_cls_lookup_write(priv, &le);
3584
3585                le.way = 1;
3586                mvpp2_cls_lookup_write(priv, &le);
3587        }
3588}
3589
3590static void mvpp2_cls_port_config(struct mvpp2_port *port)
3591{
3592        struct mvpp2_cls_lookup_entry le;
3593        u32 val;
3594
3595        /* Set way for the port */
3596        val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3597        val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3598        mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3599
3600        /* Pick the entry to be accessed in the lookup ID decoding table
3601         * according to the way and lkpid.
3602         */
3603        le.lkpid = port->id;
3604        le.way = 0;
3605        le.data = 0;
3606
3607        /* Set initial CPU queue for receiving packets */
3608        le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3609        le.data |= port->first_rxq;
3610
3611        /* Disable classification engines */
3612        le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3613
3614        /* Update lookup ID table entry */
3615        mvpp2_cls_lookup_write(port->priv, &le);
3616}
3617
3618/* Set CPU queue number for oversize packets */
3619static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3620{
3621        u32 val;
3622
3623        mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3624                    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3625
3626        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3627                    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3628
3629        val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3630        val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3631        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3632}
3633
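    /* Allocate the data area of an RX buffer: a page fragment when it fits in
     * a single page, a plain kmalloc() allocation otherwise.
     */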
3634static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3635{
3636        if (likely(pool->frag_size <= PAGE_SIZE))
3637                return netdev_alloc_frag(pool->frag_size);
3638        else
3639                return kmalloc(pool->frag_size, GFP_ATOMIC);
3640}
3641
3642static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3643{
3644        if (likely(pool->frag_size <= PAGE_SIZE))
3645                skb_free_frag(data);
3646        else
3647                kfree(data);
3648}
3649
3650/* Buffer Manager configuration routines */
3651
3652/* Create pool */
3653static int mvpp2_bm_pool_create(struct platform_device *pdev,
3654                                struct mvpp2 *priv,
3655                                struct mvpp2_bm_pool *bm_pool, int size)
3656{
3657        u32 val;
3658
3659        /* Number of buffer pointers must be a multiple of 16, as per
3660         * hardware constraints
3661         */
3662        if (!IS_ALIGNED(size, 16))
3663                return -EINVAL;
3664
3665        /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3666         * bytes per buffer pointer
3667         */
3668        if (priv->hw_version == MVPP21)
3669                bm_pool->size_bytes = 2 * sizeof(u32) * size;
3670        else
3671                bm_pool->size_bytes = 2 * sizeof(u64) * size;
3672
3673        bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
3674                                                &bm_pool->dma_addr,
3675                                                GFP_KERNEL);
3676        if (!bm_pool->virt_addr)
3677                return -ENOMEM;
3678
3679        if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3680                        MVPP2_BM_POOL_PTR_ALIGN)) {
3681                dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3682                                  bm_pool->virt_addr, bm_pool->dma_addr);
3683                dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3684                        bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3685                return -ENOMEM;
3686        }
3687
3688        mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3689                    lower_32_bits(bm_pool->dma_addr));
3690        mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3691
3692        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3693        val |= MVPP2_BM_START_MASK;
3694        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3695
3696        bm_pool->type = MVPP2_BM_FREE;
3697        bm_pool->size = size;
3698        bm_pool->pkt_size = 0;
3699        bm_pool->buf_num = 0;
3700
3701        return 0;
3702}
3703
3704/* Set pool buffer size */
3705static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3706                                      struct mvpp2_bm_pool *bm_pool,
3707                                      int buf_size)
3708{
3709        u32 val;
3710
3711        bm_pool->buf_size = buf_size;
3712
3713        val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3714        mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3715}
3716
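    /* Read the DMA and physical addresses of one buffer from the pool's
     * per-CPU allocation registers; PPv2.2 keeps the high address bits in a
     * separate register.
     */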
3717static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3718                                    struct mvpp2_bm_pool *bm_pool,
3719                                    dma_addr_t *dma_addr,
3720                                    phys_addr_t *phys_addr)
3721{
3722        int cpu = get_cpu();
3723
3724        *dma_addr = mvpp2_percpu_read(priv, cpu,
3725                                      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3726        *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
3727
3728        if (priv->hw_version == MVPP22) {
3729                u32 val;
3730                u32 dma_addr_highbits, phys_addr_highbits;
3731
3732                val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
3733                dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3734                phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3735                        MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3736
3737                if (sizeof(dma_addr_t) == 8)
3738                        *dma_addr |= (u64)dma_addr_highbits << 32;
3739
3740                if (sizeof(phys_addr_t) == 8)
3741                        *phys_addr |= (u64)phys_addr_highbits << 32;
3742        }
3743
3744        put_cpu();
3745}
3746
3747/* Free all buffers from the pool */
3748static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3749                               struct mvpp2_bm_pool *bm_pool)
3750{
3751        int i;
3752
3753        for (i = 0; i < bm_pool->buf_num; i++) {
3754                dma_addr_t buf_dma_addr;
3755                phys_addr_t buf_phys_addr;
3756                void *data;
3757
3758                mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3759                                        &buf_dma_addr, &buf_phys_addr);
3760
3761                dma_unmap_single(dev, buf_dma_addr,
3762                                 bm_pool->buf_size, DMA_FROM_DEVICE);
3763
3764                data = (void *)phys_to_virt(buf_phys_addr);
3765                if (!data)
3766                        break;
3767
3768                mvpp2_frag_free(bm_pool, data);
3769        }
3770
3771        /* Update BM driver with number of buffers removed from pool */
3772        bm_pool->buf_num -= i;
3773}
3774
3775/* Cleanup pool */
3776static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3777                                 struct mvpp2 *priv,
3778                                 struct mvpp2_bm_pool *bm_pool)
3779{
3780        u32 val;
3781
3782        mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3783        if (bm_pool->buf_num) {
3784                WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3785                return 0;
3786        }
3787
3788        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3789        val |= MVPP2_BM_STOP_MASK;
3790        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3791
3792        dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3793                          bm_pool->virt_addr,
3794                          bm_pool->dma_addr);
3795        return 0;
3796}
3797
3798static int mvpp2_bm_pools_init(struct platform_device *pdev,
3799                               struct mvpp2 *priv)
3800{
3801        int i, err, size;
3802        struct mvpp2_bm_pool *bm_pool;
3803
3804        /* Create all pools with maximum size */
3805        size = MVPP2_BM_POOL_SIZE_MAX;
3806        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3807                bm_pool = &priv->bm_pools[i];
3808                bm_pool->id = i;
3809                err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3810                if (err)
3811                        goto err_unroll_pools;
3812                mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3813        }
3814        return 0;
3815
3816err_unroll_pools:
3817        dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3818        for (i = i - 1; i >= 0; i--)
3819                mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3820        return err;
3821}
3822
3823static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3824{
3825        int i, err;
3826
3827        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3828                /* Mask BM all interrupts */
3829                mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3830                /* Clear BM cause register */
3831                mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3832        }
3833
3834        /* Allocate and initialize BM pools */
3835        priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3836                                      sizeof(*priv->bm_pools), GFP_KERNEL);
3837        if (!priv->bm_pools)
3838                return -ENOMEM;
3839
3840        err = mvpp2_bm_pools_init(pdev, priv);
3841        if (err < 0)
3842                return err;
3843        return 0;
3844}
3845
3846/* Attach long pool to rxq */
3847static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3848                                    int lrxq, int long_pool)
3849{
3850        u32 val, mask;
3851        int prxq;
3852
3853        /* Get queue physical ID */
3854        prxq = port->rxqs[lrxq]->id;
3855
3856        if (port->priv->hw_version == MVPP21)
3857                mask = MVPP21_RXQ_POOL_LONG_MASK;
3858        else
3859                mask = MVPP22_RXQ_POOL_LONG_MASK;
3860
3861        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3862        val &= ~mask;
3863        val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
3864        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3865}
3866
3867/* Attach short pool to rxq */
3868static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3869                                     int lrxq, int short_pool)
3870{
3871        u32 val, mask;
3872        int prxq;
3873
3874        /* Get queue physical ID */
3875        prxq = port->rxqs[lrxq]->id;
3876
3877        if (port->priv->hw_version == MVPP21)
3878                mask = MVPP21_RXQ_POOL_SHORT_MASK;
3879        else
3880                mask = MVPP22_RXQ_POOL_SHORT_MASK;
3881
3882        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3883        val &= ~mask;
3884        val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
3885        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3886}
3887
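    /* Allocate a buffer for a BM pool and map it for DMA; returns its virtual
     * address and fills in the DMA and physical addresses.
     */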
3888static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3889                             struct mvpp2_bm_pool *bm_pool,
3890                             dma_addr_t *buf_dma_addr,
3891                             phys_addr_t *buf_phys_addr,
3892                             gfp_t gfp_mask)
3893{
3894        dma_addr_t dma_addr;
3895        void *data;
3896
3897        data = mvpp2_frag_alloc(bm_pool);
3898        if (!data)
3899                return NULL;
3900
3901        dma_addr = dma_map_single(port->dev->dev.parent, data,
3902                                  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3903                                  DMA_FROM_DEVICE);
3904        if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3905                mvpp2_frag_free(bm_pool, data);
3906                return NULL;
3907        }
3908        *buf_dma_addr = dma_addr;
3909        *buf_phys_addr = virt_to_phys(data);
3910
3911        return data;
3912}
3913
3914/* Set pool number in a BM cookie */
3915static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3916{
3917        u32 bm;
3918
3919        bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3920        bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3921
3922        return bm;
3923}
3924
3925/* Release buffer to BM */
3926static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3927                                     dma_addr_t buf_dma_addr,
3928                                     phys_addr_t buf_phys_addr)
3929{
3930        int cpu = get_cpu();
3931
3932        if (port->priv->hw_version == MVPP22) {
3933                u32 val = 0;
3934
3935                if (sizeof(dma_addr_t) == 8)
3936                        val |= upper_32_bits(buf_dma_addr) &
3937                                MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3938
3939                if (sizeof(phys_addr_t) == 8)
3940                        val |= (upper_32_bits(buf_phys_addr)
3941                                << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3942                                MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3943
3944                mvpp2_percpu_write(port->priv, cpu,
3945                                   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
3946        }
3947
3948        /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3949         * returned in the "cookie" field of the RX
3950         * descriptor. Instead of storing the virtual address, we
3951         * store the physical address
3952         */
3953        mvpp2_percpu_write(port->priv, cpu,
3954                           MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3955        mvpp2_percpu_write(port->priv, cpu,
3956                           MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
3957
3958        put_cpu();
3959}
3960
3961/* Refill BM pool */
3962static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
3963                              dma_addr_t dma_addr,
3964                              phys_addr_t phys_addr)
3965{
3966        mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3967}
3968
3969/* Allocate buffers for the pool */
3970static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3971                             struct mvpp2_bm_pool *bm_pool, int buf_num)
3972{
3973        int i, buf_size, total_size;
3974        dma_addr_t dma_addr;
3975        phys_addr_t phys_addr;
3976        void *buf;
3977
3978        buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3979        total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3980
3981        if (buf_num < 0 ||
3982            (buf_num + bm_pool->buf_num > bm_pool->size)) {
3983                netdev_err(port->dev,
3984                           "cannot allocate %d buffers for pool %d\n",
3985                           buf_num, bm_pool->id);
3986                return 0;
3987        }
3988
3989        for (i = 0; i < buf_num; i++) {
3990                buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
3991                                      &phys_addr, GFP_KERNEL);
3992                if (!buf)
3993                        break;
3994
3995                mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
3996                                  phys_addr);
3997        }
3998
3999        /* Update BM driver with number of buffers added to pool */
4000        bm_pool->buf_num += i;
4001
4002        netdev_dbg(port->dev,
4003                   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4004                   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4005                   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4006
4007        netdev_dbg(port->dev,
4008                   "%s pool %d: %d of %d buffers added\n",
4009                   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4010                   bm_pool->id, i, buf_num);
4011        return i;
4012}
4013
4014/* Notify the driver that a BM pool is being used as a specific type and
4015 * return the pool pointer on success
4016 */
4017static struct mvpp2_bm_pool *
4018mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4019                  int pkt_size)
4020{
4021        struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4022        int num;
4023
4024        if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4025                netdev_err(port->dev, "mixing pool types is forbidden\n");
4026                return NULL;
4027        }
4028
4029        if (new_pool->type == MVPP2_BM_FREE)
4030                new_pool->type = type;
4031
4032        /* Allocate buffers in case BM pool is used as long pool, but packet
4033         * size doesn't match the MTU or the BM pool hasn't been used yet
4034         */
4035        if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4036            (new_pool->pkt_size == 0)) {
4037                int pkts_num;
4038
4039                /* Set default buffer number or free all the buffers in case
4040                 * the pool is not empty
4041                 */
4042                pkts_num = new_pool->buf_num;
4043                if (pkts_num == 0)
4044                        pkts_num = type == MVPP2_BM_SWF_LONG ?
4045                                   MVPP2_BM_LONG_BUF_NUM :
4046                                   MVPP2_BM_SHORT_BUF_NUM;
4047                else
4048                        mvpp2_bm_bufs_free(port->dev->dev.parent,
4049                                           port->priv, new_pool);
4050
4051                new_pool->pkt_size = pkt_size;
4052                new_pool->frag_size =
4053                        SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4054                        MVPP2_SKB_SHINFO_SIZE;
4055
4056                /* Allocate buffers for this pool */
4057                num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4058                if (num != pkts_num) {
4059                        WARN(1, "pool %d: %d of %d allocated\n",
4060                             new_pool->id, num, pkts_num);
4061                        return NULL;
4062                }
4063        }
4064
4065        mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4066                                  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4067
4068        return new_pool;
4069}
4070
4071/* Initialize pools for swf */
4072static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4073{
4074        int rxq;
4075
4076        if (!port->pool_long) {
4077                port->pool_long =
4078                       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4079                                         MVPP2_BM_SWF_LONG,
4080                                         port->pkt_size);
4081                if (!port->pool_long)
4082                        return -ENOMEM;
4083
4084                port->pool_long->port_map |= (1 << port->id);
4085
4086                for (rxq = 0; rxq < rxq_number; rxq++)
4087                        mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4088        }
4089
4090        if (!port->pool_short) {
4091                port->pool_short =
4092                        mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4093                                          MVPP2_BM_SWF_SHORT,
4094                                          MVPP2_BM_SHORT_PKT_SIZE);
4095                if (!port->pool_short)
4096                        return -ENOMEM;
4097
4098                port->pool_short->port_map |= (1 << port->id);
4099
4100                for (rxq = 0; rxq < rxq_number; rxq++)
4101                        mvpp2_rxq_short_pool_set(port, rxq,
4102                                                 port->pool_short->id);
4103        }
4104
4105        return 0;
4106}
4107
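    /* Resize the port's long pool for a new MTU: drain the pool, update the
     * packet and fragment sizes, then refill it.
     */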
4108static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4109{
4110        struct mvpp2_port *port = netdev_priv(dev);
4111        struct mvpp2_bm_pool *port_pool = port->pool_long;
4112        int num, pkts_num = port_pool->buf_num;
4113        int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4114
4115        /* Update BM pool with new buffer size */
4116        mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
4117        if (port_pool->buf_num) {
4118                WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4119                return -EIO;
4120        }
4121
4122        port_pool->pkt_size = pkt_size;
4123        port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4124                MVPP2_SKB_SHINFO_SIZE;
4125        num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4126        if (num != pkts_num) {
4127                WARN(1, "pool %d: %d of %d allocated\n",
4128                     port_pool->id, num, pkts_num);
4129                return -EIO;
4130        }
4131
4132        mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4133                                  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4134        dev->mtu = mtu;
4135        netdev_update_features(dev);
4136        return 0;
4137}
4138
4139static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4140{
4141        int cpu, cpu_mask = 0;
4142
4143        for_each_present_cpu(cpu)
4144                cpu_mask |= 1 << cpu;
4145        mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4146                    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
4147}
4148
4149static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4150{
4151        int cpu, cpu_mask = 0;
4152
4153        for_each_present_cpu(cpu)
4154                cpu_mask |= 1 << cpu;
4155        mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4156                    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
4157}
4158
4159/* Mask the current CPU's Rx/Tx interrupts */
4160static void mvpp2_interrupts_mask(void *arg)
4161{
4162        struct mvpp2_port *port = arg;
4163
4164        mvpp2_percpu_write(port->priv, smp_processor_id(),
4165                           MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
4166}
4167
4168/* Unmask the current CPU's Rx/Tx interrupts */
4169static void mvpp2_interrupts_unmask(void *arg)
4170{
4171        struct mvpp2_port *port = arg;
4172
4173        mvpp2_percpu_write(port->priv, smp_processor_id(),
4174                           MVPP2_ISR_RX_TX_MASK_REG(port->id),
4175                           (MVPP2_CAUSE_MISC_SUM_MASK |
4176                            MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
4177}
4178
4179/* Port configuration routines */
4180
4181static void mvpp22_port_mii_set(struct mvpp2_port *port)
4182{
4183        u32 val;
4184
4185        /* Only GOP port 0 has an XLG MAC */
4186        if (port->gop_id == 0) {
4187                val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4188                val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4189                val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4190                writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4191        }
4192
4193        val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4194        if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
4195                val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4196        else
4197                val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4198        val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4199        val |= MVPP22_CTRL4_SYNC_BYPASS;
4200        val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4201        writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4202}
4203
4204static void mvpp2_port_mii_set(struct mvpp2_port *port)
4205{
4206        u32 val;
4207
4208        if (port->priv->hw_version == MVPP22)
4209                mvpp22_port_mii_set(port);
4210
4211        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4212
4213        switch (port->phy_interface) {
4214        case PHY_INTERFACE_MODE_SGMII:
4215                val |= MVPP2_GMAC_INBAND_AN_MASK;
4216                break;
4217        case PHY_INTERFACE_MODE_RGMII:
4218                val |= MVPP2_GMAC_PORT_RGMII_MASK;
                /* fall through */
4219        default:
4220                val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4221        }
4222
4223        writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4224}
4225
4226static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4227{
4228        u32 val;
4229
4230        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4231        val |= MVPP2_GMAC_FC_ADV_EN;
4232        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4233}
4234
4235static void mvpp2_port_enable(struct mvpp2_port *port)
4236{
4237        u32 val;
4238
4239        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4240        val |= MVPP2_GMAC_PORT_EN_MASK;
4241        val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4242        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4243}
4244
4245static void mvpp2_port_disable(struct mvpp2_port *port)
4246{
4247        u32 val;
4248
4249        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4250        val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4251        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4252}
4253
4254/* Disable periodic IEEE 802.3x Flow Control Xon packet transmission */
4255static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4256{
4257        u32 val;
4258
4259        val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4260                    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4261        writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4262}
4263
4264/* Configure loopback port */
4265static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4266{
4267        u32 val;
4268
4269        val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4270
4271        if (port->speed == 1000)
4272                val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4273        else
4274                val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4275
4276        if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4277                val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4278        else
4279                val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4280
4281        writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4282}
4283
4284static void mvpp2_port_reset(struct mvpp2_port *port)
4285{
4286        u32 val;
4287
4288        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4289                    ~MVPP2_GMAC_PORT_RESET_MASK;
4290        writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4291
4292        while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4293               MVPP2_GMAC_PORT_RESET_MASK)
4294                continue;
4295}
4296
4297/* Change maximum receive size of the port */
4298static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4299{
4300        u32 val;
4301
4302        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4303        val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
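        /* The MAX_RX_SIZE field excludes the 2-byte Marvell header and is
         * presumably expressed in units of 2 bytes, hence the division by 2.
         */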
4304        val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4305                    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4306        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4307}
4308
4309/* Set defaults to the MVPP2 port */
4310static void mvpp2_defaults_set(struct mvpp2_port *port)
4311{
4312        int tx_port_num, val, queue, ptxq, lrxq;
4313
4314        if (port->priv->hw_version == MVPP21) {
4315                /* Configure port to loopback if needed */
4316                if (port->flags & MVPP2_F_LOOPBACK)
4317                        mvpp2_port_loopback_set(port);
4318
4319                /* Update TX FIFO MIN Threshold */
4320                val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4321                val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4322                /* Min. TX threshold must be less than minimal packet length */
4323                val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4324                writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4325        }
4326
4327        /* Disable Legacy WRR, Disable EJP, Release from reset */
4328        tx_port_num = mvpp2_egress_port(port);
4329        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4330                    tx_port_num);
4331        mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4332
4333        /* Close bandwidth for all queues */
4334        for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4335                ptxq = mvpp2_txq_phys(port->id, queue);
4336                mvpp2_write(port->priv,
4337                            MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4338        }
4339
4340        /* Set refill period to 1 usec, refill tokens
4341         * and bucket size to maximum
4342         */
4343        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4344                    port->priv->tclk / USEC_PER_SEC);
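        /* For example, with a hypothetical 250 MHz tclk this programs
         * 250000000 / 1000000 = 250 clock cycles as the 1 usec refill period.
         */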
4345        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4346        val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4347        val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4348        val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4349        mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4350        val = MVPP2_TXP_TOKEN_SIZE_MAX;
4351        mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4352
4353        /* Set MaximumLowLatencyPacketSize value to 256 */
4354        mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4355                    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4356                    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4357
4358        /* Enable Rx cache snoop */
4359        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4360                queue = port->rxqs[lrxq]->id;
4361                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4362                val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4363                           MVPP2_SNOOP_BUF_HDR_MASK;
4364                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4365        }
4366
4367        /* By default, disable interrupts for all present CPUs */
4368        mvpp2_interrupts_disable(port);
4369}
4370
4371/* Enable/disable receiving packets */
4372static void mvpp2_ingress_enable(struct mvpp2_port *port)
4373{
4374        u32 val;
4375        int lrxq, queue;
4376
4377        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4378                queue = port->rxqs[lrxq]->id;
4379                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4380                val &= ~MVPP2_RXQ_DISABLE_MASK;
4381                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4382        }
4383}
4384
4385static void mvpp2_ingress_disable(struct mvpp2_port *port)
4386{
4387        u32 val;
4388        int lrxq, queue;
4389
4390        for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4391                queue = port->rxqs[lrxq]->id;
4392                val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4393                val |= MVPP2_RXQ_DISABLE_MASK;
4394                mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4395        }
4396}
4397
4398/* Enable transmit via physical egress queue
4399 * - HW starts taking descriptors from DRAM
4400 */
4401static void mvpp2_egress_enable(struct mvpp2_port *port)
4402{
4403        u32 qmap;
4404        int queue;
4405        int tx_port_num = mvpp2_egress_port(port);
4406
4407        /* Enable all initialized TXQs. */
4408        qmap = 0;
4409        for (queue = 0; queue < txq_number; queue++) {
4410                struct mvpp2_tx_queue *txq = port->txqs[queue];
4411
4412                if (txq->descs)
4413                        qmap |= (1 << queue);
4414        }
4415
4416        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4417        mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4418}
4419
4420/* Disable transmit via physical egress queue
4421 * - HW doesn't take descriptors from DRAM
4422 */
4423static void mvpp2_egress_disable(struct mvpp2_port *port)
4424{
4425        u32 reg_data;
4426        int delay;
4427        int tx_port_num = mvpp2_egress_port(port);
4428
4429        /* Issue stop command for active channels only */
4430        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4431        reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4432                    MVPP2_TXP_SCHED_ENQ_MASK;
4433        if (reg_data != 0)
4434                mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4435                            (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4436
4437        /* Wait for all Tx activity to terminate. */
4438        delay = 0;
4439        do {
4440                if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4441                        netdev_warn(port->dev,
4442                                    "Tx stop timed out, status=0x%08x\n",
4443                                    reg_data);
4444                        break;
4445                }
4446                mdelay(1);
4447                delay++;
4448
4449                /* Check the port TX Command register to verify that all
4450                 * Tx queues are stopped
4451                 */
4452                reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4453        } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4454}
4455
4456/* Rx descriptors helper methods */
4457
4458/* Get number of Rx descriptors occupied by received packets */
4459static inline int
4460mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4461{
4462        u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4463
4464        return val & MVPP2_RXQ_OCCUPIED_MASK;
4465}
4466
4467/* Update Rx queue status with the number of occupied and available
4468 * Rx descriptor slots.
4469 */
4470static inline void
4471mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4472                        int used_count, int free_count)
4473{
4474        /* Decrement the number of used descriptors and increment the
4475         * number of free descriptors.
4476         */
4477        u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4478
4479        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4480}
4481
4482/* Get pointer to next RX descriptor to be processed by SW */
4483static inline struct mvpp2_rx_desc *
4484mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4485{
4486        int rx_desc = rxq->next_desc_to_proc;
4487
4488        rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4489        prefetch(rxq->descs + rxq->next_desc_to_proc);
4490        return rxq->descs + rx_desc;
4491}
4492
4493/* Set rx queue offset */
4494static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4495                                 int prxq, int offset)
4496{
4497        u32 val;
4498
4499        /* Convert offset from bytes to units of 32 bytes */
4500        offset = offset >> 5;
4501
4502        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4503        val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4504
4505        /* Offset is in units of 32 bytes */
4506        val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4507                    MVPP2_RXQ_PACKET_OFFSET_MASK);
4508
4509        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4510}
4511
4512/* Tx descriptors helper methods */
4513
4514/* Get pointer to next Tx descriptor to be processed (send) by HW */
4515static struct mvpp2_tx_desc *
4516mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4517{
4518        int tx_desc = txq->next_desc_to_proc;
4519
4520        txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4521        return txq->descs + tx_desc;
4522}
4523
4524/* Update HW with number of aggregated Tx descriptors to be sent */
4525static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4526{
4527        /* aggregated access - relevant TXQ number is written in TX desc */
4528        mvpp2_percpu_write(port->priv, smp_processor_id(),
4529                           MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4530}
4531
4533/* Check if there are enough free descriptors in aggregated txq.
4534 * If not, update the number of occupied descriptors and repeat the check.
4535 */
4536static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4537                                     struct mvpp2_tx_queue *aggr_txq, int num)
4538{
4539        if ((aggr_txq->count + num) > aggr_txq->size) {
4540                /* Update number of occupied aggregated Tx descriptors */
4541                int cpu = smp_processor_id();
4542                u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4543
4544                aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4545        }
4546
4547        if ((aggr_txq->count + num) > aggr_txq->size)
4548                return -ENOMEM;
4549
4550        return 0;
4551}
4552
4553/* Reserved Tx descriptors allocation request */
4554static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4555                                         struct mvpp2_tx_queue *txq, int num)
4556{
4557        u32 val;
4558        int cpu = smp_processor_id();
4559
4560        val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4561        mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
4562
4563        val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
4564
4565        return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4566}
4567
4568/* Check if there are enough reserved descriptors for transmission.
4569 * If not, request chunk of reserved descriptors and check again.
4570 */
4571static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4572                                            struct mvpp2_tx_queue *txq,
4573                                            struct mvpp2_txq_pcpu *txq_pcpu,
4574                                            int num)
4575{
4576        int req, cpu, desc_count;
4577
4578        if (txq_pcpu->reserved_num >= num)
4579                return 0;
4580
4581        /* Not enough descriptors reserved! Update the reserved descriptor
4582         * count and check again.
4583         */
4584
4585        desc_count = 0;
4586        /* Compute total of used descriptors */
4587        for_each_present_cpu(cpu) {
4588                struct mvpp2_txq_pcpu *txq_pcpu_aux;
4589
4590                txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4591                desc_count += txq_pcpu_aux->count;
4592                desc_count += txq_pcpu_aux->reserved_num;
4593        }
4594
4595        req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4596        desc_count += req;
4597
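        /* Refuse the request if it would eat into the MVPP2_CPU_DESC_CHUNK
         * headroom kept for each present CPU, so that every CPU can still
         * reserve at least one chunk of descriptors.
         */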
4598        if (desc_count >
4599           (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4600                return -ENOMEM;
4601
4602        txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4603
4604        /* OK, the descriptor count has been updated: check again. */
4605        if (txq_pcpu->reserved_num < num)
4606                return -ENOMEM;
4607        return 0;
4608}
4609
4610/* Release the last allocated Tx descriptor. Useful to handle DMA
4611 * mapping failures in the Tx path.
4612 */
4613static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4614{
4615        if (txq->next_desc_to_proc == 0)
4616                txq->next_desc_to_proc = txq->last_desc - 1;
4617        else
4618                txq->next_desc_to_proc--;
4619}
4620
4621/* Set Tx descriptor fields relevant for CSUM calculation */
4622static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4623                               int ip_hdr_len, int l4_proto)
4624{
4625        u32 command;
4626
4627        /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4628         * G_L4_chk, L4_type required only for checksum calculation
4629         */
4630        command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4631        command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4632        command |= MVPP2_TXD_IP_CSUM_DISABLE;
4633
4634        if (l3_proto == swab16(ETH_P_IP)) {
4635                command &= ~MVPP2_TXD_IP_CSUM_DISABLE;  /* enable IPv4 csum */
4636                command &= ~MVPP2_TXD_L3_IP6;           /* enable IPv4 */
4637        } else {
4638                command |= MVPP2_TXD_L3_IP6;            /* enable IPv6 */
4639        }
4640
4641        if (l4_proto == IPPROTO_TCP) {
4642                command &= ~MVPP2_TXD_L4_UDP;           /* enable TCP */
4643                command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
4644        } else if (l4_proto == IPPROTO_UDP) {
4645                command |= MVPP2_TXD_L4_UDP;            /* enable UDP */
4646                command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
4647        } else {
4648                command |= MVPP2_TXD_L4_CSUM_NOT;
4649        }
4650
4651        return command;
4652}
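
/* For example, a TCP segment in an IPv4 packet behind a standard 14-byte
 * Ethernet header (l3_offs = 14, ip_hdr_len = 5 32-bit words,
 * l4_proto = IPPROTO_TCP) yields a command with IPv4 header and TCP
 * checksum generation enabled.
 */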
4653
4654/* Get number of sent descriptors and decrement counter.
4655 * The number of sent descriptors is returned.
4656 * Per-CPU access
4657 */
4658static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4659                                           struct mvpp2_tx_queue *txq)
4660{
4661        u32 val;
4662
4663        /* Reading status reg resets transmitted descriptor counter */
4664        val = mvpp2_percpu_read(port->priv, smp_processor_id(),
4665                                MVPP2_TXQ_SENT_REG(txq->id));
4666
4667        return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4668                MVPP2_TRANSMITTED_COUNT_OFFSET;
4669}
4670
4671static void mvpp2_txq_sent_counter_clear(void *arg)
4672{
4673        struct mvpp2_port *port = arg;
4674        int queue;
4675
4676        for (queue = 0; queue < txq_number; queue++) {
4677                int id = port->txqs[queue]->id;
4678
4679                mvpp2_percpu_read(port->priv, smp_processor_id(),
4680                                  MVPP2_TXQ_SENT_REG(id));
4681        }
4682}
4683
4684/* Set max sizes for Tx queues */
4685static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4686{
4687        u32     val, size, mtu;
4688        int     txq, tx_port_num;
4689
4690        mtu = port->pkt_size * 8;
4691        if (mtu > MVPP2_TXP_MTU_MAX)
4692                mtu = MVPP2_TXP_MTU_MAX;
4693
4694        /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4695        mtu = 3 * mtu;
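        /* For example, a 1518-byte pkt_size gives 1518 * 8 = 12144 (assuming
         * this is below MVPP2_TXP_MTU_MAX), which the workaround then triples
         * to 36432 before it is programmed below.
         */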
4696
4697        /* Indirect access to registers */
4698        tx_port_num = mvpp2_egress_port(port);
4699        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4700
4701        /* Set MTU */
4702        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4703        val &= ~MVPP2_TXP_MTU_MAX;
4704        val |= mtu;
4705        mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4706
4707        /* TXP token size and all TXQs token size must be larger than MTU */
4708        val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4709        size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4710        if (size < mtu) {
4711                size = mtu;
4712                val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4713                val |= size;
4714                mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4715        }
4716
4717        for (txq = 0; txq < txq_number; txq++) {
4718                val = mvpp2_read(port->priv,
4719                                 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4720                size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4721
4722                if (size < mtu) {
4723                        size = mtu;
4724                        val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4725                        val |= size;
4726                        mvpp2_write(port->priv,
4727                                    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4728                                    val);
4729                }
4730        }
4731}
4732
4733/* Set the number of packets that will be received before an Rx interrupt
4734 * is generated by the HW.
4735 */
4736static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4737                                   struct mvpp2_rx_queue *rxq)
4738{
4739        int cpu = get_cpu();
4740
4741        if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4742                rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
4743
4744        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4745        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4746                           rxq->pkts_coal);
4747
4748        put_cpu();
4749}
4750
4751static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4752{
4753        u64 tmp = (u64)clk_hz * usec;
4754
4755        do_div(tmp, USEC_PER_SEC);
4756
4757        return tmp > U32_MAX ? U32_MAX : tmp;
4758}
4759
4760static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4761{
4762        u64 tmp = (u64)cycles * USEC_PER_SEC;
4763
4764        do_div(tmp, clk_hz);
4765
4766        return tmp > U32_MAX ? U32_MAX : tmp;
4767}
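
/* For example, with a hypothetical 250 MHz tclk, mvpp2_usec_to_cycles(32, clk)
 * returns 32 * 250000000 / 1000000 = 8000 cycles, and
 * mvpp2_cycles_to_usec(8000, clk) converts that back to 32 usec.
 */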
4768
4769/* Set the time delay in usec before Rx interrupt */
4770static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4771                                   struct mvpp2_rx_queue *rxq)
4772{
4773        unsigned long freq = port->priv->tclk;
4774        u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4775
4776        if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4777                rxq->time_coal =
4778                        mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4779
4780                /* re-evaluate to get actual register value */
4781                val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4782        }
4783
4784        mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4785}
4786
4787/* Free Tx queue skbuffs */
4788static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4789                                struct mvpp2_tx_queue *txq,
4790                                struct mvpp2_txq_pcpu *txq_pcpu, int num)
4791{
4792        int i;
4793
4794        for (i = 0; i < num; i++) {
4795                struct mvpp2_txq_pcpu_buf *tx_buf =
4796                        txq_pcpu->buffs + txq_pcpu->txq_get_index;
4797
4798                dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
4799                                 tx_buf->size, DMA_TO_DEVICE);
4800                if (tx_buf->skb)
4801                        dev_kfree_skb_any(tx_buf->skb);
4802
4803                mvpp2_txq_inc_get(txq_pcpu);
4804        }
4805}
4806
4807static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4808                                                        u32 cause)
4809{
4810        int queue = fls(cause) - 1;
4811
4812        return port->rxqs[queue];
4813}
4814
4815static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4816                                                        u32 cause)
4817{
4818        int queue = fls(cause) - 1;
4819
4820        return port->txqs[queue];
4821}
4822
4823/* Handle end of transmission */
4824static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4825                           struct mvpp2_txq_pcpu *txq_pcpu)
4826{
4827        struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4828        int tx_done;
4829
4830        if (txq_pcpu->cpu != smp_processor_id())
4831                netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4832
4833        tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4834        if (!tx_done)
4835                return;
4836        mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4837
4838        txq_pcpu->count -= tx_done;
4839
4840        if (netif_tx_queue_stopped(nq))
4841                if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4842                        netif_tx_wake_queue(nq);
4843}
4844
4845static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4846{
4847        struct mvpp2_tx_queue *txq;
4848        struct mvpp2_txq_pcpu *txq_pcpu;
4849        unsigned int tx_todo = 0;
4850
4851        while (cause) {
4852                txq = mvpp2_get_tx_queue(port, cause);
4853                if (!txq)
4854                        break;
4855
4856                txq_pcpu = this_cpu_ptr(txq->pcpu);
4857
4858                if (txq_pcpu->count) {
4859                        mvpp2_txq_done(port, txq, txq_pcpu);
4860                        tx_todo += txq_pcpu->count;
4861                }
4862
4863                cause &= ~(1 << txq->log_id);
4864        }
4865        return tx_todo;
4866}
4867
4868/* Rx/Tx queue initialization/cleanup methods */
4869
4870/* Allocate and initialize descriptors for aggr TXQ */
4871static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4872                               struct mvpp2_tx_queue *aggr_txq,
4873                               int desc_num, int cpu,
4874                               struct mvpp2 *priv)
4875{
4876        u32 txq_dma;
4877
4878        /* Allocate memory for TX descriptors */
4879        aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4880                                desc_num * MVPP2_DESC_ALIGNED_SIZE,
4881                                &aggr_txq->descs_dma, GFP_KERNEL);
4882        if (!aggr_txq->descs)
4883                return -ENOMEM;
4884
4885        aggr_txq->last_desc = aggr_txq->size - 1;
4886
4887        /* WA: the aggr TXQ is not reset, so resync the SW index with the HW one */
4888        aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4889                                                 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4890
4891        /* Set Tx descriptors queue starting address indirect
4892         * access
4893         */
4894        if (priv->hw_version == MVPP21)
4895                txq_dma = aggr_txq->descs_dma;
4896        else
4897                txq_dma = aggr_txq->descs_dma >>
4898                        MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
4899
4900        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
4901        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4902
4903        return 0;
4904}
4905
4906/* Create a specified Rx queue */
4907static int mvpp2_rxq_init(struct mvpp2_port *port,
4908                          struct mvpp2_rx_queue *rxq)
4910{
4911        u32 rxq_dma;
4912        int cpu;
4913
4914        rxq->size = port->rx_ring_size;
4915
4916        /* Allocate memory for RX descriptors */
4917        rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4918                                        rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4919                                        &rxq->descs_dma, GFP_KERNEL);
4920        if (!rxq->descs)
4921                return -ENOMEM;
4922
4923        rxq->last_desc = rxq->size - 1;
4924
4925        /* Zero occupied and non-occupied counters - direct access */
4926        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4927
4928        /* Set Rx descriptors queue starting address - indirect access */
4929        cpu = get_cpu();
4930        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4931        if (port->priv->hw_version == MVPP21)
4932                rxq_dma = rxq->descs_dma;
4933        else
4934                rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
4935        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4936        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4937        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
4938        put_cpu();
4939
4940        /* Set Offset */
4941        mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
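        /* With a typical NET_SKB_PAD of 64 bytes this programs 64 >> 5 = 2
         * units of 32 bytes as the packet offset within the buffer.
         */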
4942
4943        /* Set coalescing pkts and time */
4944        mvpp2_rx_pkts_coal_set(port, rxq);
4945        mvpp2_rx_time_coal_set(port, rxq);
4946
4947        /* Add number of descriptors ready for receiving packets */
4948        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4949
4950        return 0;
4951}
4952
4953/* Push packets received by the RXQ to BM pool */
4954static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4955                                struct mvpp2_rx_queue *rxq)
4956{
4957        int rx_received, i;
4958
4959        rx_received = mvpp2_rxq_received(port, rxq->id);
4960        if (!rx_received)
4961                return;
4962
4963        for (i = 0; i < rx_received; i++) {
4964                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4965                u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
4966                int pool;
4967
4968                pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4969                        MVPP2_RXD_BM_POOL_ID_OFFS;
4970
4971                mvpp2_pool_refill(port, pool,
4972                                  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4973                                  mvpp2_rxdesc_cookie_get(port, rx_desc));
4974        }
4975        mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4976}
4977
4978/* Cleanup Rx queue */
4979static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4980                             struct mvpp2_rx_queue *rxq)
4981{
4982        int cpu;
4983
4984        mvpp2_rxq_drop_pkts(port, rxq);
4985
4986        if (rxq->descs)
4987                dma_free_coherent(port->dev->dev.parent,
4988                                  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4989                                  rxq->descs,
4990                                  rxq->descs_dma);
4991
4992        rxq->descs             = NULL;
4993        rxq->last_desc         = 0;
4994        rxq->next_desc_to_proc = 0;
4995        rxq->descs_dma         = 0;
4996
4997        /* Clear the Rx descriptors queue starting address and size, and
4998         * reset the free/occupied descriptor counters
4999         */
5000        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5001        cpu = get_cpu();
5002        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5003        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5004        mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
5005        put_cpu();
5006}
5007
5008/* Create and initialize a Tx queue */
5009static int mvpp2_txq_init(struct mvpp2_port *port,
5010                          struct mvpp2_tx_queue *txq)
5011{
5012        u32 val;
5013        int cpu, desc, desc_per_txq, tx_port_num;
5014        struct mvpp2_txq_pcpu *txq_pcpu;
5015
5016        txq->size = port->tx_ring_size;
5017
5018        /* Allocate memory for Tx descriptors */
5019        txq->descs = dma_alloc_coherent(port->dev->dev.parent,
5020                                txq->size * MVPP2_DESC_ALIGNED_SIZE,
5021                                &txq->descs_dma, GFP_KERNEL);
5022        if (!txq->descs)
5023                return -ENOMEM;
5024
5025        txq->last_desc = txq->size - 1;
5026
5027        /* Set Tx descriptors queue starting address - indirect access */
5028        cpu = get_cpu();
5029        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5030        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5031                           txq->descs_dma);
5032        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5033                           txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5034        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5035        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5036                           txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5037        val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
5038        val &= ~MVPP2_TXQ_PENDING_MASK;
5039        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
5040
5041        /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5042         * for each existing TXQ.
5043         * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT;
5044         * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
5045         */
5046        desc_per_txq = 16;
5047        desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5048               (txq->log_id * desc_per_txq);
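        /* For example, assuming MVPP2_MAX_TXQ is 8, logical queue 2 of port 1
         * gets a prefetch buffer base index of 1 * 8 * 16 + 2 * 16 = 160.
         */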
5049
5050        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5051                           MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5052                           MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
5053        put_cpu();
5054
5055        /* WRR / EJP configuration - indirect access */
5056        tx_port_num = mvpp2_egress_port(port);
5057        mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5058
5059        val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5060        val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5061        val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5062        val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5063        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5064
5065        val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5066        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5067                    val);
5068
5069        for_each_present_cpu(cpu) {
5070                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5071                txq_pcpu->size = txq->size;
5072                txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
5073                                                sizeof(*txq_pcpu->buffs),
5074                                                GFP_KERNEL);
5075                if (!txq_pcpu->buffs)
5076                        goto cleanup;
5077
5078                txq_pcpu->count = 0;
5079                txq_pcpu->reserved_num = 0;
5080                txq_pcpu->txq_put_index = 0;
5081                txq_pcpu->txq_get_index = 0;
5082        }
5083
5084        return 0;
5085cleanup:
5086        for_each_present_cpu(cpu) {
5087                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5088                kfree(txq_pcpu->buffs);
5089        }
5090
5091        dma_free_coherent(port->dev->dev.parent,
5092                          txq->size * MVPP2_DESC_ALIGNED_SIZE,
5093                          txq->descs, txq->descs_dma);
5094
5095        return -ENOMEM;
5096}
5097
5098/* Free allocated TXQ resources */
5099static void mvpp2_txq_deinit(struct mvpp2_port *port,
5100                             struct mvpp2_tx_queue *txq)
5101{
5102        struct mvpp2_txq_pcpu *txq_pcpu;
5103        int cpu;
5104
5105        for_each_present_cpu(cpu) {
5106                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5107                kfree(txq_pcpu->buffs);
5108        }
5109
5110        if (txq->descs)
5111                dma_free_coherent(port->dev->dev.parent,
5112                                  txq->size * MVPP2_DESC_ALIGNED_SIZE,
5113                                  txq->descs, txq->descs_dma);
5114
5115        txq->descs             = NULL;
5116        txq->last_desc         = 0;
5117        txq->next_desc_to_proc = 0;
5118        txq->descs_dma         = 0;
5119
5120        /* Set minimum bandwidth for disabled TXQs */
5121        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5122
5123        /* Set Tx descriptors queue starting address and size */
5124        cpu = get_cpu();
5125        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5126        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5127        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
5128        put_cpu();
5129}
5130
5131/* Drain and clean up a Tx queue */
5132static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5133{
5134        struct mvpp2_txq_pcpu *txq_pcpu;
5135        int delay, pending, cpu;
5136        u32 val;
5137
5138        cpu = get_cpu();
5139        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5140        val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
5141        val |= MVPP2_TXQ_DRAIN_EN_MASK;
5142        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5143
5144        /* NAPI has been stopped, so wait for all pending packets
5145         * to be transmitted.
5146         */
5147        delay = 0;
5148        do {
5149                if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5150                        netdev_warn(port->dev,
5151                                    "port %d: cleaning queue %d timed out\n",
5152                                    port->id, txq->log_id);
5153                        break;
5154                }
5155                mdelay(1);
5156                delay++;
5157
5158                pending = mvpp2_percpu_read(port->priv, cpu,
5159                                            MVPP2_TXQ_PENDING_REG);
5160                pending &= MVPP2_TXQ_PENDING_MASK;
5161        } while (pending);
5162
5163        val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
5164        mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5165        put_cpu();
5166
5167        for_each_present_cpu(cpu) {
5168                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5169
5170                /* Release all packets */
5171                mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5172
5173                /* Reset queue */
5174                txq_pcpu->count = 0;
5175                txq_pcpu->txq_put_index = 0;
5176                txq_pcpu->txq_get_index = 0;
5177        }
5178}
5179
5180/* Cleanup all Tx queues */
5181static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5182{
5183        struct mvpp2_tx_queue *txq;
5184        int queue;
5185        u32 val;
5186
5187        val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5188
5189        /* Reset Tx ports and delete Tx queues */
5190        val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5191        mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5192
5193        for (queue = 0; queue < txq_number; queue++) {
5194                txq = port->txqs[queue];
5195                mvpp2_txq_clean(port, txq);
5196                mvpp2_txq_deinit(port, txq);
5197        }
5198
5199        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5200
5201        val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5202        mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5203}
5204
5205/* Cleanup all Rx queues */
5206static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5207{
5208        int queue;
5209
5210        for (queue = 0; queue < rxq_number; queue++)
5211                mvpp2_rxq_deinit(port, port->rxqs[queue]);
5212}
5213
5214/* Init all Rx queues for port */
5215static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5216{
5217        int queue, err;
5218
5219        for (queue = 0; queue < rxq_number; queue++) {
5220                err = mvpp2_rxq_init(port, port->rxqs[queue]);
5221                if (err)
5222                        goto err_cleanup;
5223        }
5224        return 0;
5225
5226err_cleanup:
5227        mvpp2_cleanup_rxqs(port);
5228        return err;
5229}
5230
5231/* Init all tx queues for port */
5232static int mvpp2_setup_txqs(struct mvpp2_port *port)
5233{
5234        struct mvpp2_tx_queue *txq;
5235        int queue, err;
5236
5237        for (queue = 0; queue < txq_number; queue++) {
5238                txq = port->txqs[queue];
5239                err = mvpp2_txq_init(port, txq);
5240                if (err)
5241                        goto err_cleanup;
5242        }
5243
5244        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5245        return 0;
5246
5247err_cleanup:
5248        mvpp2_cleanup_txqs(port);
5249        return err;
5250}
5251
5252/* The callback for per-port interrupt */
5253static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5254{
5255        struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
5256
5257        mvpp2_interrupts_disable(port);
5258
5259        napi_schedule(&port->napi);
5260
5261        return IRQ_HANDLED;
5262}
5263
5264/* Adjust link */
5265static void mvpp2_link_event(struct net_device *dev)
5266{
5267        struct mvpp2_port *port = netdev_priv(dev);
5268        struct phy_device *phydev = dev->phydev;
5269        int status_change = 0;
5270        u32 val;
5271
5272        if (phydev->link) {
5273                if ((port->speed != phydev->speed) ||
5274                    (port->duplex != phydev->duplex)) {
5275                        u32 val;
5276
5277                        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5278                        val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5279                                 MVPP2_GMAC_CONFIG_GMII_SPEED |
5280                                 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5281                                 MVPP2_GMAC_AN_SPEED_EN |
5282                                 MVPP2_GMAC_AN_DUPLEX_EN);
5283
5284                        if (phydev->duplex)
5285                                val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5286
5287                        if (phydev->speed == SPEED_1000)
5288                                val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5289                        else if (phydev->speed == SPEED_100)
5290                                val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5291
5292                        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5293
5294                        port->duplex = phydev->duplex;
5295                        port->speed  = phydev->speed;
5296                }
5297        }
5298
5299        if (phydev->link != port->link) {
5300                if (!phydev->link) {
5301                        port->duplex = -1;
5302                        port->speed = 0;
5303                }
5304
5305                port->link = phydev->link;
5306                status_change = 1;
5307        }
5308
5309        if (status_change) {
5310                if (phydev->link) {
5311                        val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5312                        val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5313                                MVPP2_GMAC_FORCE_LINK_DOWN);
5314                        writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5315                        mvpp2_egress_enable(port);
5316                        mvpp2_ingress_enable(port);
5317                } else {
5318                        mvpp2_ingress_disable(port);
5319                        mvpp2_egress_disable(port);
5320                }
5321                phy_print_status(phydev);
5322        }
5323}
5324
5325static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5326{
5327        ktime_t interval;
5328
5329        if (!port_pcpu->timer_scheduled) {
5330                port_pcpu->timer_scheduled = true;
5331                interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
5332                hrtimer_start(&port_pcpu->tx_done_timer, interval,
5333                              HRTIMER_MODE_REL_PINNED);
5334        }
5335}
5336
5337static void mvpp2_tx_proc_cb(unsigned long data)
5338{
5339        struct net_device *dev = (struct net_device *)data;
5340        struct mvpp2_port *port = netdev_priv(dev);
5341        struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5342        unsigned int tx_todo, cause;
5343
5344        if (!netif_running(dev))
5345                return;
5346        port_pcpu->timer_scheduled = false;
5347
5348        /* Process all the Tx queues */
5349        cause = (1 << txq_number) - 1;
5350        tx_todo = mvpp2_tx_done(port, cause);
5351
5352        /* Set the timer in case not all the packets were processed */
5353        if (tx_todo)
5354                mvpp2_timer_set(port_pcpu);
5355}
5356
5357static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5358{
5359        struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5360                                                         struct mvpp2_port_pcpu,
5361                                                         tx_done_timer);
5362
5363        tasklet_schedule(&port_pcpu->tx_done_tasklet);
5364
5365        return HRTIMER_NORESTART;
5366}
5367
5368/* Main RX/TX processing routines */
5369
5370/* Display more error info */
5371static void mvpp2_rx_error(struct mvpp2_port *port,
5372                           struct mvpp2_rx_desc *rx_desc)
5373{
5374        u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5375        size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
5376
5377        switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5378        case MVPP2_RXD_ERR_CRC:
5379                netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5380                           status, sz);
5381                break;
5382        case MVPP2_RXD_ERR_OVERRUN:
5383                netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5384                           status, sz);
5385                break;
5386        case MVPP2_RXD_ERR_RESOURCE:
5387                netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5388                           status, sz);
5389                break;
5390        }
5391}
5392
5393/* Handle RX checksum offload */
5394static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5395                          struct sk_buff *skb)
5396{
5397        if (((status & MVPP2_RXD_L3_IP4) &&
5398             !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5399            (status & MVPP2_RXD_L3_IP6))
5400                if (((status & MVPP2_RXD_L4_UDP) ||
5401                     (status & MVPP2_RXD_L4_TCP)) &&
5402                     (status & MVPP2_RXD_L4_CSUM_OK)) {
5403                        skb->csum = 0;
5404                        skb->ip_summed = CHECKSUM_UNNECESSARY;
5405                        return;
5406                }
5407
5408        skb->ip_summed = CHECKSUM_NONE;
5409}
5410
5411/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5412static int mvpp2_rx_refill(struct mvpp2_port *port,
5413                           struct mvpp2_bm_pool *bm_pool, int pool)
5414{
5415        dma_addr_t dma_addr;
5416        phys_addr_t phys_addr;
5417        void *buf;
5418
5419        /* No recycle or too many buffers are in use, so allocate a new skb */
5420        buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5421                              GFP_ATOMIC);
5422        if (!buf)
5423                return -ENOMEM;
5424
5425        mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
5426
5427        return 0;
5428}
5429
5430/* Handle tx checksum */
5431static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5432{
5433        if (skb->ip_summed == CHECKSUM_PARTIAL) {
5434                int ip_hdr_len = 0;
5435                u8 l4_proto;
5436
5437                if (skb->protocol == htons(ETH_P_IP)) {
5438                        struct iphdr *ip4h = ip_hdr(skb);
5439
5440                        /* Calculate IPv4 checksum and L4 checksum */
5441                        ip_hdr_len = ip4h->ihl;
5442                        l4_proto = ip4h->protocol;
5443                } else if (skb->protocol == htons(ETH_P_IPV6)) {
5444                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
5445
5446                        /* Read l4_protocol from one of IPv6 extra headers */
5447                        if (skb_network_header_len(skb) > 0)
5448                                ip_hdr_len = (skb_network_header_len(skb) >> 2);
5449                        l4_proto = ip6h->nexthdr;
5450                } else {
5451                        return MVPP2_TXD_L4_CSUM_NOT;
5452                }
5453
5454                return mvpp2_txq_desc_csum(skb_network_offset(skb),
5455                                skb->protocol, ip_hdr_len, l4_proto);
5456        }
5457
5458        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5459}
5460
5461/* Main rx processing */
5462static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5463                    struct mvpp2_rx_queue *rxq)
5464{
5465        struct net_device *dev = port->dev;
5466        int rx_received;
5467        int rx_done = 0;
5468        u32 rcvd_pkts = 0;
5469        u32 rcvd_bytes = 0;
5470
5471        /* Get number of received packets and clamp the to-do */
5472        rx_received = mvpp2_rxq_received(port, rxq->id);
5473        if (rx_todo > rx_received)
5474                rx_todo = rx_received;
5475
5476        while (rx_done < rx_todo) {
5477                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5478                struct mvpp2_bm_pool *bm_pool;
5479                struct sk_buff *skb;
5480                unsigned int frag_size;
5481                dma_addr_t dma_addr;
5482                phys_addr_t phys_addr;
5483                u32 rx_status;
5484                int pool, rx_bytes, err;
5485                void *data;
5486
5487                rx_done++;
5488                rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5489                rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5490                rx_bytes -= MVPP2_MH_SIZE;
5491                dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5492                phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5493                data = (void *)phys_to_virt(phys_addr);
5494
5495                pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5496                        MVPP2_RXD_BM_POOL_ID_OFFS;
5497                bm_pool = &port->priv->bm_pools[pool];
5498
5499                /* In case of an error, release the requested buffer pointer
5500                 * to the Buffer Manager. This request process is controlled
5501                 * by the hardware, and the information about the buffer is
5502                 * contained in the RX descriptor.
5503                 */
5504                if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5505err_drop_frame:
5506                        dev->stats.rx_errors++;
5507                        mvpp2_rx_error(port, rx_desc);
5508                        /* Return the buffer to the pool */
5509                        mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
5510                        continue;
5511                }
5512
5513                if (bm_pool->frag_size > PAGE_SIZE)
5514                        frag_size = 0;
5515                else
5516                        frag_size = bm_pool->frag_size;
5517
5518                skb = build_skb(data, frag_size);
5519                if (!skb) {
5520                        netdev_warn(port->dev, "skb build failed\n");
5521                        goto err_drop_frame;
5522                }
5523
5524                err = mvpp2_rx_refill(port, bm_pool, pool);
5525                if (err) {
5526                        netdev_err(port->dev, "failed to refill BM pools\n");
5527                        goto err_drop_frame;
5528                }
5529
5530                dma_unmap_single(dev->dev.parent, dma_addr,
5531                                 bm_pool->buf_size, DMA_FROM_DEVICE);
5532
5533                rcvd_pkts++;
5534                rcvd_bytes += rx_bytes;
5535
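                /* Skip the 2-byte Marvell header and the NET_SKB_PAD offset
                 * programmed as the Rx packet offset in mvpp2_rxq_init(), so
                 * that skb->data points at the Ethernet header.
                 */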
5536                skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
5537                skb_put(skb, rx_bytes);
5538                skb->protocol = eth_type_trans(skb, dev);
5539                mvpp2_rx_csum(port, rx_status, skb);
5540
5541                napi_gro_receive(&port->napi, skb);
5542        }
5543
5544        if (rcvd_pkts) {
5545                struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5546
5547                u64_stats_update_begin(&stats->syncp);
5548                stats->rx_packets += rcvd_pkts;
5549                stats->rx_bytes   += rcvd_bytes;
5550                u64_stats_update_end(&stats->syncp);
5551        }
5552
5553        /* Update Rx queue management counters */
5554        wmb();
5555        mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5556
5557        return rx_todo;
5558}
5559
5560static inline void
5561tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5562                  struct mvpp2_tx_desc *desc)
5563{
5564        dma_addr_t buf_dma_addr =
5565                mvpp2_txdesc_dma_addr_get(port, desc);
5566        size_t buf_sz =
5567                mvpp2_txdesc_size_get(port, desc);
5568        dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5569                         buf_sz, DMA_TO_DEVICE);
5570        mvpp2_txq_desc_put(txq);
5571}
5572
5573/* Handle tx fragmentation processing */
5574static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5575                                 struct mvpp2_tx_queue *aggr_txq,
5576                                 struct mvpp2_tx_queue *txq)
5577{
5578        struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5579        struct mvpp2_tx_desc *tx_desc;
5580        int i;
5581        dma_addr_t buf_dma_addr;
5582
5583        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5584                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5585                void *addr = page_address(frag->page.p) + frag->page_offset;
5586
5587                tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5588                mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5589                mvpp2_txdesc_size_set(port, tx_desc, frag->size);
5590
5591                buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
5592                                               frag->size,
5593                                               DMA_TO_DEVICE);
5594                if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
5595                        mvpp2_txq_desc_put(txq);
5596                        goto cleanup;
5597                }
5598
5599                mvpp2_txdesc_offset_set(port, tx_desc,
5600                                        buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5601                mvpp2_txdesc_dma_addr_set(port, tx_desc,
5602                                          buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5603
5604                if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5605                        /* Last descriptor */
5606                        mvpp2_txdesc_cmd_set(port, tx_desc,
5607                                             MVPP2_TXD_L_DESC);
5608                        mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5609                } else {
5610                        /* Descriptor in the middle: Not First, Not Last */
5611                        mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5612                        mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5613                }
5614        }
5615
5616        return 0;
5617cleanup:
5618        /* Release all descriptors that were used to map fragments of
5619         * this packet, as well as the corresponding DMA mappings
5620         */
5621        for (i = i - 1; i >= 0; i--) {
5622                tx_desc = txq->descs + i;
5623                tx_desc_unmap_put(port, txq, tx_desc);
5624        }
5625
5626        return -ENOMEM;
5627}
5628
5629/* Main tx processing */
5630static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5631{
5632        struct mvpp2_port *port = netdev_priv(dev);
5633        struct mvpp2_tx_queue *txq, *aggr_txq;
5634        struct mvpp2_txq_pcpu *txq_pcpu;
5635        struct mvpp2_tx_desc *tx_desc;
5636        dma_addr_t buf_dma_addr;
5637        int frags = 0;
5638        u16 txq_id;
5639        u32 tx_cmd;
5640
5641        txq_id = skb_get_queue_mapping(skb);
5642        txq = port->txqs[txq_id];
5643        txq_pcpu = this_cpu_ptr(txq->pcpu);
5644        aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5645
5646        frags = skb_shinfo(skb)->nr_frags + 1;
5647
5648        /* Check number of available descriptors */
5649        if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5650            mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5651                                             txq_pcpu, frags)) {
5652                frags = 0;
5653                goto out;
5654        }
5655
5656        /* Get a descriptor for the first part of the packet */
5657        tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5658        mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5659        mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
5660
5661        buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
5662                                      skb_headlen(skb), DMA_TO_DEVICE);
5663        if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
5664                mvpp2_txq_desc_put(txq);
5665                frags = 0;
5666                goto out;
5667        }
5668
5669        mvpp2_txdesc_offset_set(port, tx_desc,
5670                                buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5671        mvpp2_txdesc_dma_addr_set(port, tx_desc,
5672                                  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
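            /* Illustrative sketch (not part of the driver): the DMA address
             * is split into a low-bit offset and an aligned base, so the two
             * descriptor fields always recombine to the original address.
             * Assuming a mask value of 0x3f purely for this example:
             *
             *	addr   = 0x12345678
             *	offset = addr & 0x3f   = 0x38
             *	base   = addr & ~0x3f  = 0x12345640
             *	base + offset          = 0x12345678
             */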
5673
5674        tx_cmd = mvpp2_skb_tx_csum(port, skb);
5675
5676        if (frags == 1) {
5677                /* First and Last descriptor */
5678                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5679                mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5680                mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5681        } else {
5682                /* First but not Last */
5683                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5684                mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5685                mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5686
5687                /* Continue with other skb fragments */
5688                if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5689                        tx_desc_unmap_put(port, txq, tx_desc);
5690                        frags = 0;
5691                        goto out;
5692                }
5693        }
5694
5695        txq_pcpu->reserved_num -= frags;
5696        txq_pcpu->count += frags;
5697        aggr_txq->count += frags;
5698
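            /* The write barrier below ensures that all descriptor updates
             * made above are visible in memory before the hardware is told
             * about the newly pending descriptors.
             */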
5699        /* Enable transmit */
5700        wmb();
5701        mvpp2_aggr_txq_pend_desc_add(port, frags);
5702
5703        if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5704                struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5705
5706                netif_tx_stop_queue(nq);
5707        }
5708out:
5709        if (frags > 0) {
5710                struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5711
5712                u64_stats_update_begin(&stats->syncp);
5713                stats->tx_packets++;
5714                stats->tx_bytes += skb->len;
5715                u64_stats_update_end(&stats->syncp);
5716        } else {
5717                dev->stats.tx_dropped++;
5718                dev_kfree_skb_any(skb);
5719        }
5720
5721        /* Finalize TX processing */
5722        if (txq_pcpu->count >= txq->done_pkts_coal)
5723                mvpp2_txq_done(port, txq, txq_pcpu);
5724
5725        /* Set the timer in case not all frags were processed */
5726        if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5727                struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5728
5729                mvpp2_timer_set(port_pcpu);
5730        }
5731
5732        return NETDEV_TX_OK;
5733}
5734
5735static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5736{
5737        if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5738                netdev_err(dev, "FCS error\n");
5739        if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5740                netdev_err(dev, "rx fifo overrun error\n");
5741        if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5742                netdev_err(dev, "tx fifo underrun error\n");
5743}
5744
5745static int mvpp2_poll(struct napi_struct *napi, int budget)
5746{
5747        u32 cause_rx_tx, cause_rx, cause_misc;
5748        int rx_done = 0;
5749        struct mvpp2_port *port = netdev_priv(napi->dev);
5750        int cpu = smp_processor_id();
5751
5752        /* Rx/Tx cause register
5753         *
5754         * Bits 0-15: each bit indicates received packets on the Rx queue
5755         * (bit 0 is for Rx queue 0).
5756         *
5757         * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5758         * (bit 16 is for Tx queue 0).
5759         *
5760         * Each CPU has its own Rx/Tx cause register
5761         */
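            /* Illustrative decode (not part of the driver), assuming the RXQ
             * and TXQ "occupied descriptor" masks cover exactly the bit
             * ranges described above: a raw cause value of 0x00010003 means
             * Rx queues 0 and 1 have packets pending and Tx queue 0 has
             * completed work:
             *
             *	cause & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK = 0x00000003
             *	cause & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK = 0x00010000
             */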
5762        cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
5763                                        MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5764        cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5765        cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5766
5767        if (cause_misc) {
5768                mvpp2_cause_error(port->dev, cause_misc);
5769
5770                /* Clear the cause register */
5771                mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5772                mvpp2_percpu_write(port->priv, cpu,
5773                                   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5774                                   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5775        }
5776
5777        cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5778
5779        /* Process RX packets */
5780        cause_rx |= port->pending_cause_rx;
5781        while (cause_rx && budget > 0) {
5782                int count;
5783                struct mvpp2_rx_queue *rxq;
5784
5785                rxq = mvpp2_get_rx_queue(port, cause_rx);
5786                if (!rxq)
5787                        break;
5788
5789                count = mvpp2_rx(port, budget, rxq);
5790                rx_done += count;
5791                budget -= count;
5792                if (budget > 0) {
5793                        /* Clear the bit associated with this Rx queue
5794                         * so that next iteration will continue from
5795                         * the next Rx queue.
5796                         */
5797                        cause_rx &= ~(1 << rxq->logic_rxq);
5798                }
5799        }
5800
5801        if (budget > 0) {
5802                cause_rx = 0;
5803                napi_complete_done(napi, rx_done);
5804
5805                mvpp2_interrupts_enable(port);
5806        }
5807        port->pending_cause_rx = cause_rx;
5808        return rx_done;
5809}
5810
5811/* Set hw internals when starting port */
5812static void mvpp2_start_dev(struct mvpp2_port *port)
5813{
5814        struct net_device *ndev = port->dev;
5815
5816        mvpp2_gmac_max_rx_size_set(port);
5817        mvpp2_txp_max_tx_size_set(port);
5818
5819        napi_enable(&port->napi);
5820
5821        /* Enable interrupts on all CPUs */
5822        mvpp2_interrupts_enable(port);
5823
5824        mvpp2_port_enable(port);
5825        phy_start(ndev->phydev);
5826        netif_tx_start_all_queues(port->dev);
5827}
5828
5829/* Set hw internals when stopping port */
5830static void mvpp2_stop_dev(struct mvpp2_port *port)
5831{
5832        struct net_device *ndev = port->dev;
5833
5834        /* Stop new packets from arriving to RXQs */
5835        mvpp2_ingress_disable(port);
5836
5837        mdelay(10);
5838
5839        /* Disable interrupts on all CPUs */
5840        mvpp2_interrupts_disable(port);
5841
5842        napi_disable(&port->napi);
5843
5844        netif_carrier_off(port->dev);
5845        netif_tx_stop_all_queues(port->dev);
5846
5847        mvpp2_egress_disable(port);
5848        mvpp2_port_disable(port);
5849        phy_stop(ndev->phydev);
5850}
5851
5852static int mvpp2_check_ringparam_valid(struct net_device *dev,
5853                                       struct ethtool_ringparam *ring)
5854{
5855        u16 new_rx_pending = ring->rx_pending;
5856        u16 new_tx_pending = ring->tx_pending;
5857
5858        if (ring->rx_pending == 0 || ring->tx_pending == 0)
5859                return -EINVAL;
5860
5861        if (ring->rx_pending > MVPP2_MAX_RXD)
5862                new_rx_pending = MVPP2_MAX_RXD;
5863        else if (!IS_ALIGNED(ring->rx_pending, 16))
5864                new_rx_pending = ALIGN(ring->rx_pending, 16);
5865
5866        if (ring->tx_pending > MVPP2_MAX_TXD)
5867                new_tx_pending = MVPP2_MAX_TXD;
5868        else if (!IS_ALIGNED(ring->tx_pending, 32))
5869                new_tx_pending = ALIGN(ring->tx_pending, 32);
5870
5871        if (ring->rx_pending != new_rx_pending) {
5872                netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
5873                            ring->rx_pending, new_rx_pending);
5874                ring->rx_pending = new_rx_pending;
5875        }
5876
5877        if (ring->tx_pending != new_tx_pending) {
5878                netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
5879                            ring->tx_pending, new_tx_pending);
5880                ring->tx_pending = new_tx_pending;
5881        }
5882
5883        return 0;
5884}
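
    /* Worked example for the helper above (illustrative request values):
     * rx_pending = 100 and tx_pending = 100 are not rejected but rounded up
     * to the ring granularities used above, while anything beyond
     * MVPP2_MAX_RXD/MVPP2_MAX_TXD is clamped to the maximum:
     *
     *	ALIGN(100, 16) == 112  ->  ring->rx_pending becomes 112
     *	ALIGN(100, 32) == 128  ->  ring->tx_pending becomes 128
     */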
5885
5886static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5887{
5888        u32 mac_addr_l, mac_addr_m, mac_addr_h;
5889
5890        mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5891        mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5892        mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5893        addr[0] = (mac_addr_h >> 24) & 0xFF;
5894        addr[1] = (mac_addr_h >> 16) & 0xFF;
5895        addr[2] = (mac_addr_h >> 8) & 0xFF;
5896        addr[3] = mac_addr_h & 0xFF;
5897        addr[4] = mac_addr_m & 0xFF;
5898        addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5899}
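
    /* Worked example for the helper above (made-up register values): with
     * mac_addr_h == 0x0050c2aa and mac_addr_m == 0x000000bb, the decoded
     * bytes are addr[0..4] = 00:50:c2:aa:bb, and addr[5] comes from the byte
     * at MVPP2_GMAC_SA_LOW_OFFS of mac_addr_l.
     */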
5900
5901static int mvpp2_phy_connect(struct mvpp2_port *port)
5902{
5903        struct phy_device *phy_dev;
5904
5905        phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5906                                 port->phy_interface);
5907        if (!phy_dev) {
5908                netdev_err(port->dev, "cannot connect to phy\n");
5909                return -ENODEV;
5910        }
5911        phy_dev->supported &= PHY_GBIT_FEATURES;
5912        phy_dev->advertising = phy_dev->supported;
5913
5914        port->link    = 0;
5915        port->duplex  = 0;
5916        port->speed   = 0;
5917
5918        return 0;
5919}
5920
5921static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5922{
5923        struct net_device *ndev = port->dev;
5924
5925        phy_disconnect(ndev->phydev);
5926}
5927
5928static int mvpp2_open(struct net_device *dev)
5929{
5930        struct mvpp2_port *port = netdev_priv(dev);
5931        unsigned char mac_bcast[ETH_ALEN] = {
5932                        0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5933        int err;
5934
5935        err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5936        if (err) {
5937                netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5938                return err;
5939        }
5940        err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5941                                      dev->dev_addr, true);
5942        if (err) {
5943                netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
5944                return err;
5945        }
5946        err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5947        if (err) {
5948                netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5949                return err;
5950        }
5951        err = mvpp2_prs_def_flow(port);
5952        if (err) {
5953                netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5954                return err;
5955        }
5956
5957        /* Allocate the Rx/Tx queues */
5958        err = mvpp2_setup_rxqs(port);
5959        if (err) {
5960                netdev_err(port->dev, "cannot allocate Rx queues\n");
5961                return err;
5962        }
5963
5964        err = mvpp2_setup_txqs(port);
5965        if (err) {
5966                netdev_err(port->dev, "cannot allocate Tx queues\n");
5967                goto err_cleanup_rxqs;
5968        }
5969
5970        err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5971        if (err) {
5972                netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5973                goto err_cleanup_txqs;
5974        }
5975
5976        /* By default, the link is down */
5977        netif_carrier_off(port->dev);
5978
5979        err = mvpp2_phy_connect(port);
5980        if (err < 0)
5981                goto err_free_irq;
5982
5983        /* Unmask interrupts on all CPUs */
5984        on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5985
5986        mvpp2_start_dev(port);
5987
5988        return 0;
5989
5990err_free_irq:
5991        free_irq(port->irq, port);
5992err_cleanup_txqs:
5993        mvpp2_cleanup_txqs(port);
5994err_cleanup_rxqs:
5995        mvpp2_cleanup_rxqs(port);
5996        return err;
5997}
5998
5999static int mvpp2_stop(struct net_device *dev)
6000{
6001        struct mvpp2_port *port = netdev_priv(dev);
6002        struct mvpp2_port_pcpu *port_pcpu;
6003        int cpu;
6004
6005        mvpp2_stop_dev(port);
6006        mvpp2_phy_disconnect(port);
6007
6008        /* Mask interrupts on all CPUs */
6009        on_each_cpu(mvpp2_interrupts_mask, port, 1);
6010
6011        free_irq(port->irq, port);
6012        for_each_present_cpu(cpu) {
6013                port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6014
6015                hrtimer_cancel(&port_pcpu->tx_done_timer);
6016                port_pcpu->timer_scheduled = false;
6017                tasklet_kill(&port_pcpu->tx_done_tasklet);
6018        }
6019        mvpp2_cleanup_rxqs(port);
6020        mvpp2_cleanup_txqs(port);
6021
6022        return 0;
6023}
6024
6025static void mvpp2_set_rx_mode(struct net_device *dev)
6026{
6027        struct mvpp2_port *port = netdev_priv(dev);
6028        struct mvpp2 *priv = port->priv;
6029        struct netdev_hw_addr *ha;
6030        int id = port->id;
6031        bool allmulti = dev->flags & IFF_ALLMULTI;
6032
6033        mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
6034        mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
6035        mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
6036
6037        /* Remove all of port->id's multicast entries */
6038        mvpp2_prs_mcast_del_all(priv, id);
6039
6040        if (allmulti && !netdev_mc_empty(dev)) {
6041                netdev_for_each_mc_addr(ha, dev)
6042                        mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
6043        }
6044}
6045
6046static int mvpp2_set_mac_address(struct net_device *dev, void *p)
6047{
6048        struct mvpp2_port *port = netdev_priv(dev);
6049        const struct sockaddr *addr = p;
6050        int err;
6051
6052        if (!is_valid_ether_addr(addr->sa_data)) {
6053                err = -EADDRNOTAVAIL;
6054                goto log_error;
6055        }
6056
6057        if (!netif_running(dev)) {
6058                err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6059                if (!err)
6060                        return 0;
6061                /* Reconfigure parser to accept the original MAC address */
6062                err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6063                if (err)
6064                        goto log_error;
6065        }
6066
6067        mvpp2_stop_dev(port);
6068
6069        err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6070        if (!err)
6071                goto out_start;
6072
6073        /* Reconfigure parser to accept the original MAC address */
6074        err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6075        if (err)
6076                goto log_error;
6077out_start:
6078        mvpp2_start_dev(port);
6079        mvpp2_egress_enable(port);
6080        mvpp2_ingress_enable(port);
6081        return 0;
6082log_error:
6083        netdev_err(dev, "failed to change MAC address\n");
6084        return err;
6085}
6086
6087static int mvpp2_change_mtu(struct net_device *dev, int mtu)
6088{
6089        struct mvpp2_port *port = netdev_priv(dev);
6090        int err;
6091
6092        if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
6093                netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
6094                            ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
6095                mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
6096        }
6097
6098        if (!netif_running(dev)) {
6099                err = mvpp2_bm_update_mtu(dev, mtu);
6100                if (!err) {
6101                        port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6102                        return 0;
6103                }
6104
6105                /* Reconfigure BM to the original MTU */
6106                err = mvpp2_bm_update_mtu(dev, dev->mtu);
6107                if (err)
6108                        goto log_error;
6109        }
6110
6111        mvpp2_stop_dev(port);
6112
6113        err = mvpp2_bm_update_mtu(dev, mtu);
6114        if (!err) {
6115                port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6116                goto out_start;
6117        }
6118
6119        /* Reconfigure BM to the original MTU */
6120        err = mvpp2_bm_update_mtu(dev, dev->mtu);
6121        if (err)
6122                goto log_error;
6123
6124out_start:
6125        mvpp2_start_dev(port);
6126        mvpp2_egress_enable(port);
6127        mvpp2_ingress_enable(port);
6128
6129        return 0;
6130log_error:
6131        netdev_err(dev, "failed to change MTU\n");
6132        return err;
6133}
6134
6135static void
6136mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6137{
6138        struct mvpp2_port *port = netdev_priv(dev);
6139        unsigned int start;
6140        int cpu;
6141
6142        for_each_possible_cpu(cpu) {
6143                struct mvpp2_pcpu_stats *cpu_stats;
6144                u64 rx_packets;
6145                u64 rx_bytes;
6146                u64 tx_packets;
6147                u64 tx_bytes;
6148
6149                cpu_stats = per_cpu_ptr(port->stats, cpu);
6150                do {
6151                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
6152                        rx_packets = cpu_stats->rx_packets;
6153                        rx_bytes   = cpu_stats->rx_bytes;
6154                        tx_packets = cpu_stats->tx_packets;
6155                        tx_bytes   = cpu_stats->tx_bytes;
6156                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
6157
6158                stats->rx_packets += rx_packets;
6159                stats->rx_bytes   += rx_bytes;
6160                stats->tx_packets += tx_packets;
6161                stats->tx_bytes   += tx_bytes;
6162        }
6163
6164        stats->rx_errors        = dev->stats.rx_errors;
6165        stats->rx_dropped       = dev->stats.rx_dropped;
6166        stats->tx_dropped       = dev->stats.tx_dropped;
6167}
6168
6169static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6170{
6171        int ret;
6172
6173        if (!dev->phydev)
6174                return -ENOTSUPP;
6175
6176        ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
6177        if (!ret)
6178                mvpp2_link_event(dev);
6179
6180        return ret;
6181}
6182
6183/* Ethtool methods */
6184
6185/* Set interrupt coalescing for ethtool */
6186static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
6187                                      struct ethtool_coalesce *c)
6188{
6189        struct mvpp2_port *port = netdev_priv(dev);
6190        int queue;
6191
6192        for (queue = 0; queue < rxq_number; queue++) {
6193                struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6194
6195                rxq->time_coal = c->rx_coalesce_usecs;
6196                rxq->pkts_coal = c->rx_max_coalesced_frames;
6197                mvpp2_rx_pkts_coal_set(port, rxq);
6198                mvpp2_rx_time_coal_set(port, rxq);
6199        }
6200
6201        for (queue = 0; queue < txq_number; queue++) {
6202                struct mvpp2_tx_queue *txq = port->txqs[queue];
6203
6204                txq->done_pkts_coal = c->tx_max_coalesced_frames;
6205        }
6206
6207        return 0;
6208}
6209
6210/* Get interrupt coalescing for ethtool */
6211static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
6212                                      struct ethtool_coalesce *c)
6213{
6214        struct mvpp2_port *port = netdev_priv(dev);
6215
6216        c->rx_coalesce_usecs        = port->rxqs[0]->time_coal;
6217        c->rx_max_coalesced_frames  = port->rxqs[0]->pkts_coal;
6218        c->tx_max_coalesced_frames  = port->txqs[0]->done_pkts_coal;
6219        return 0;
6220}
6221
6222static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
6223                                      struct ethtool_drvinfo *drvinfo)
6224{
6225        strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
6226                sizeof(drvinfo->driver));
6227        strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
6228                sizeof(drvinfo->version));
6229        strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
6230                sizeof(drvinfo->bus_info));
6231}
6232
6233static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
6234                                        struct ethtool_ringparam *ring)
6235{
6236        struct mvpp2_port *port = netdev_priv(dev);
6237
6238        ring->rx_max_pending = MVPP2_MAX_RXD;
6239        ring->tx_max_pending = MVPP2_MAX_TXD;
6240        ring->rx_pending = port->rx_ring_size;
6241        ring->tx_pending = port->tx_ring_size;
6242}
6243
6244static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
6245                                       struct ethtool_ringparam *ring)
6246{
6247        struct mvpp2_port *port = netdev_priv(dev);
6248        u16 prev_rx_ring_size = port->rx_ring_size;
6249        u16 prev_tx_ring_size = port->tx_ring_size;
6250        int err;
6251
6252        err = mvpp2_check_ringparam_valid(dev, ring);
6253        if (err)
6254                return err;
6255
6256        if (!netif_running(dev)) {
6257                port->rx_ring_size = ring->rx_pending;
6258                port->tx_ring_size = ring->tx_pending;
6259                return 0;
6260        }
6261
6262        /* The interface is running, so we have to force a
6263         * reallocation of the queues
6264         */
6265        mvpp2_stop_dev(port);
6266        mvpp2_cleanup_rxqs(port);
6267        mvpp2_cleanup_txqs(port);
6268
6269        port->rx_ring_size = ring->rx_pending;
6270        port->tx_ring_size = ring->tx_pending;
6271
6272        err = mvpp2_setup_rxqs(port);
6273        if (err) {
6274                /* Reallocate Rx queues with the original ring size */
6275                port->rx_ring_size = prev_rx_ring_size;
6276                ring->rx_pending = prev_rx_ring_size;
6277                err = mvpp2_setup_rxqs(port);
6278                if (err)
6279                        goto err_out;
6280        }
6281        err = mvpp2_setup_txqs(port);
6282        if (err) {
6283                /* Reallocate Tx queues with the original ring size */
6284                port->tx_ring_size = prev_tx_ring_size;
6285                ring->tx_pending = prev_tx_ring_size;
6286                err = mvpp2_setup_txqs(port);
6287                if (err)
6288                        goto err_clean_rxqs;
6289        }
6290
6291        mvpp2_start_dev(port);
6292        mvpp2_egress_enable(port);
6293        mvpp2_ingress_enable(port);
6294
6295        return 0;
6296
6297err_clean_rxqs:
6298        mvpp2_cleanup_rxqs(port);
6299err_out:
6300        netdev_err(dev, "failed to change ring parameters\n");
6301        return err;
6302}
6303
6304/* Device ops */
6305
6306static const struct net_device_ops mvpp2_netdev_ops = {
6307        .ndo_open               = mvpp2_open,
6308        .ndo_stop               = mvpp2_stop,
6309        .ndo_start_xmit         = mvpp2_tx,
6310        .ndo_set_rx_mode        = mvpp2_set_rx_mode,
6311        .ndo_set_mac_address    = mvpp2_set_mac_address,
6312        .ndo_change_mtu         = mvpp2_change_mtu,
6313        .ndo_get_stats64        = mvpp2_get_stats64,
6314        .ndo_do_ioctl           = mvpp2_ioctl,
6315};
6316
6317static const struct ethtool_ops mvpp2_eth_tool_ops = {
6318        .nway_reset     = phy_ethtool_nway_reset,
6319        .get_link       = ethtool_op_get_link,
6320        .set_coalesce   = mvpp2_ethtool_set_coalesce,
6321        .get_coalesce   = mvpp2_ethtool_get_coalesce,
6322        .get_drvinfo    = mvpp2_ethtool_get_drvinfo,
6323        .get_ringparam  = mvpp2_ethtool_get_ringparam,
6324        .set_ringparam  = mvpp2_ethtool_set_ringparam,
6325        .get_link_ksettings = phy_ethtool_get_link_ksettings,
6326        .set_link_ksettings = phy_ethtool_set_link_ksettings,
6327};
6328
6329/* Initialize port HW */
6330static int mvpp2_port_init(struct mvpp2_port *port)
6331{
6332        struct device *dev = port->dev->dev.parent;
6333        struct mvpp2 *priv = port->priv;
6334        struct mvpp2_txq_pcpu *txq_pcpu;
6335        int queue, cpu, err;
6336
6337        if (port->first_rxq + rxq_number >
6338            MVPP2_MAX_PORTS * priv->max_port_rxqs)
6339                return -EINVAL;
6340
6341        /* Disable port */
6342        mvpp2_egress_disable(port);
6343        mvpp2_port_disable(port);
6344
6345        port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6346                                  GFP_KERNEL);
6347        if (!port->txqs)
6348                return -ENOMEM;
6349
6350        /* Associate physical Tx queues with this port and initialize them.
6351         * The mapping is predefined.
6352         */
6353        for (queue = 0; queue < txq_number; queue++) {
6354                int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6355                struct mvpp2_tx_queue *txq;
6356
6357                txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6358                if (!txq) {
6359                        err = -ENOMEM;
6360                        goto err_free_percpu;
6361                }
6362
6363                txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6364                if (!txq->pcpu) {
6365                        err = -ENOMEM;
6366                        goto err_free_percpu;
6367                }
6368
6369                txq->id = queue_phy_id;
6370                txq->log_id = queue;
6371                txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6372                for_each_present_cpu(cpu) {
6373                        txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6374                        txq_pcpu->cpu = cpu;
6375                }
6376
6377                port->txqs[queue] = txq;
6378        }
6379
6380        port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6381                                  GFP_KERNEL);
6382        if (!port->rxqs) {
6383                err = -ENOMEM;
6384                goto err_free_percpu;
6385        }
6386
6387        /* Allocate and initialize Rx queues for this port */
6388        for (queue = 0; queue < rxq_number; queue++) {
6389                struct mvpp2_rx_queue *rxq;
6390
6391                /* Map physical Rx queue to port's logical Rx queue */
6392                rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6393                if (!rxq) {
6394                        err = -ENOMEM;
6395                        goto err_free_percpu;
6396                }
6397                /* Map this Rx queue to a physical queue */
6398                rxq->id = port->first_rxq + queue;
6399                rxq->port = port->id;
6400                rxq->logic_rxq = queue;
6401
6402                port->rxqs[queue] = rxq;
6403        }
6404
6405        /* Configure Rx queue group interrupt for this port */
6406        if (priv->hw_version == MVPP21) {
6407                mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
6408                            rxq_number);
6409        } else {
6410                u32 val;
6411
6412                val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
6413                mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
6414
6415                val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
6416                mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
6417        }
6418
6419        /* Create Rx descriptor rings */
6420        for (queue = 0; queue < rxq_number; queue++) {
6421                struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6422
6423                rxq->size = port->rx_ring_size;
6424                rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6425                rxq->time_coal = MVPP2_RX_COAL_USEC;
6426        }
6427
6428        mvpp2_ingress_disable(port);
6429
6430        /* Port default configuration */
6431        mvpp2_defaults_set(port);
6432
6433        /* Port's classifier configuration */
6434        mvpp2_cls_oversize_rxq_set(port);
6435        mvpp2_cls_port_config(port);
6436
6437        /* Provide an initial Rx packet size */
6438        port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6439
6440        /* Initialize BM pools for software forwarding (swf) */
6441        err = mvpp2_swf_bm_pool_init(port);
6442        if (err)
6443                goto err_free_percpu;
6444
6445        return 0;
6446
6447err_free_percpu:
6448        for (queue = 0; queue < txq_number; queue++) {
6449                if (!port->txqs[queue])
6450                        continue;
6451                free_percpu(port->txqs[queue]->pcpu);
6452        }
6453        return err;
6454}
6455
6456/* Ports initialization */
6457static int mvpp2_port_probe(struct platform_device *pdev,
6458                            struct device_node *port_node,
6459                            struct mvpp2 *priv)
6460{
6461        struct device_node *phy_node;
6462        struct mvpp2_port *port;
6463        struct mvpp2_port_pcpu *port_pcpu;
6464        struct net_device *dev;
6465        struct resource *res;
6466        const char *dt_mac_addr;
6467        const char *mac_from;
6468        char hw_mac_addr[ETH_ALEN] = {0};
6469        u32 id;
6470        int features;
6471        int phy_mode;
6472        int err, i, cpu;
6473
6474        dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number);
6475        if (!dev)
6476                return -ENOMEM;
6477
6478        phy_node = of_parse_phandle(port_node, "phy", 0);
6479        if (!phy_node) {
6480                dev_err(&pdev->dev, "missing phy\n");
6481                err = -ENODEV;
6482                goto err_free_netdev;
6483        }
6484
6485        phy_mode = of_get_phy_mode(port_node);
6486        if (phy_mode < 0) {
6487                dev_err(&pdev->dev, "incorrect phy mode\n");
6488                err = phy_mode;
6489                goto err_free_netdev;
6490        }
6491
6492        if (of_property_read_u32(port_node, "port-id", &id)) {
6493                err = -EINVAL;
6494                dev_err(&pdev->dev, "missing port-id value\n");
6495                goto err_free_netdev;
6496        }
6497
6498        dev->tx_queue_len = MVPP2_MAX_TXD;
6499        dev->watchdog_timeo = 5 * HZ;
6500        dev->netdev_ops = &mvpp2_netdev_ops;
6501        dev->ethtool_ops = &mvpp2_eth_tool_ops;
6502
6503        port = netdev_priv(dev);
6504
6505        port->irq = irq_of_parse_and_map(port_node, 0);
6506        if (port->irq <= 0) {
6507                err = -EINVAL;
6508                goto err_free_netdev;
6509        }
6510
6511        if (of_property_read_bool(port_node, "marvell,loopback"))
6512                port->flags |= MVPP2_F_LOOPBACK;
6513
6514        port->priv = priv;
6515        port->id = id;
6516        if (priv->hw_version == MVPP21)
6517                port->first_rxq = port->id * rxq_number;
6518        else
6519                port->first_rxq = port->id * priv->max_port_rxqs;
6520
6521        port->phy_node = phy_node;
6522        port->phy_interface = phy_mode;
6523
6524        if (priv->hw_version == MVPP21) {
6525                res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
6526                port->base = devm_ioremap_resource(&pdev->dev, res);
6527                if (IS_ERR(port->base)) {
6528                        err = PTR_ERR(port->base);
6529                        goto err_free_irq;
6530                }
6531        } else {
6532                if (of_property_read_u32(port_node, "gop-port-id",
6533                                         &port->gop_id)) {
6534                        err = -EINVAL;
6535                        dev_err(&pdev->dev, "missing gop-port-id value\n");
6536                        goto err_free_irq;
6537                }
6538
6539                port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6540        }
6541
6542        /* Alloc per-cpu stats */
6543        port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6544        if (!port->stats) {
6545                err = -ENOMEM;
6546                goto err_free_irq;
6547        }
6548
6549        dt_mac_addr = of_get_mac_address(port_node);
6550        if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6551                mac_from = "device tree";
6552                ether_addr_copy(dev->dev_addr, dt_mac_addr);
6553        } else {
6554                if (priv->hw_version == MVPP21)
6555                        mvpp21_get_mac_address(port, hw_mac_addr);
6556                if (is_valid_ether_addr(hw_mac_addr)) {
6557                        mac_from = "hardware";
6558                        ether_addr_copy(dev->dev_addr, hw_mac_addr);
6559                } else {
6560                        mac_from = "random";
6561                        eth_hw_addr_random(dev);
6562                }
6563        }
6564
6565        port->tx_ring_size = MVPP2_MAX_TXD;
6566        port->rx_ring_size = MVPP2_MAX_RXD;
6567        port->dev = dev;
6568        SET_NETDEV_DEV(dev, &pdev->dev);
6569
6570        err = mvpp2_port_init(port);
6571        if (err < 0) {
6572                dev_err(&pdev->dev, "failed to init port %d\n", id);
6573                goto err_free_stats;
6574        }
6575
6576        mvpp2_port_mii_set(port);
6577        mvpp2_port_periodic_xon_disable(port);
6578
6579        if (priv->hw_version == MVPP21)
6580                mvpp2_port_fc_adv_enable(port);
6581
6582        mvpp2_port_reset(port);
6583
6584        port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6585        if (!port->pcpu) {
6586                err = -ENOMEM;
6587                goto err_free_txq_pcpu;
6588        }
6589
6590        for_each_present_cpu(cpu) {
6591                port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6592
6593                hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6594                             HRTIMER_MODE_REL_PINNED);
6595                port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6596                port_pcpu->timer_scheduled = false;
6597
6598                tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6599                             (unsigned long)dev);
6600        }
6601
6602        netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6603        features = NETIF_F_SG | NETIF_F_IP_CSUM;
6604        dev->features = features | NETIF_F_RXCSUM;
6605        dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6606        dev->vlan_features |= features;
6607
6608        /* MTU range: 68 - 9676 */
6609        dev->min_mtu = ETH_MIN_MTU;
6610        /* 9676 == 9700 rounded down to a multiple of 8, minus 20 bytes of overhead */
6611        dev->max_mtu = 9676;
6612
6613        err = register_netdev(dev);
6614        if (err < 0) {
6615                dev_err(&pdev->dev, "failed to register netdev\n");
6616                goto err_free_port_pcpu;
6617        }
6618        netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);
6619
6620        priv->port_list[id] = port;
6621        return 0;
6622
6623err_free_port_pcpu:
6624        free_percpu(port->pcpu);
6625err_free_txq_pcpu:
6626        for (i = 0; i < txq_number; i++)
6627                free_percpu(port->txqs[i]->pcpu);
6628err_free_stats:
6629        free_percpu(port->stats);
6630err_free_irq:
6631        irq_dispose_mapping(port->irq);
6632err_free_netdev:
6633        of_node_put(phy_node);
6634        free_netdev(dev);
6635        return err;
6636}
6637
6638/* Ports removal routine */
6639static void mvpp2_port_remove(struct mvpp2_port *port)
6640{
6641        int i;
6642
6643        unregister_netdev(port->dev);
6644        of_node_put(port->phy_node);
6645        free_percpu(port->pcpu);
6646        free_percpu(port->stats);
6647        for (i = 0; i < txq_number; i++)
6648                free_percpu(port->txqs[i]->pcpu);
6649        irq_dispose_mapping(port->irq);
6650        free_netdev(port->dev);
6651}
6652
6653/* Initialize decoding windows */
6654static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6655                                    struct mvpp2 *priv)
6656{
6657        u32 win_enable;
6658        int i;
6659
6660        for (i = 0; i < 6; i++) {
6661                mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6662                mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6663
6664                if (i < 4)
6665                        mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6666        }
6667
6668        win_enable = 0;
6669
6670        for (i = 0; i < dram->num_cs; i++) {
6671                const struct mbus_dram_window *cs = dram->cs + i;
6672
6673                mvpp2_write(priv, MVPP2_WIN_BASE(i),
6674                            (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6675                            dram->mbus_dram_target_id);
6676
6677                mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6678                            (cs->size - 1) & 0xffff0000);
6679
6680                win_enable |= (1 << i);
6681        }
6682
6683        mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6684}
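
    /* Worked example for the window setup above (illustrative values): a
     * chip-select window with base = 0x40000000, size = 0x10000000,
     * mbus_attr = 0x0e and mbus_dram_target_id = 0x0 is programmed as:
     *
     *	MVPP2_WIN_BASE(i) = 0x40000000 | (0x0e << 8) | 0x0 = 0x40000e00
     *	MVPP2_WIN_SIZE(i) = (0x10000000 - 1) & 0xffff0000  = 0x0fff0000
     *
     * and bit i of MVPP2_BASE_ADDR_ENABLE is set to enable that window.
     */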
6685
6686/* Initialize Rx FIFOs */
6687static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6688{
6689        int port;
6690
6691        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6692                mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6693                            MVPP2_RX_FIFO_PORT_DATA_SIZE);
6694                mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6695                            MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6696        }
6697
6698        mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6699                    MVPP2_RX_FIFO_PORT_MIN_PKT);
6700        mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6701}
6702
6703static void mvpp2_axi_init(struct mvpp2 *priv)
6704{
6705        u32 val, rdval, wrval;
6706
6707        mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
6708
6709        /* AXI Bridge Configuration */
6710
6711        rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
6712                << MVPP22_AXI_ATTR_CACHE_OFFS;
6713        rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6714                << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6715
6716        wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
6717                << MVPP22_AXI_ATTR_CACHE_OFFS;
6718        wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6719                << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6720
6721        /* BM */
6722        mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
6723        mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
6724
6725        /* Descriptors */
6726        mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
6727        mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
6728        mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
6729        mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
6730
6731        /* Buffer Data */
6732        mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
6733        mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
6734
6735        val = MVPP22_AXI_CODE_CACHE_NON_CACHE
6736                << MVPP22_AXI_CODE_CACHE_OFFS;
6737        val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
6738                << MVPP22_AXI_CODE_DOMAIN_OFFS;
6739        mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
6740        mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
6741
6742        val = MVPP22_AXI_CODE_CACHE_RD_CACHE
6743                << MVPP22_AXI_CODE_CACHE_OFFS;
6744        val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6745                << MVPP22_AXI_CODE_DOMAIN_OFFS;
6746
6747        mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
6748
6749        val = MVPP22_AXI_CODE_CACHE_WR_CACHE
6750                << MVPP22_AXI_CODE_CACHE_OFFS;
6751        val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6752                << MVPP22_AXI_CODE_DOMAIN_OFFS;
6753
6754        mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
6755}
6756
6757/* Initialize network controller common part HW */
6758static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6759{
6760        const struct mbus_dram_target_info *dram_target_info;
6761        int err, i;
6762        u32 val;
6763
6764        /* Check hardware constraints */
6765        if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
6766            (txq_number > MVPP2_MAX_TXQ)) {
6767                dev_err(&pdev->dev, "invalid queue size parameter\n");
6768                return -EINVAL;
6769        }
6770
6771        /* MBUS windows configuration */
6772        dram_target_info = mv_mbus_dram_info();
6773        if (dram_target_info)
6774                mvpp2_conf_mbus_windows(dram_target_info, priv);
6775
6776        if (priv->hw_version == MVPP22)
6777                mvpp2_axi_init(priv);
6778
6779        /* Disable HW PHY polling */
6780        if (priv->hw_version == MVPP21) {
6781                val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6782                val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6783                writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6784        } else {
6785                val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6786                val &= ~MVPP22_SMI_POLLING_EN;
6787                writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6788        }
6789
6790        /* Allocate and initialize aggregated TXQs */
6791        priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6792                                       sizeof(*priv->aggr_txqs),
6793                                       GFP_KERNEL);
6794        if (!priv->aggr_txqs)
6795                return -ENOMEM;
6796
6797        for_each_present_cpu(i) {
6798                priv->aggr_txqs[i].id = i;
6799                priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6800                err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6801                                          MVPP2_AGGR_TXQ_SIZE, i, priv);
6802                if (err < 0)
6803                        return err;
6804        }
6805
6806        /* Rx Fifo Init */
6807        mvpp2_rx_fifo_init(priv);
6808
6809        /* Reset Rx queue group interrupt configuration */
6810        for (i = 0; i < MVPP2_MAX_PORTS; i++) {
6811                if (priv->hw_version == MVPP21) {
6812                        mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
6813                                    rxq_number);
6814                        continue;
6815                } else {
6816                        u32 val;
6817
6818                        val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
6819                        mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
6820
6821                        val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
6822                        mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
6823                }
6824        }
6825
6826        if (priv->hw_version == MVPP21)
6827                writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6828                       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6829
6830        /* Allow cache snoop when transmitting packets */
6831        mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6832
6833        /* Buffer Manager initialization */
6834        err = mvpp2_bm_init(pdev, priv);
6835        if (err < 0)
6836                return err;
6837
6838        /* Parser default initialization */
6839        err = mvpp2_prs_default_init(pdev, priv);
6840        if (err < 0)
6841                return err;
6842
6843        /* Classifier default initialization */
6844        mvpp2_cls_init(priv);
6845
6846        return 0;
6847}
6848
6849static int mvpp2_probe(struct platform_device *pdev)
6850{
6851        struct device_node *dn = pdev->dev.of_node;
6852        struct device_node *port_node;
6853        struct mvpp2 *priv;
6854        struct resource *res;
6855        void __iomem *base;
6856        int port_count, cpu;
6857        int err;
6858
6859        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
6860        if (!priv)
6861                return -ENOMEM;
6862
6863        priv->hw_version =
6864                (unsigned long)of_device_get_match_data(&pdev->dev);
6865
6866        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6867        base = devm_ioremap_resource(&pdev->dev, res);
6868        if (IS_ERR(base))
6869                return PTR_ERR(base);
6870
6871        if (priv->hw_version == MVPP21) {
6872                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6873                priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6874                if (IS_ERR(priv->lms_base))
6875                        return PTR_ERR(priv->lms_base);
6876        } else {
6877                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6878                priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
6879                if (IS_ERR(priv->iface_base))
6880                        return PTR_ERR(priv->iface_base);
6881        }
6882
6883        for_each_present_cpu(cpu) {
6884                u32 addr_space_sz;
6885
6886                addr_space_sz = (priv->hw_version == MVPP21 ?
6887                                 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
6888                priv->cpu_base[cpu] = base + cpu * addr_space_sz;
6889        }
6890
6891        if (priv->hw_version == MVPP21)
6892                priv->max_port_rxqs = 8;
6893        else
6894                priv->max_port_rxqs = 32;
6895
6896        priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6897        if (IS_ERR(priv->pp_clk))
6898                return PTR_ERR(priv->pp_clk);
6899        err = clk_prepare_enable(priv->pp_clk);
6900        if (err < 0)
6901                return err;
6902
6903        priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6904        if (IS_ERR(priv->gop_clk)) {
6905                err = PTR_ERR(priv->gop_clk);
6906                goto err_pp_clk;
6907        }
6908        err = clk_prepare_enable(priv->gop_clk);
6909        if (err < 0)
6910                goto err_pp_clk;
6911
6912        if (priv->hw_version == MVPP22) {
6913                priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
6914                if (IS_ERR(priv->mg_clk)) {
6915                        err = PTR_ERR(priv->mg_clk);
6916                        goto err_gop_clk;
6917                }
6918
6919                err = clk_prepare_enable(priv->mg_clk);
6920                if (err < 0)
6921                        goto err_gop_clk;
6922        }
6923
6924        /* Get system's tclk rate */
6925        priv->tclk = clk_get_rate(priv->pp_clk);
6926
6927        if (priv->hw_version == MVPP22) {
6928                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
6929                if (err)
6930                        goto err_mg_clk;
6931                /* Sadly, the BM pools all share the same register to
6932                 * store the high 32 bits of their address. So they
6933                 * must all have the same high 32 bits, which forces
6934                 * us to restrict coherent memory to DMA_BIT_MASK(32).
6935                 */
6936                err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
6937                if (err)
6938                        goto err_mg_clk;
6939        }
6940
6941        /* Initialize network controller */
6942        err = mvpp2_init(pdev, priv);
6943        if (err < 0) {
6944                dev_err(&pdev->dev, "failed to initialize controller\n");
6945                goto err_mg_clk;
6946        }
6947
6948        port_count = of_get_available_child_count(dn);
6949        if (port_count == 0) {
6950                dev_err(&pdev->dev, "no ports enabled\n");
6951                err = -ENODEV;
6952                goto err_mg_clk;
6953        }
6954
6955        priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6956                                       sizeof(*priv->port_list),
6957                                       GFP_KERNEL);
6958        if (!priv->port_list) {
6959                err = -ENOMEM;
6960                goto err_mg_clk;
6961        }
6962
6963        /* Initialize ports */
6964        for_each_available_child_of_node(dn, port_node) {
6965                err = mvpp2_port_probe(pdev, port_node, priv);
6966                if (err < 0)
6967                        goto err_mg_clk;
6968        }
6969
6970        platform_set_drvdata(pdev, priv);
6971        return 0;
6972
6973err_mg_clk:
6974        if (priv->hw_version == MVPP22)
6975                clk_disable_unprepare(priv->mg_clk);
6976err_gop_clk:
6977        clk_disable_unprepare(priv->gop_clk);
6978err_pp_clk:
6979        clk_disable_unprepare(priv->pp_clk);
6980        return err;
6981}
6982
6983static int mvpp2_remove(struct platform_device *pdev)
6984{
6985        struct mvpp2 *priv = platform_get_drvdata(pdev);
6986        struct device_node *dn = pdev->dev.of_node;
6987        struct device_node *port_node;
6988        int i = 0;
6989
6990        for_each_available_child_of_node(dn, port_node) {
6991                if (priv->port_list[i])
6992                        mvpp2_port_remove(priv->port_list[i]);
6993                i++;
6994        }
6995
6996        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6997                struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6998
6999                mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
7000        }
7001
7002        for_each_present_cpu(i) {
7003                struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7004
7005                dma_free_coherent(&pdev->dev,
7006                                  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7007                                  aggr_txq->descs,
7008                                  aggr_txq->descs_dma);
7009        }
7010
7011        clk_disable_unprepare(priv->mg_clk);
7012        clk_disable_unprepare(priv->pp_clk);
7013        clk_disable_unprepare(priv->gop_clk);
7014
7015        return 0;
7016}
7017
7018static const struct of_device_id mvpp2_match[] = {
7019        {
7020                .compatible = "marvell,armada-375-pp2",
7021                .data = (void *)MVPP21,
7022        },
7023        {
7024                .compatible = "marvell,armada-7k-pp22",
7025                .data = (void *)MVPP22,
7026        },
7027        { }
7028};
7029MODULE_DEVICE_TABLE(of, mvpp2_match);
7030
7031static struct platform_driver mvpp2_driver = {
7032        .probe = mvpp2_probe,
7033        .remove = mvpp2_remove,
7034        .driver = {
7035                .name = MVPP2_DRIVER_NAME,
7036                .of_match_table = mvpp2_match,
7037        },
7038};
7039
7040module_platform_driver(mvpp2_driver);
7041
7042MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
7043MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
7044MODULE_LICENSE("GPL v2");
7045