linux/drivers/net/ethernet/marvell/mvneta.c
   1/*
   2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
   3 *
   4 * Copyright (C) 2012 Marvell
   5 *
   6 * Rami Rosen <rosenr@marvell.com>
   7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
   8 *
   9 * This file is licensed under the terms of the GNU General Public
  10 * License version 2. This program is licensed "as is" without any
  11 * warranty of any kind, whether express or implied.
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/cpu.h>
  16#include <linux/etherdevice.h>
  17#include <linux/if_vlan.h>
  18#include <linux/inetdevice.h>
  19#include <linux/interrupt.h>
  20#include <linux/io.h>
  21#include <linux/kernel.h>
  22#include <linux/mbus.h>
  23#include <linux/module.h>
  24#include <linux/netdevice.h>
  25#include <linux/of.h>
  26#include <linux/of_address.h>
  27#include <linux/of_irq.h>
  28#include <linux/of_mdio.h>
  29#include <linux/of_net.h>
  30#include <linux/phy/phy.h>
  31#include <linux/phy.h>
  32#include <linux/phylink.h>
  33#include <linux/platform_device.h>
  34#include <linux/skbuff.h>
  35#include <net/hwbm.h>
  36#include "mvneta_bm.h"
  37#include <net/ip.h>
  38#include <net/ipv6.h>
  39#include <net/tso.h>
  40#include <net/page_pool.h>
  41#include <linux/bpf_trace.h>
  42
  43/* Registers */
  44#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
  45#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
  46#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT     4
  47#define      MVNETA_RXQ_SHORT_POOL_ID_MASK      0x30
  48#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT      6
  49#define      MVNETA_RXQ_LONG_POOL_ID_MASK       0xc0
  50#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
  51#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
  52#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
  53#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
  54#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
  55#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
  56#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
  57#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
  58#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
  59#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
  60#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
  61#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
  62#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
  63#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)    (0x1700 + ((pool) << 2))
  64#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT   3
  65#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK    0xfff8
  66#define MVNETA_PORT_RX_RESET                    0x1cc0
  67#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
  68#define MVNETA_PHY_ADDR                         0x2000
  69#define      MVNETA_PHY_ADDR_MASK               0x1f
  70#define MVNETA_MBUS_RETRY                       0x2010
  71#define MVNETA_UNIT_INTR_CAUSE                  0x2080
  72#define MVNETA_UNIT_CONTROL                     0x20B0
  73#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
  74#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
  75#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
  76#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
  77#define MVNETA_BASE_ADDR_ENABLE                 0x2290
  78#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
  79#define MVNETA_PORT_CONFIG                      0x2400
  80#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
  81#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
  82#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
  83#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
  84#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
  85#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
  86#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
  87#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
  88#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
  89                                                 MVNETA_DEF_RXQ_ARP(q)   | \
  90                                                 MVNETA_DEF_RXQ_TCP(q)   | \
  91                                                 MVNETA_DEF_RXQ_UDP(q)   | \
  92                                                 MVNETA_DEF_RXQ_BPDU(q)  | \
  93                                                 MVNETA_TX_UNSET_ERR_SUM | \
  94                                                 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
  95#define MVNETA_PORT_CONFIG_EXTEND                0x2404
  96#define MVNETA_MAC_ADDR_LOW                      0x2414
  97#define MVNETA_MAC_ADDR_HIGH                     0x2418
  98#define MVNETA_SDMA_CONFIG                       0x241c
  99#define      MVNETA_SDMA_BRST_SIZE_16            4
 100#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
 101#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
 102#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
 103#define      MVNETA_DESC_SWAP                    BIT(6)
 104#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
 105#define MVNETA_VLAN_PRIO_TO_RXQ                  0x2440
 106#define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
 107#define MVNETA_PORT_STATUS                       0x2444
 108#define      MVNETA_TX_IN_PRGRS                  BIT(0)
 109#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 110#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 111/* Only exists on Armada XP and Armada 370 */
 112#define MVNETA_SERDES_CFG                        0x24A0
 113#define      MVNETA_SGMII_SERDES_PROTO           0x0cc7
 114#define      MVNETA_QSGMII_SERDES_PROTO          0x0667
 115#define      MVNETA_HSGMII_SERDES_PROTO          0x1107
 116#define MVNETA_TYPE_PRIO                         0x24bc
 117#define      MVNETA_FORCE_UNI                    BIT(21)
 118#define MVNETA_TXQ_CMD_1                         0x24e4
 119#define MVNETA_TXQ_CMD                           0x2448
 120#define      MVNETA_TXQ_DISABLE_SHIFT            8
 121#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
 122#define MVNETA_RX_DISCARD_FRAME_COUNT            0x2484
 123#define MVNETA_OVERRUN_FRAME_COUNT               0x2488
 124#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
 125#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 126#define MVNETA_ACC_MODE                          0x2500
 127#define MVNETA_BM_ADDRESS                        0x2504
 128#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 129#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 130#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 131#define      MVNETA_CPU_RXQ_ACCESS(rxq)          BIT(rxq)
 132#define      MVNETA_CPU_TXQ_ACCESS(txq)          BIT(txq + 8)
 133#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
 134
 135/* Exception Interrupt Port/Queue Cause register
 136 *
  137 * Their behavior depends on the mapping done using the PCPX2Q
  138 * registers. For a given CPU, if the bit associated with a queue is
  139 * not set, then a read of the register from this CPU will always
  140 * return 0 and a write won't do anything.
 141 */
 142
 143#define MVNETA_INTR_NEW_CAUSE                    0x25a0
 144#define MVNETA_INTR_NEW_MASK                     0x25a4
 145
 146/* bits  0..7  = TXQ SENT, one bit per queue.
 147 * bits  8..15 = RXQ OCCUP, one bit per queue.
 148 * bits 16..23 = RXQ FREE, one bit per queue.
 149 * bit  29 = OLD_REG_SUM, see old reg ?
 150 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 151 * bit  31 = MISC_SUM,   one bit for 4 ports
 152 */
 153#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
 154#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 155#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 156#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
 157#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
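/* Worked example: with all eight queues enabled, MVNETA_TX_INTR_MASK(8)
 * expands to ((1 << 8) - 1) << 0 = 0x00ff (== MVNETA_TX_INTR_MASK_ALL) and
 * MVNETA_RX_INTR_MASK(8) to 0xff << 8 = 0xff00 (== MVNETA_RX_INTR_MASK_ALL).
 */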
 158
 159#define MVNETA_INTR_OLD_CAUSE                    0x25a8
 160#define MVNETA_INTR_OLD_MASK                     0x25ac
 161
 162/* Data Path Port/Queue Cause Register */
 163#define MVNETA_INTR_MISC_CAUSE                   0x25b0
 164#define MVNETA_INTR_MISC_MASK                    0x25b4
 165
 166#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
 167#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
 168#define      MVNETA_CAUSE_PTP                    BIT(4)
 169
 170#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
 171#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
 172#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
 173#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
 174#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
 175#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
 176#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
 177#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
 178
 179#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
 180#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
 181#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
 182
 183#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
 184#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
 185#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
 186
 187#define MVNETA_INTR_ENABLE                       0x25b8
 188#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
 189#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
 190
 191#define MVNETA_RXQ_CMD                           0x2680
 192#define      MVNETA_RXQ_DISABLE_SHIFT            8
 193#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
 194#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
 195#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
 196#define MVNETA_GMAC_CTRL_0                       0x2c00
 197#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 198#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 199#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
 200#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 201#define MVNETA_GMAC_CTRL_2                       0x2c08
 202#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 203#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 204#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 205#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 206#define MVNETA_GMAC_STATUS                       0x2c10
 207#define      MVNETA_GMAC_LINK_UP                 BIT(0)
 208#define      MVNETA_GMAC_SPEED_1000              BIT(1)
 209#define      MVNETA_GMAC_SPEED_100               BIT(2)
 210#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
 211#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
 212#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 213#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 214#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
 215#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
 216#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
 217#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 218#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 219#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 220#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
 221#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
 222#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
 223#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 224#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 225#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 226#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
 227#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
 228#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 229#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 230#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 231#define MVNETA_GMAC_CTRL_4                       0x2c90
 232#define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
 233#define MVNETA_MIB_COUNTERS_BASE                 0x3000
 234#define      MVNETA_MIB_LATE_COLLISION           0x7c
 235#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 236#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
 237#define MVNETA_DA_FILT_UCAST_BASE                0x3600
 238#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
 239#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
 240#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
 241#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
 242#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
 243#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
 244#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
 245#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
 246#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
 247#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
 248#define MVNETA_PORT_TX_RESET                     0x3cf0
 249#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
 250#define MVNETA_TX_MTU                            0x3e0c
 251#define MVNETA_TX_TOKEN_SIZE                     0x3e14
 252#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
 253#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 254#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 255
 256#define MVNETA_LPI_CTRL_0                        0x2cc0
 257#define MVNETA_LPI_CTRL_1                        0x2cc4
 258#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
 259#define MVNETA_LPI_CTRL_2                        0x2cc8
 260#define MVNETA_LPI_STATUS                        0x2ccc
 261
 262#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK      0xff
 263
 264/* Descriptor ring Macros */
 265#define MVNETA_QUEUE_NEXT_DESC(q, index)        \
 266        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
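/* Worked example: for a ring whose last_desc is 511 (512 descriptors),
 * MVNETA_QUEUE_NEXT_DESC(q, 510) yields 511, while
 * MVNETA_QUEUE_NEXT_DESC(q, 511) wraps back around to 0.
 */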
 267
 268/* Various constants */
 269
 270/* Coalescing */
 271#define MVNETA_TXDONE_COAL_PKTS         0       /* interrupt per packet */
 272#define MVNETA_RX_COAL_PKTS             32
 273#define MVNETA_RX_COAL_USEC             100
 274
  275/* The two-byte Marvell header. It either contains a special value used
  276 * by Marvell switches when a specific hardware mode is enabled (not
  277 * supported by this driver) or is filled with zeroes automatically on
  278 * the RX side. Because those two bytes sit in front of the Ethernet
  279 * header, the IP header ends up aligned on a 4-byte boundary
  280 * automatically: the hardware skips those two bytes on its
  281 * own.
 282 */
 283#define MVNETA_MH_SIZE                  2
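/* Worked example of the alignment property described above: the 2-byte
 * Marvell header plus the 14-byte Ethernet header gives a 16-byte prefix,
 * so the IP header that follows starts on a 4-byte boundary.
 */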
 284
 285#define MVNETA_VLAN_TAG_LEN             4
 286
 287#define MVNETA_TX_CSUM_DEF_SIZE         1600
 288#define MVNETA_TX_CSUM_MAX_SIZE         9800
 289#define MVNETA_ACC_MODE_EXT1            1
 290#define MVNETA_ACC_MODE_EXT2            2
 291
 292#define MVNETA_MAX_DECODE_WIN           6
 293
 294/* Timeout constants */
 295#define MVNETA_TX_DISABLE_TIMEOUT_MSEC  1000
 296#define MVNETA_RX_DISABLE_TIMEOUT_MSEC  1000
 297#define MVNETA_TX_FIFO_EMPTY_TIMEOUT    10000
 298
 299#define MVNETA_TX_MTU_MAX               0x3ffff
 300
 301/* The RSS lookup table actually has 256 entries but we do not use
 302 * them yet
 303 */
 304#define MVNETA_RSS_LU_TABLE_SIZE        1
 305
 306/* Max number of Rx descriptors */
 307#define MVNETA_MAX_RXD 512
 308
 309/* Max number of Tx descriptors */
 310#define MVNETA_MAX_TXD 1024
 311
 312/* Max number of allowed TCP segments for software TSO */
 313#define MVNETA_MAX_TSO_SEGS 100
 314
 315#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
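/* The "* 2" above most likely budgets two descriptors per TSO segment (one
 * for the rebuilt header taken from the tso_hdrs area, one for its payload),
 * plus up to MAX_SKB_FRAGS descriptors for the fragments of the original
 * skb - a worst-case estimate rather than a hardware limit.
 */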
 316
 317/* descriptor aligned size */
 318#define MVNETA_DESC_ALIGNED_SIZE        32
 319
  320/* Number of bytes to be taken into account by HW when putting incoming data
  321 * to the buffers. It is needed when NET_SKB_PAD exceeds the maximum packet
  322 * offset supported by the MVNETA_RXQ_CONFIG_REG(q) registers.
 323 */
 324#define MVNETA_RX_PKT_OFFSET_CORRECTION         64
 325
 326#define MVNETA_RX_PKT_SIZE(mtu) \
 327        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
 328              ETH_HLEN + ETH_FCS_LEN,                        \
 329              cache_line_size())
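/* Worked example, assuming a 64-byte cache line: for the default MTU of
 * 1500, 1500 + 2 (MH) + 4 (VLAN tag) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) =
 * 1524 bytes, which ALIGN() rounds up to 1536 bytes per receive buffer.
 */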
 330
 331/* Driver assumes that the last 3 bits are 0 */
 332#define MVNETA_SKB_HEADROOM     ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
 333#define MVNETA_SKB_PAD  (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
 334                         MVNETA_SKB_HEADROOM))
 335#define MVNETA_MAX_RX_BUF_SIZE  (PAGE_SIZE - MVNETA_SKB_PAD)
 336
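/* IS_TSO_HEADER() is true when @addr falls inside the per-queue TSO header
 * area (txq->tso_hdrs_phys up to txq->size * TSO_HEADER_SIZE bytes beyond
 * it); such addresses come from a pre-mapped DMA buffer and so, presumably,
 * must not be unmapped individually on TX completion.
 */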
 337#define IS_TSO_HEADER(txq, addr) \
 338        ((addr >= txq->tso_hdrs_phys) && \
 339         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
 340
 341#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 342        (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 343
 344enum {
 345        ETHTOOL_STAT_EEE_WAKEUP,
 346        ETHTOOL_STAT_SKB_ALLOC_ERR,
 347        ETHTOOL_STAT_REFILL_ERR,
 348        ETHTOOL_XDP_REDIRECT,
 349        ETHTOOL_XDP_PASS,
 350        ETHTOOL_XDP_DROP,
 351        ETHTOOL_XDP_TX,
 352        ETHTOOL_XDP_TX_ERR,
 353        ETHTOOL_XDP_XMIT,
 354        ETHTOOL_XDP_XMIT_ERR,
 355        ETHTOOL_MAX_STATS,
 356};
 357
 358struct mvneta_statistic {
 359        unsigned short offset;
 360        unsigned short type;
 361        const char name[ETH_GSTRING_LEN];
 362};
 363
 364#define T_REG_32        32
 365#define T_REG_64        64
 366#define T_SW            1
 367
 368#define MVNETA_XDP_PASS         0
 369#define MVNETA_XDP_DROPPED      BIT(0)
 370#define MVNETA_XDP_TX           BIT(1)
 371#define MVNETA_XDP_REDIR        BIT(2)
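/* These XDP verdicts are kept as distinct bits (rather than plain enum
 * values), presumably so that the outcomes of several frames handled in one
 * RX pass can be OR-ed together and acted upon once at the end of the poll.
 */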
 372
 373static const struct mvneta_statistic mvneta_statistics[] = {
 374        { 0x3000, T_REG_64, "good_octets_received", },
 375        { 0x3010, T_REG_32, "good_frames_received", },
 376        { 0x3008, T_REG_32, "bad_octets_received", },
 377        { 0x3014, T_REG_32, "bad_frames_received", },
 378        { 0x3018, T_REG_32, "broadcast_frames_received", },
 379        { 0x301c, T_REG_32, "multicast_frames_received", },
 380        { 0x3050, T_REG_32, "unrec_mac_control_received", },
 381        { 0x3058, T_REG_32, "good_fc_received", },
 382        { 0x305c, T_REG_32, "bad_fc_received", },
 383        { 0x3060, T_REG_32, "undersize_received", },
 384        { 0x3064, T_REG_32, "fragments_received", },
 385        { 0x3068, T_REG_32, "oversize_received", },
 386        { 0x306c, T_REG_32, "jabber_received", },
 387        { 0x3070, T_REG_32, "mac_receive_error", },
 388        { 0x3074, T_REG_32, "bad_crc_event", },
 389        { 0x3078, T_REG_32, "collision", },
 390        { 0x307c, T_REG_32, "late_collision", },
 391        { 0x2484, T_REG_32, "rx_discard", },
 392        { 0x2488, T_REG_32, "rx_overrun", },
 393        { 0x3020, T_REG_32, "frames_64_octets", },
 394        { 0x3024, T_REG_32, "frames_65_to_127_octets", },
 395        { 0x3028, T_REG_32, "frames_128_to_255_octets", },
 396        { 0x302c, T_REG_32, "frames_256_to_511_octets", },
 397        { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
 398        { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
 399        { 0x3038, T_REG_64, "good_octets_sent", },
 400        { 0x3040, T_REG_32, "good_frames_sent", },
 401        { 0x3044, T_REG_32, "excessive_collision", },
 402        { 0x3048, T_REG_32, "multicast_frames_sent", },
 403        { 0x304c, T_REG_32, "broadcast_frames_sent", },
 404        { 0x3054, T_REG_32, "fc_sent", },
 405        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
 406        { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 407        { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
 408        { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
 409        { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
 410        { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
 411        { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
 412        { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
 413        { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
 414        { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
 415        { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
 416};
 417
 418struct mvneta_stats {
 419        u64     rx_packets;
 420        u64     rx_bytes;
 421        u64     tx_packets;
 422        u64     tx_bytes;
 423        /* xdp */
 424        u64     xdp_redirect;
 425        u64     xdp_pass;
 426        u64     xdp_drop;
 427        u64     xdp_xmit;
 428        u64     xdp_xmit_err;
 429        u64     xdp_tx;
 430        u64     xdp_tx_err;
 431};
 432
 433struct mvneta_ethtool_stats {
 434        struct mvneta_stats ps;
 435        u64     skb_alloc_error;
 436        u64     refill_error;
 437};
 438
 439struct mvneta_pcpu_stats {
 440        struct u64_stats_sync syncp;
 441
 442        struct mvneta_ethtool_stats es;
 443        u64     rx_dropped;
 444        u64     rx_errors;
 445};
 446
 447struct mvneta_pcpu_port {
 448        /* Pointer to the shared port */
 449        struct mvneta_port      *pp;
 450
 451        /* Pointer to the CPU-local NAPI struct */
 452        struct napi_struct      napi;
 453
 454        /* Cause of the previous interrupt */
 455        u32                     cause_rx_tx;
 456};
 457
 458enum {
 459        __MVNETA_DOWN,
 460};
 461
 462struct mvneta_port {
 463        u8 id;
 464        struct mvneta_pcpu_port __percpu        *ports;
 465        struct mvneta_pcpu_stats __percpu       *stats;
 466
 467        unsigned long state;
 468
 469        int pkt_size;
 470        void __iomem *base;
 471        struct mvneta_rx_queue *rxqs;
 472        struct mvneta_tx_queue *txqs;
 473        struct net_device *dev;
 474        struct hlist_node node_online;
 475        struct hlist_node node_dead;
 476        int rxq_def;
 477        /* Protect the access to the percpu interrupt registers,
 478         * ensuring that the configuration remains coherent.
 479         */
 480        spinlock_t lock;
 481        bool is_stopped;
 482
 483        u32 cause_rx_tx;
 484        struct napi_struct napi;
 485
 486        struct bpf_prog *xdp_prog;
 487
 488        /* Core clock */
 489        struct clk *clk;
 490        /* AXI clock */
 491        struct clk *clk_bus;
 492        u8 mcast_count[256];
 493        u16 tx_ring_size;
 494        u16 rx_ring_size;
 495        u8 prio_tc_map[8];
 496
 497        phy_interface_t phy_interface;
 498        struct device_node *dn;
 499        unsigned int tx_csum_limit;
 500        struct phylink *phylink;
 501        struct phylink_config phylink_config;
 502        struct phy *comphy;
 503
 504        struct mvneta_bm *bm_priv;
 505        struct mvneta_bm_pool *pool_long;
 506        struct mvneta_bm_pool *pool_short;
 507        int bm_win_id;
 508
 509        bool eee_enabled;
 510        bool eee_active;
 511        bool tx_lpi_enabled;
 512
 513        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 514
 515        u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 516
 517        /* Flags for special SoC configurations */
 518        bool neta_armada3700;
 519        u16 rx_offset_correction;
 520        const struct mbus_dram_target_info *dram_target_info;
 521};
 522
 523/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 524 * layout of the transmit and reception DMA descriptors, and their
 525 * layout is therefore defined by the hardware design
 526 */
 527
 528#define MVNETA_TX_L3_OFF_SHIFT  0
 529#define MVNETA_TX_IP_HLEN_SHIFT 8
 530#define MVNETA_TX_L4_UDP        BIT(16)
 531#define MVNETA_TX_L3_IP6        BIT(17)
 532#define MVNETA_TXD_IP_CSUM      BIT(18)
 533#define MVNETA_TXD_Z_PAD        BIT(19)
 534#define MVNETA_TXD_L_DESC       BIT(20)
 535#define MVNETA_TXD_F_DESC       BIT(21)
 536#define MVNETA_TXD_FLZ_DESC     (MVNETA_TXD_Z_PAD  | \
 537                                 MVNETA_TXD_L_DESC | \
 538                                 MVNETA_TXD_F_DESC)
 539#define MVNETA_TX_L4_CSUM_FULL  BIT(30)
 540#define MVNETA_TX_L4_CSUM_NOT   BIT(31)
 541
 542#define MVNETA_RXD_ERR_CRC              0x0
 543#define MVNETA_RXD_BM_POOL_SHIFT        13
 544#define MVNETA_RXD_BM_POOL_MASK         (BIT(13) | BIT(14))
 545#define MVNETA_RXD_ERR_SUMMARY          BIT(16)
 546#define MVNETA_RXD_ERR_OVERRUN          BIT(17)
 547#define MVNETA_RXD_ERR_LEN              BIT(18)
 548#define MVNETA_RXD_ERR_RESOURCE         (BIT(17) | BIT(18))
 549#define MVNETA_RXD_ERR_CODE_MASK        (BIT(17) | BIT(18))
 550#define MVNETA_RXD_L3_IP4               BIT(25)
 551#define MVNETA_RXD_LAST_DESC            BIT(26)
 552#define MVNETA_RXD_FIRST_DESC           BIT(27)
 553#define MVNETA_RXD_FIRST_LAST_DESC      (MVNETA_RXD_FIRST_DESC | \
 554                                         MVNETA_RXD_LAST_DESC)
 555#define MVNETA_RXD_L4_CSUM_OK           BIT(30)
 556
 557#if defined(__LITTLE_ENDIAN)
 558struct mvneta_tx_desc {
 559        u32  command;           /* Options used by HW for packet transmitting.*/
 560        u16  reserved1;         /* csum_l4 (for future use)             */
 561        u16  data_size;         /* Data size of transmitted packet in bytes */
 562        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 563        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 564        u32  reserved3[4];      /* Reserved - (for future use)          */
 565};
 566
 567struct mvneta_rx_desc {
 568        u32  status;            /* Info about received packet           */
 569        u16  reserved1;         /* pnc_info - (for future use, PnC)     */
 570        u16  data_size;         /* Size of received packet in bytes     */
 571
 572        u32  buf_phys_addr;     /* Physical address of the buffer       */
 573        u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 574
 575        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
 576        u16  reserved3;         /* prefetch_cmd, for future use         */
 577        u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 578
 579        u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
 580        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 581};
 582#else
 583struct mvneta_tx_desc {
 584        u16  data_size;         /* Data size of transmitted packet in bytes */
 585        u16  reserved1;         /* csum_l4 (for future use)             */
 586        u32  command;           /* Options used by HW for packet transmitting.*/
 587        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 588        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 589        u32  reserved3[4];      /* Reserved - (for future use)          */
 590};
 591
 592struct mvneta_rx_desc {
 593        u16  data_size;         /* Size of received packet in bytes     */
 594        u16  reserved1;         /* pnc_info - (for future use, PnC)     */
 595        u32  status;            /* Info about received packet           */
 596
 597        u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 598        u32  buf_phys_addr;     /* Physical address of the buffer       */
 599
 600        u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 601        u16  reserved3;         /* prefetch_cmd, for future use         */
 602        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
 603
 604        u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
 605        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 606};
 607#endif
 608
 609enum mvneta_tx_buf_type {
 610        MVNETA_TYPE_SKB,
 611        MVNETA_TYPE_XDP_TX,
 612        MVNETA_TYPE_XDP_NDO,
 613};
 614
 615struct mvneta_tx_buf {
 616        enum mvneta_tx_buf_type type;
 617        union {
 618                struct xdp_frame *xdpf;
 619                struct sk_buff *skb;
 620        };
 621};
 622
 623struct mvneta_tx_queue {
 624        /* Number of this TX queue, in the range 0-7 */
 625        u8 id;
 626
 627        /* Number of TX DMA descriptors in the descriptor ring */
 628        int size;
 629
 630        /* Number of currently used TX DMA descriptor in the
 631         * descriptor ring
 632         */
 633        int count;
 634        int pending;
 635        int tx_stop_threshold;
 636        int tx_wake_threshold;
 637
 638        /* Array of transmitted buffers */
 639        struct mvneta_tx_buf *buf;
 640
 641        /* Index of last TX DMA descriptor that was inserted */
 642        int txq_put_index;
 643
 644        /* Index of the TX DMA descriptor to be cleaned up */
 645        int txq_get_index;
 646
 647        u32 done_pkts_coal;
 648
 649        /* Virtual address of the TX DMA descriptors array */
 650        struct mvneta_tx_desc *descs;
 651
 652        /* DMA address of the TX DMA descriptors array */
 653        dma_addr_t descs_phys;
 654
 655        /* Index of the last TX DMA descriptor */
 656        int last_desc;
 657
 658        /* Index of the next TX DMA descriptor to process */
 659        int next_desc_to_proc;
 660
 661        /* DMA buffers for TSO headers */
 662        char *tso_hdrs;
 663
 664        /* DMA address of TSO headers */
 665        dma_addr_t tso_hdrs_phys;
 666
 667        /* Affinity mask for CPUs*/
 668        cpumask_t affinity_mask;
 669};
 670
 671struct mvneta_rx_queue {
 672        /* rx queue number, in the range 0-7 */
 673        u8 id;
 674
 675        /* num of rx descriptors in the rx descriptor ring */
 676        int size;
 677
 678        u32 pkts_coal;
 679        u32 time_coal;
 680
 681        /* page_pool */
 682        struct page_pool *page_pool;
 683        struct xdp_rxq_info xdp_rxq;
 684
 685        /* Virtual address of the RX buffer */
 686        void  **buf_virt_addr;
 687
 688        /* Virtual address of the RX DMA descriptors array */
 689        struct mvneta_rx_desc *descs;
 690
 691        /* DMA address of the RX DMA descriptors array */
 692        dma_addr_t descs_phys;
 693
 694        /* Index of the last RX DMA descriptor */
 695        int last_desc;
 696
 697        /* Index of the next RX DMA descriptor to process */
 698        int next_desc_to_proc;
 699
 700        /* Index of first RX DMA descriptor to refill */
 701        int first_to_refill;
 702        u32 refill_num;
 703};
 704
 705static enum cpuhp_state online_hpstate;
  706/* The hardware supports eight (8) RX queues and eight (8) TX queues;
  707 * allocate all of them (rxq_def below selects the default RX queue).
 708 */
 709static int rxq_number = 8;
 710static int txq_number = 8;
 711
 712static int rxq_def;
 713
 714static int rx_copybreak __read_mostly = 256;
 715
  716/* HW BM requires that each port be identified by a unique ID */
 717static int global_port_id;
 718
 719#define MVNETA_DRIVER_NAME "mvneta"
 720#define MVNETA_DRIVER_VERSION "1.0"
 721
 722/* Utility/helper methods */
 723
 724/* Write helper method */
 725static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
 726{
 727        writel(data, pp->base + offset);
 728}
 729
 730/* Read helper method */
 731static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
 732{
 733        return readl(pp->base + offset);
 734}
 735
 736/* Increment txq get counter */
 737static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
 738{
 739        txq->txq_get_index++;
 740        if (txq->txq_get_index == txq->size)
 741                txq->txq_get_index = 0;
 742}
 743
 744/* Increment txq put counter */
 745static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
 746{
 747        txq->txq_put_index++;
 748        if (txq->txq_put_index == txq->size)
 749                txq->txq_put_index = 0;
 750}
 751
 752
 753/* Clear all MIB counters */
 754static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 755{
 756        int i;
 757
  758        /* Perform dummy reads from MIB counters; they clear on read */
 759        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
 760                mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
 761        mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
 762        mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
 763}
 764
 765/* Get System Network Statistics */
 766static void
 767mvneta_get_stats64(struct net_device *dev,
 768                   struct rtnl_link_stats64 *stats)
 769{
 770        struct mvneta_port *pp = netdev_priv(dev);
 771        unsigned int start;
 772        int cpu;
 773
 774        for_each_possible_cpu(cpu) {
 775                struct mvneta_pcpu_stats *cpu_stats;
 776                u64 rx_packets;
 777                u64 rx_bytes;
 778                u64 rx_dropped;
 779                u64 rx_errors;
 780                u64 tx_packets;
 781                u64 tx_bytes;
 782
 783                cpu_stats = per_cpu_ptr(pp->stats, cpu);
 784                do {
 785                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 786                        rx_packets = cpu_stats->es.ps.rx_packets;
 787                        rx_bytes   = cpu_stats->es.ps.rx_bytes;
 788                        rx_dropped = cpu_stats->rx_dropped;
 789                        rx_errors  = cpu_stats->rx_errors;
 790                        tx_packets = cpu_stats->es.ps.tx_packets;
 791                        tx_bytes   = cpu_stats->es.ps.tx_bytes;
 792                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 793
 794                stats->rx_packets += rx_packets;
 795                stats->rx_bytes   += rx_bytes;
 796                stats->rx_dropped += rx_dropped;
 797                stats->rx_errors  += rx_errors;
 798                stats->tx_packets += tx_packets;
 799                stats->tx_bytes   += tx_bytes;
 800        }
 801
 802        stats->tx_dropped       = dev->stats.tx_dropped;
 803}
 804
 805/* Rx descriptors helper methods */
 806
 807/* Checks whether the RX descriptor having this status is both the first
 808 * and the last descriptor for the RX packet. Each RX packet is currently
  809 * received through a single RX descriptor, so a descriptor that does not
  810 * have both its first and last bits set indicates an error.
 811 */
 812static int mvneta_rxq_desc_is_first_last(u32 status)
 813{
 814        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 815                MVNETA_RXD_FIRST_LAST_DESC;
 816}
 817
 818/* Add number of descriptors ready to receive new packets */
 819static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
 820                                          struct mvneta_rx_queue *rxq,
 821                                          int ndescs)
 822{
 823        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
 824         * be added at once
 825         */
 826        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
 827                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 828                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
 829                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 830                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
 831        }
 832
 833        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 834                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 835}
 836
 837/* Get number of RX descriptors occupied by received packets */
 838static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
 839                                        struct mvneta_rx_queue *rxq)
 840{
 841        u32 val;
 842
 843        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
 844        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
 845}
 846
  847/* Update the number of processed (rx_done) and refilled (rx_filled) RX
  848 * descriptors; called on return from the RX path or from mvneta_rxq_drop_pkts().
 849 */
 850static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
 851                                       struct mvneta_rx_queue *rxq,
 852                                       int rx_done, int rx_filled)
 853{
 854        u32 val;
 855
 856        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
 857                val = rx_done |
 858                  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
 859                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 860                return;
 861        }
 862
 863        /* Only 255 descriptors can be added at once */
 864        while ((rx_done > 0) || (rx_filled > 0)) {
 865                if (rx_done <= 0xff) {
 866                        val = rx_done;
 867                        rx_done = 0;
 868                } else {
 869                        val = 0xff;
 870                        rx_done -= 0xff;
 871                }
 872                if (rx_filled <= 0xff) {
 873                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 874                        rx_filled = 0;
 875                } else {
 876                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 877                        rx_filled -= 0xff;
 878                }
 879                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 880        }
 881}
 882
 883/* Get pointer to next RX descriptor to be processed by SW */
 884static struct mvneta_rx_desc *
 885mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
 886{
 887        int rx_desc = rxq->next_desc_to_proc;
 888
 889        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
 890        prefetch(rxq->descs + rxq->next_desc_to_proc);
 891        return rxq->descs + rx_desc;
 892}
 893
 894/* Change maximum receive size of the port. */
 895static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
 896{
 897        u32 val;
 898
 899        val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 900        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
 901        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
 902                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
 903        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 904}
 905
 906
 907/* Set rx queue offset */
 908static void mvneta_rxq_offset_set(struct mvneta_port *pp,
 909                                  struct mvneta_rx_queue *rxq,
 910                                  int offset)
 911{
 912        u32 val;
 913
 914        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 915        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
 916
  917        /* Offset is programmed in units of 8 bytes */
 918        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
 919        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 920}
 921
 922
 923/* Tx descriptors helper methods */
 924
 925/* Update HW with number of TX descriptors to be sent */
 926static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 927                                     struct mvneta_tx_queue *txq,
 928                                     int pend_desc)
 929{
 930        u32 val;
 931
 932        pend_desc += txq->pending;
 933
 934        /* Only 255 Tx descriptors can be added at once */
 935        do {
 936                val = min(pend_desc, 255);
 937                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 938                pend_desc -= val;
 939        } while (pend_desc > 0);
 940        txq->pending = 0;
 941}
 942
  943/* Get pointer to the next TX descriptor to be processed (sent) by HW */
 944static struct mvneta_tx_desc *
 945mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
 946{
 947        int tx_desc = txq->next_desc_to_proc;
 948
 949        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
 950        return txq->descs + tx_desc;
 951}
 952
 953/* Release the last allocated TX descriptor. Useful to handle DMA
 954 * mapping failures in the TX path.
 955 */
 956static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
 957{
 958        if (txq->next_desc_to_proc == 0)
 959                txq->next_desc_to_proc = txq->last_desc - 1;
 960        else
 961                txq->next_desc_to_proc--;
 962}
 963
 964/* Set rxq buf size */
 965static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
 966                                    struct mvneta_rx_queue *rxq,
 967                                    int buf_size)
 968{
 969        u32 val;
 970
 971        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
 972
 973        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
 974        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
 975
 976        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
 977}
 978
 979/* Disable buffer management (BM) */
 980static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 981                                  struct mvneta_rx_queue *rxq)
 982{
 983        u32 val;
 984
 985        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 986        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
 987        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 988}
 989
 990/* Enable buffer management (BM) */
 991static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
 992                                 struct mvneta_rx_queue *rxq)
 993{
 994        u32 val;
 995
 996        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 997        val |= MVNETA_RXQ_HW_BUF_ALLOC;
 998        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 999}
1000
1001/* Notify HW about port's assignment of pool for bigger packets */
1002static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
1003                                     struct mvneta_rx_queue *rxq)
1004{
1005        u32 val;
1006
1007        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1008        val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
1009        val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
1010
1011        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1012}
1013
1014/* Notify HW about port's assignment of pool for smaller packets */
1015static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
1016                                      struct mvneta_rx_queue *rxq)
1017{
1018        u32 val;
1019
1020        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1021        val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
1022        val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
1023
1024        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1025}
1026
1027/* Set port's receive buffer size for assigned BM pool */
1028static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
1029                                              int buf_size,
1030                                              u8 pool_id)
1031{
1032        u32 val;
1033
1034        if (!IS_ALIGNED(buf_size, 8)) {
1035                dev_warn(pp->dev->dev.parent,
1036                         "illegal buf_size value %d, round to %d\n",
1037                         buf_size, ALIGN(buf_size, 8));
1038                buf_size = ALIGN(buf_size, 8);
1039        }
1040
1041        val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1042        val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
1043        mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1044}
1045
 1046/* Configure MBUS window in order to enable access to the BM internal SRAM */
1047static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1048                                  u8 target, u8 attr)
1049{
1050        u32 win_enable, win_protect;
1051        int i;
1052
1053        win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1054
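        /* In MVNETA_BASE_ADDR_ENABLE a set bit appears to mean the window is
         * disabled, so a still-set bit marks a free window; the bit is
         * cleared further down to enable the freshly programmed window.
         */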
1055        if (pp->bm_win_id < 0) {
1056                /* Find first not occupied window */
1057                for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
1058                        if (win_enable & (1 << i)) {
1059                                pp->bm_win_id = i;
1060                                break;
1061                        }
1062                }
1063                if (i == MVNETA_MAX_DECODE_WIN)
1064                        return -ENOMEM;
1065        } else {
1066                i = pp->bm_win_id;
1067        }
1068
1069        mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1070        mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1071
1072        if (i < 4)
1073                mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1074
1075        mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1076                    (attr << 8) | target);
1077
1078        mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1079
1080        win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1081        win_protect |= 3 << (2 * i);
1082        mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1083
1084        win_enable &= ~(1 << i);
1085        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1086
1087        return 0;
1088}
1089
1090static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1091{
1092        u32 wsize;
1093        u8 target, attr;
1094        int err;
1095
1096        /* Get BM window information */
1097        err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1098                                         &target, &attr);
1099        if (err < 0)
1100                return err;
1101
1102        pp->bm_win_id = -1;
1103
1104        /* Open NETA -> BM window */
1105        err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1106                                     target, attr);
1107        if (err < 0) {
1108                netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1109                return err;
1110        }
1111        return 0;
1112}
1113
 1114/* Assign and initialize pools for port. In case of failure, the
 1115 * buffer manager will remain disabled for the current port.
1116 */
1117static int mvneta_bm_port_init(struct platform_device *pdev,
1118                               struct mvneta_port *pp)
1119{
1120        struct device_node *dn = pdev->dev.of_node;
1121        u32 long_pool_id, short_pool_id;
1122
1123        if (!pp->neta_armada3700) {
1124                int ret;
1125
1126                ret = mvneta_bm_port_mbus_init(pp);
1127                if (ret)
1128                        return ret;
1129        }
1130
1131        if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1132                netdev_info(pp->dev, "missing long pool id\n");
1133                return -EINVAL;
1134        }
1135
1136        /* Create port's long pool depending on mtu */
1137        pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1138                                           MVNETA_BM_LONG, pp->id,
1139                                           MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1140        if (!pp->pool_long) {
1141                netdev_info(pp->dev, "fail to obtain long pool for port\n");
1142                return -ENOMEM;
1143        }
1144
1145        pp->pool_long->port_map |= 1 << pp->id;
1146
1147        mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1148                                   pp->pool_long->id);
1149
 1150        /* If short pool id is not defined, assume a single pool is used */
1151        if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1152                short_pool_id = long_pool_id;
1153
1154        /* Create port's short pool */
1155        pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1156                                            MVNETA_BM_SHORT, pp->id,
1157                                            MVNETA_BM_SHORT_PKT_SIZE);
1158        if (!pp->pool_short) {
1159                netdev_info(pp->dev, "fail to obtain short pool for port\n");
1160                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1161                return -ENOMEM;
1162        }
1163
1164        if (short_pool_id != long_pool_id) {
1165                pp->pool_short->port_map |= 1 << pp->id;
1166                mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1167                                           pp->pool_short->id);
1168        }
1169
1170        return 0;
1171}
1172
1173/* Update settings of a pool for bigger packets */
1174static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1175{
1176        struct mvneta_bm_pool *bm_pool = pp->pool_long;
1177        struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1178        int num;
1179
1180        /* Release all buffers from long pool */
1181        mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1182        if (hwbm_pool->buf_num) {
1183                WARN(1, "cannot free all buffers in pool %d\n",
1184                     bm_pool->id);
1185                goto bm_mtu_err;
1186        }
1187
1188        bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1189        bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1190        hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1191                        SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1192
1193        /* Fill entire long pool */
1194        num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1195        if (num != hwbm_pool->size) {
1196                WARN(1, "pool %d: %d of %d allocated\n",
1197                     bm_pool->id, num, hwbm_pool->size);
1198                goto bm_mtu_err;
1199        }
1200        mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1201
1202        return;
1203
1204bm_mtu_err:
1205        mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1206        mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1207
1208        pp->bm_priv = NULL;
1209        pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1210        mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1211        netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1212}
1213
1214/* Start the Ethernet port RX and TX activity */
1215static void mvneta_port_up(struct mvneta_port *pp)
1216{
1217        int queue;
1218        u32 q_map;
1219
1220        /* Enable all initialized TXs. */
1221        q_map = 0;
1222        for (queue = 0; queue < txq_number; queue++) {
1223                struct mvneta_tx_queue *txq = &pp->txqs[queue];
1224                if (txq->descs)
1225                        q_map |= (1 << queue);
1226        }
1227        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1228
1229        q_map = 0;
1230        /* Enable all initialized RXQs. */
1231        for (queue = 0; queue < rxq_number; queue++) {
1232                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1233
1234                if (rxq->descs)
1235                        q_map |= (1 << queue);
1236        }
1237        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1238}
1239
1240/* Stop the Ethernet port activity */
1241static void mvneta_port_down(struct mvneta_port *pp)
1242{
1243        u32 val;
1244        int count;
1245
1246        /* Stop Rx port activity. Check port Rx activity. */
1247        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1248
1249        /* Issue stop command for active channels only */
1250        if (val != 0)
1251                mvreg_write(pp, MVNETA_RXQ_CMD,
1252                            val << MVNETA_RXQ_DISABLE_SHIFT);
1253
1254        /* Wait for all Rx activity to terminate. */
1255        count = 0;
1256        do {
1257                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1258                        netdev_warn(pp->dev,
1259                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1260                                    val);
1261                        break;
1262                }
1263                mdelay(1);
1264
1265                val = mvreg_read(pp, MVNETA_RXQ_CMD);
1266        } while (val & MVNETA_RXQ_ENABLE_MASK);
1267
1268        /* Stop Tx port activity. Check port Tx activity. Issue stop
1269         * command for active channels only
1270         */
1271        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1272
1273        if (val != 0)
1274                mvreg_write(pp, MVNETA_TXQ_CMD,
1275                            (val << MVNETA_TXQ_DISABLE_SHIFT));
1276
1277        /* Wait for all Tx activity to terminate. */
1278        count = 0;
1279        do {
1280                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1281                        netdev_warn(pp->dev,
1282                                    "TIMEOUT for TX stopped status=0x%08x\n",
1283                                    val);
1284                        break;
1285                }
1286                mdelay(1);
1287
1288                /* Check TX Command reg that all Txqs are stopped */
1289                val = mvreg_read(pp, MVNETA_TXQ_CMD);
1290
1291        } while (val & MVNETA_TXQ_ENABLE_MASK);
1292
1293        /* Double check to verify that TX FIFO is empty */
1294        count = 0;
1295        do {
1296                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1297                        netdev_warn(pp->dev,
1298                                    "TX FIFO empty timeout status=0x%08x\n",
1299                                    val);
1300                        break;
1301                }
1302                mdelay(1);
1303
1304                val = mvreg_read(pp, MVNETA_PORT_STATUS);
1305        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1306                 (val & MVNETA_TX_IN_PRGRS));
1307
1308        udelay(200);
1309}
1310
1311/* Enable the port by setting the port enable bit of the MAC control register */
1312static void mvneta_port_enable(struct mvneta_port *pp)
1313{
1314        u32 val;
1315
1316        /* Enable port */
1317        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1318        val |= MVNETA_GMAC0_PORT_ENABLE;
1319        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1320}
1321
 1322/* Disable the port and wait for about 200 usec before returning */
1323static void mvneta_port_disable(struct mvneta_port *pp)
1324{
1325        u32 val;
1326
1327        /* Reset the Enable bit in the Serial Control Register */
1328        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1329        val &= ~MVNETA_GMAC0_PORT_ENABLE;
1330        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1331
1332        udelay(200);
1333}
1334
1335/* Multicast tables methods */
1336
1337/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1338static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1339{
1340        int offset;
1341        u32 val;
1342
1343        if (queue == -1) {
1344                val = 0;
1345        } else {
1346                val = 0x1 | (queue << 1);
1347                val |= (val << 24) | (val << 16) | (val << 8);
1348        }
1349
1350        for (offset = 0; offset <= 0xc; offset += 4)
1351                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1352}
1353
1354/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1355static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1356{
1357        int offset;
1358        u32 val;
1359
1360        if (queue == -1) {
1361                val = 0;
1362        } else {
1363                val = 0x1 | (queue << 1);
1364                val |= (val << 24) | (val << 16) | (val << 8);
1365        }
1366
1367        for (offset = 0; offset <= 0xfc; offset += 4)
1368                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1369
1370}
1371
1372/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1373static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1374{
1375        int offset;
1376        u32 val;
1377
1378        if (queue == -1) {
1379                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1380                val = 0;
1381        } else {
1382                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1383                val = 0x1 | (queue << 1);
1384                val |= (val << 24) | (val << 16) | (val << 8);
1385        }
1386
1387        for (offset = 0; offset <= 0xfc; offset += 4)
1388                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1389}
1390
1391static void mvneta_percpu_unmask_interrupt(void *arg)
1392{
1393        struct mvneta_port *pp = arg;
1394
 1395        /* All the queues are unmasked, but actually only the ones
 1396         * mapped to this CPU will be unmasked
1397         */
1398        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1399                    MVNETA_RX_INTR_MASK_ALL |
1400                    MVNETA_TX_INTR_MASK_ALL |
1401                    MVNETA_MISCINTR_INTR_MASK);
1402}
1403
1404static void mvneta_percpu_mask_interrupt(void *arg)
1405{
1406        struct mvneta_port *pp = arg;
1407
1408        /* All the queues are masked, but actually only the ones
1409         * mapped to this CPU will be masked
1410         */
1411        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1412        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1413        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1414}
1415
1416static void mvneta_percpu_clear_intr_cause(void *arg)
1417{
1418        struct mvneta_port *pp = arg;
1419
1420        /* All the queues are cleared, but actually only the ones
1421         * mapped to this CPU will be cleared
1422         */
1423        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1424        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1425        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1426}
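/* Usage sketch (illustrative, derived from the call sites below): these
 * three per-cpu helpers are meant to be run on every CPU, e.g.:
 *
 *      on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 *
 * as done in mvneta_defaults_set(), or via a similar per-CPU call.
 */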
1427
1428/* This method sets defaults for the NETA port:
1429 *      Clears interrupt Cause and Mask registers.
1430 *      Clears all MAC tables.
1431 *      Sets defaults for all registers.
1432 *      Resets RX and TX descriptor rings.
1433 *      Resets PHY.
1434 * This method can be called after mvneta_port_down() to return the port
1435 *      settings to defaults.
1436 */
1437static void mvneta_defaults_set(struct mvneta_port *pp)
1438{
1439        int cpu;
1440        int queue;
1441        u32 val;
1442        int max_cpu = num_present_cpus();
1443
1444        /* Clear all Cause registers */
1445        on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1446
1447        /* Mask all interrupts */
1448        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1449        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1450
1451        /* Enable MBUS Retry bit16 */
1452        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1453
1454        /* Set CPU queue access map. CPUs are assigned to the RX and
1455         * TX queues modulo their number. If there is only one TX
1456         * queue then it is assigned to the CPU associated with the
1457         * default RX queue.
1458         */
1459        for_each_present_cpu(cpu) {
1460                int rxq_map = 0, txq_map = 0;
1461                int rxq, txq;
1462                if (!pp->neta_armada3700) {
1463                        for (rxq = 0; rxq < rxq_number; rxq++)
1464                                if ((rxq % max_cpu) == cpu)
1465                                        rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1466
1467                        for (txq = 0; txq < txq_number; txq++)
1468                                if ((txq % max_cpu) == cpu)
1469                                        txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1470
1471                        /* With only one TX queue we configure a special case
1472                         * which allows getting all the IRQs on a single
1473                         * CPU
1474                         */
1475                        if (txq_number == 1)
1476                                txq_map = (cpu == pp->rxq_def) ?
1477                                        MVNETA_CPU_TXQ_ACCESS(1) : 0;
1478
1479                } else {
1480                        txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1481                        rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1482                }
1483
1484                mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1485        }
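        /* Worked example of the mapping above (illustrative): with two
         * present CPUs and eight RX/TX queues, CPU0 is given queues
         * 0, 2, 4, 6 and CPU1 is given 1, 3, 5, 7; with txq_number == 1
         * only the CPU matching rxq_def is granted TX queue access.
         */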
1486
1487        /* Reset RX and TX DMAs */
1488        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1489        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1490
1491        /* Disable Legacy WRR, Disable EJP, Release from reset */
1492        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1493        for (queue = 0; queue < txq_number; queue++) {
1494                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1495                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1496        }
1497
1498        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1499        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1500
1501        /* Set Port Acceleration Mode */
1502        if (pp->bm_priv)
1503                /* HW buffer management + legacy parser */
1504                val = MVNETA_ACC_MODE_EXT2;
1505        else
1506                /* SW buffer management + legacy parser */
1507                val = MVNETA_ACC_MODE_EXT1;
1508        mvreg_write(pp, MVNETA_ACC_MODE, val);
1509
1510        if (pp->bm_priv)
1511                mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1512
1513        /* Update val of portCfg register according to the RxQueue types */
1514        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1515        mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1516
1517        val = 0;
1518        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1519        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1520
1521        /* Build PORT_SDMA_CONFIG_REG */
1522        val = 0;
1523
1524        /* Default burst size */
1525        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1526        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1527        val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1528
1529#if defined(__BIG_ENDIAN)
1530        val |= MVNETA_DESC_SWAP;
1531#endif
1532
1533        /* Assign port SDMA configuration */
1534        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1535
1536        /* Disable PHY polling in hardware, since we're using the
1537         * kernel phylib to do this.
1538         */
1539        val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1540        val &= ~MVNETA_PHY_POLLING_ENABLE;
1541        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1542
1543        mvneta_set_ucast_table(pp, -1);
1544        mvneta_set_special_mcast_table(pp, -1);
1545        mvneta_set_other_mcast_table(pp, -1);
1546
1547        /* Set port interrupt enable register - default enable all */
1548        mvreg_write(pp, MVNETA_INTR_ENABLE,
1549                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1550                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1551
1552        mvneta_mib_counters_clear(pp);
1553}
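/* Note (derived from the sequence above): this function assumes the port is
 * already stopped; it masks and clears interrupts first, then resets the
 * RX/TX DMAs before reprogramming the acceleration mode, SDMA configuration
 * and MAC filter tables.
 */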
1554
1555/* Set max sizes for tx queues */
1556static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1557
1558{
1559        u32 val, size, mtu;
1560        int queue;
1561
1562        mtu = max_tx_size * 8;
1563        if (mtu > MVNETA_TX_MTU_MAX)
1564                mtu = MVNETA_TX_MTU_MAX;
1565
1566        /* Set MTU */
1567        val = mvreg_read(pp, MVNETA_TX_MTU);
1568        val &= ~MVNETA_TX_MTU_MAX;
1569        val |= mtu;
1570        mvreg_write(pp, MVNETA_TX_MTU, val);
1571
1572        /* TX token size and all TXQs token size must be larger than the MTU */
1573        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1574
1575        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1576        if (size < mtu) {
1577                size = mtu;
1578                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1579                val |= size;
1580                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1581        }
1582        for (queue = 0; queue < txq_number; queue++) {
1583                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1584
1585                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1586                if (size < mtu) {
1587                        size = mtu;
1588                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1589                        val |= size;
1590                        mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1591                }
1592        }
1593}
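/* Numeric sketch (illustrative values): max_tx_size = 1518 gives
 * mtu = 1518 * 8 = 12144, which is programmed as the TX MTU and used as the
 * minimum for the global TX token size and each per-TXQ token size.
 */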
1594
1595/* Set unicast address */
1596static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1597                                  int queue)
1598{
1599        unsigned int unicast_reg;
1600        unsigned int tbl_offset;
1601        unsigned int reg_offset;
1602
1603        /* Locate the Unicast table entry */
1604        last_nibble = (0xf & last_nibble);
1605
1606        /* offset from unicast tbl base */
1607        tbl_offset = (last_nibble / 4) * 4;
1608
1609        /* offset within the above reg  */
1610        reg_offset = last_nibble % 4;
1611
1612        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1613
1614        if (queue == -1) {
1615                /* Clear accepts frame bit at specified unicast DA tbl entry */
1616                unicast_reg &= ~(0xff << (8 * reg_offset));
1617        } else {
1618                unicast_reg &= ~(0xff << (8 * reg_offset));
1619                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1620        }
1621
1622        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1623}
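/* Addressing example (derived from the code above): the unicast table holds
 * 16 one-byte entries spread over four registers. For last_nibble = 0xa,
 * tbl_offset = 8 and reg_offset = 2, so byte 2 of the third filter register
 * is reprogrammed.
 */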
1624
1625/* Set mac address */
1626static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1627                                int queue)
1628{
1629        unsigned int mac_h;
1630        unsigned int mac_l;
1631
1632        if (queue != -1) {
1633                mac_l = (addr[4] << 8) | (addr[5]);
1634                mac_h = (addr[0] << 24) | (addr[1] << 16) |
1635                        (addr[2] << 8) | (addr[3] << 0);
1636
1637                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1638                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1639        }
1640
1641        /* Accept frames of this address */
1642        mvneta_set_ucast_addr(pp, addr[5], queue);
1643}
1644
1645/* Set the number of packets that will be received before an RX
1646 * interrupt is generated by the HW.
1647 */
1648static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1649                                    struct mvneta_rx_queue *rxq, u32 value)
1650{
1651        mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1652                    value | MVNETA_RXQ_NON_OCCUPIED(0));
1653}
1654
1655/* Set the time delay in usec before an RX interrupt is generated by
1656 * the HW.
1657 */
1658static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1659                                    struct mvneta_rx_queue *rxq, u32 value)
1660{
1661        u32 val;
1662        unsigned long clk_rate;
1663
1664        clk_rate = clk_get_rate(pp->clk);
1665        val = (clk_rate / 1000000) * value;
1666
1667        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1668}
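/* Worked example (assuming, for illustration, a 250 MHz NETA core clock):
 * value = 100 usec gives val = (250000000 / 1000000) * 100 = 25000 clock
 * cycles written to the time-coalescing register.
 */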
1669
1670/* Set threshold for TX_DONE pkts coalescing */
1671static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1672                                         struct mvneta_tx_queue *txq, u32 value)
1673{
1674        u32 val;
1675
1676        val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1677
1678        val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1679        val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1680
1681        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1682}
1683
1684/* Handle rx descriptor fill by setting buf_phys_addr and the buffer virtual address */
1685static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1686                                u32 phys_addr, void *virt_addr,
1687                                struct mvneta_rx_queue *rxq)
1688{
1689        int i;
1690
1691        rx_desc->buf_phys_addr = phys_addr;
1692        i = rx_desc - rxq->descs;
1693        rxq->buf_virt_addr[i] = virt_addr;
1694}
1695
1696/* Decrement sent descriptors counter */
1697static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1698                                     struct mvneta_tx_queue *txq,
1699                                     int sent_desc)
1700{
1701        u32 val;
1702
1703        /* Only 255 TX descriptors can be updated at once */
1704        while (sent_desc > 0xff) {
1705                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1706                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1707                sent_desc = sent_desc - 0xff;
1708        }
1709
1710        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1711        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1712}
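/* Example (derived from the loop above): sent_desc = 300 results in two
 * register writes, one decrementing 255 and a second decrementing the
 * remaining 45.
 */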
1713
1714/* Get number of TX descriptors already sent by HW */
1715static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1716                                        struct mvneta_tx_queue *txq)
1717{
1718        u32 val;
1719        int sent_desc;
1720
1721        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1722        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1723                MVNETA_TXQ_SENT_DESC_SHIFT;
1724
1725        return sent_desc;
1726}
1727
1728/* Get number of sent descriptors and decrement counter.
1729 *  The number of sent descriptors is returned.
1730 */
1731static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1732                                     struct mvneta_tx_queue *txq)
1733{
1734        int sent_desc;
1735
1736        /* Get number of sent descriptors */
1737        sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1738
1739        /* Decrement sent descriptors counter */
1740        if (sent_desc)
1741                mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1742
1743        return sent_desc;
1744}
1745
1746/* Set TXQ descriptors fields relevant for CSUM calculation */
1747static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1748                                int ip_hdr_len, int l4_proto)
1749{
1750        u32 command;
1751
1752        /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1753         * G_L4_chk, L4_type; required only for checksum
1754         * calculation
1755         */
1756        command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1757        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1758
1759        if (l3_proto == htons(ETH_P_IP))
1760                command |= MVNETA_TXD_IP_CSUM;
1761        else
1762                command |= MVNETA_TX_L3_IP6;
1763
1764        if (l4_proto == IPPROTO_TCP)
1765                command |=  MVNETA_TX_L4_CSUM_FULL;
1766        else if (l4_proto == IPPROTO_UDP)
1767                command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1768        else
1769                command |= MVNETA_TX_L4_CSUM_NOT;
1770
1771        return command;
1772}
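/* Example (illustrative): a TCP over IPv4 frame with no IP options, i.e.
 * l3_offs = 14 (plain Ethernet header) and ip_hdr_len = 5 (ihl, in 32-bit
 * words), yields a command carrying those two offsets plus
 * MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */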
1773
1774
1775/* Display more error info */
1776static void mvneta_rx_error(struct mvneta_port *pp,
1777                            struct mvneta_rx_desc *rx_desc)
1778{
1779        struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1780        u32 status = rx_desc->status;
1781
1782        /* update per-cpu counter */
1783        u64_stats_update_begin(&stats->syncp);
1784        stats->rx_errors++;
1785        u64_stats_update_end(&stats->syncp);
1786
1787        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1788        case MVNETA_RXD_ERR_CRC:
1789                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1790                           status, rx_desc->data_size);
1791                break;
1792        case MVNETA_RXD_ERR_OVERRUN:
1793                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1794                           status, rx_desc->data_size);
1795                break;
1796        case MVNETA_RXD_ERR_LEN:
1797                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1798                           status, rx_desc->data_size);
1799                break;
1800        case MVNETA_RXD_ERR_RESOURCE:
1801                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1802                           status, rx_desc->data_size);
1803                break;
1804        }
1805}
1806
1807/* Handle RX checksum offload based on the descriptor's status */
1808static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
1809{
1810        if ((pp->dev->features & NETIF_F_RXCSUM) &&
1811            (status & MVNETA_RXD_L3_IP4) &&
1812            (status & MVNETA_RXD_L4_CSUM_OK))
1813                return CHECKSUM_UNNECESSARY;
1814
1815        return CHECKSUM_NONE;
1816}
1817
1818/* Return tx queue pointer (find last set bit) according to <cause> returned
1819 * from the tx_done reg. <cause> must not be null. The return value is always
1820 * a valid queue, matching the last set bit found in <cause>.
1821 */
1822static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1823                                                     u32 cause)
1824{
1825        int queue = fls(cause) - 1;
1826
1827        return &pp->txqs[queue];
1828}
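/* Example (derived from fls()): cause = 0x6 (TXQs 1 and 2 done) returns
 * &pp->txqs[2]; the caller then clears that bit and calls again for TXQ 1,
 * as done in mvneta_tx_done_gbe().
 */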
1829
1830/* Free tx queue skbuffs */
1831static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1832                                 struct mvneta_tx_queue *txq, int num,
1833                                 struct netdev_queue *nq, bool napi)
1834{
1835        unsigned int bytes_compl = 0, pkts_compl = 0;
1836        struct xdp_frame_bulk bq;
1837        int i;
1838
1839        xdp_frame_bulk_init(&bq);
1840
1841        rcu_read_lock(); /* needed for xdp_return_frame_bulk */
1842
1843        for (i = 0; i < num; i++) {
1844                struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1845                struct mvneta_tx_desc *tx_desc = txq->descs +
1846                        txq->txq_get_index;
1847
1848                mvneta_txq_inc_get(txq);
1849
1850                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
1851                    buf->type != MVNETA_TYPE_XDP_TX)
1852                        dma_unmap_single(pp->dev->dev.parent,
1853                                         tx_desc->buf_phys_addr,
1854                                         tx_desc->data_size, DMA_TO_DEVICE);
1855                if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
1856                        bytes_compl += buf->skb->len;
1857                        pkts_compl++;
1858                        dev_kfree_skb_any(buf->skb);
1859                } else if (buf->type == MVNETA_TYPE_XDP_TX ||
1860                           buf->type == MVNETA_TYPE_XDP_NDO) {
1861                        if (napi && buf->type == MVNETA_TYPE_XDP_TX)
1862                                xdp_return_frame_rx_napi(buf->xdpf);
1863                        else
1864                                xdp_return_frame_bulk(buf->xdpf, &bq);
1865                }
1866        }
1867        xdp_flush_frame_bulk(&bq);
1868
1869        rcu_read_unlock();
1870
1871        netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1872}
1873
1874/* Handle end of transmission */
1875static void mvneta_txq_done(struct mvneta_port *pp,
1876                           struct mvneta_tx_queue *txq)
1877{
1878        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1879        int tx_done;
1880
1881        tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1882        if (!tx_done)
1883                return;
1884
1885        mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1886
1887        txq->count -= tx_done;
1888
1889        if (netif_tx_queue_stopped(nq)) {
1890                if (txq->count <= txq->tx_wake_threshold)
1891                        netif_tx_wake_queue(nq);
1892        }
1893}
1894
1895/* Refill processing for SW buffer management */
1896/* Allocate page per descriptor */
1897static int mvneta_rx_refill(struct mvneta_port *pp,
1898                            struct mvneta_rx_desc *rx_desc,
1899                            struct mvneta_rx_queue *rxq,
1900                            gfp_t gfp_mask)
1901{
1902        dma_addr_t phys_addr;
1903        struct page *page;
1904
1905        page = page_pool_alloc_pages(rxq->page_pool,
1906                                     gfp_mask | __GFP_NOWARN);
1907        if (!page)
1908                return -ENOMEM;
1909
1910        phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1911        mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1912
1913        return 0;
1914}
1915
1916/* Handle tx checksum */
1917static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1918{
1919        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1920                int ip_hdr_len = 0;
1921                __be16 l3_proto = vlan_get_protocol(skb);
1922                u8 l4_proto;
1923
1924                if (l3_proto == htons(ETH_P_IP)) {
1925                        struct iphdr *ip4h = ip_hdr(skb);
1926
1927                        /* Calculate IPv4 checksum and L4 checksum */
1928                        ip_hdr_len = ip4h->ihl;
1929                        l4_proto = ip4h->protocol;
1930                } else if (l3_proto == htons(ETH_P_IPV6)) {
1931                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
1932
1933                        /* Read l4_protocol from one of IPv6 extra headers */
1934                        if (skb_network_header_len(skb) > 0)
1935                                ip_hdr_len = (skb_network_header_len(skb) >> 2);
1936                        l4_proto = ip6h->nexthdr;
1937                } else
1938                        return MVNETA_TX_L4_CSUM_NOT;
1939
1940                return mvneta_txq_desc_csum(skb_network_offset(skb),
1941                                            l3_proto, ip_hdr_len, l4_proto);
1942        }
1943
1944        return MVNETA_TX_L4_CSUM_NOT;
1945}
1946
1947/* Drop packets received by the RXQ and free buffers */
1948static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1949                                 struct mvneta_rx_queue *rxq)
1950{
1951        int rx_done, i;
1952
1953        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1954        if (rx_done)
1955                mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1956
1957        if (pp->bm_priv) {
1958                for (i = 0; i < rx_done; i++) {
1959                        struct mvneta_rx_desc *rx_desc =
1960                                                  mvneta_rxq_next_desc_get(rxq);
1961                        u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1962                        struct mvneta_bm_pool *bm_pool;
1963
1964                        bm_pool = &pp->bm_priv->bm_pools[pool_id];
1965                        /* Return dropped buffer to the pool */
1966                        mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1967                                              rx_desc->buf_phys_addr);
1968                }
1969                return;
1970        }
1971
1972        for (i = 0; i < rxq->size; i++) {
1973                struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1974                void *data = rxq->buf_virt_addr[i];
1975                if (!data || !(rx_desc->buf_phys_addr))
1976                        continue;
1977
1978                page_pool_put_full_page(rxq->page_pool, data, false);
1979        }
1980        if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
1981                xdp_rxq_info_unreg(&rxq->xdp_rxq);
1982        page_pool_destroy(rxq->page_pool);
1983        rxq->page_pool = NULL;
1984}
1985
1986static void
1987mvneta_update_stats(struct mvneta_port *pp,
1988                    struct mvneta_stats *ps)
1989{
1990        struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1991
1992        u64_stats_update_begin(&stats->syncp);
1993        stats->es.ps.rx_packets += ps->rx_packets;
1994        stats->es.ps.rx_bytes += ps->rx_bytes;
1995        /* xdp */
1996        stats->es.ps.xdp_redirect += ps->xdp_redirect;
1997        stats->es.ps.xdp_pass += ps->xdp_pass;
1998        stats->es.ps.xdp_drop += ps->xdp_drop;
1999        u64_stats_update_end(&stats->syncp);
2000}
2001
2002static inline
2003int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
2004{
2005        struct mvneta_rx_desc *rx_desc;
2006        int curr_desc = rxq->first_to_refill;
2007        int i;
2008
2009        for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
2010                rx_desc = rxq->descs + curr_desc;
2011                if (!(rx_desc->buf_phys_addr)) {
2012                        if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2013                                struct mvneta_pcpu_stats *stats;
2014
2015                                pr_err("Can't refill queue %d. Done %d from %d\n",
2016                                       rxq->id, i, rxq->refill_num);
2017
2018                                stats = this_cpu_ptr(pp->stats);
2019                                u64_stats_update_begin(&stats->syncp);
2020                                stats->es.refill_error++;
2021                                u64_stats_update_end(&stats->syncp);
2022                                break;
2023                        }
2024                }
2025                curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
2026        }
2027        rxq->refill_num -= i;
2028        rxq->first_to_refill = curr_desc;
2029
2030        return i;
2031}
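/* Note (derived from the loop above): at most 64 descriptors are refilled
 * per call; rxq->first_to_refill and rxq->refill_num carry any remaining
 * work over to the next invocation.
 */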
2032
2033static void
2034mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2035                    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
2036                    int sync_len)
2037{
2038        int i;
2039
2040        for (i = 0; i < sinfo->nr_frags; i++)
2041                page_pool_put_full_page(rxq->page_pool,
2042                                        skb_frag_page(&sinfo->frags[i]), true);
2043        page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
2044                           sync_len, true);
2045}
2046
2047static int
2048mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2049                        struct xdp_frame *xdpf, bool dma_map)
2050{
2051        struct mvneta_tx_desc *tx_desc;
2052        struct mvneta_tx_buf *buf;
2053        dma_addr_t dma_addr;
2054
2055        if (txq->count >= txq->tx_stop_threshold)
2056                return MVNETA_XDP_DROPPED;
2057
2058        tx_desc = mvneta_txq_next_desc_get(txq);
2059
2060        buf = &txq->buf[txq->txq_put_index];
2061        if (dma_map) {
2062                /* ndo_xdp_xmit */
2063                dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
2064                                          xdpf->len, DMA_TO_DEVICE);
2065                if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
2066                        mvneta_txq_desc_put(txq);
2067                        return MVNETA_XDP_DROPPED;
2068                }
2069                buf->type = MVNETA_TYPE_XDP_NDO;
2070        } else {
2071                struct page *page = virt_to_page(xdpf->data);
2072
2073                dma_addr = page_pool_get_dma_addr(page) +
2074                           sizeof(*xdpf) + xdpf->headroom;
2075                dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
2076                                           xdpf->len, DMA_BIDIRECTIONAL);
2077                buf->type = MVNETA_TYPE_XDP_TX;
2078        }
2079        buf->xdpf = xdpf;
2080
2081        tx_desc->command = MVNETA_TXD_FLZ_DESC;
2082        tx_desc->buf_phys_addr = dma_addr;
2083        tx_desc->data_size = xdpf->len;
2084
2085        mvneta_txq_inc_put(txq);
2086        txq->pending++;
2087        txq->count++;
2088
2089        return MVNETA_XDP_TX;
2090}
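/* Note (derived from the two branches above): frames coming in through
 * ndo_xdp_xmit (dma_map == true) get a fresh DMA mapping, while XDP_TX
 * frames reuse the page_pool mapping and only need a dma_sync before
 * transmission.
 */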
2091
2092static int
2093mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2094{
2095        struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2096        struct mvneta_tx_queue *txq;
2097        struct netdev_queue *nq;
2098        struct xdp_frame *xdpf;
2099        int cpu;
2100        u32 ret;
2101
2102        xdpf = xdp_convert_buff_to_frame(xdp);
2103        if (unlikely(!xdpf))
2104                return MVNETA_XDP_DROPPED;
2105
2106        cpu = smp_processor_id();
2107        txq = &pp->txqs[cpu % txq_number];
2108        nq = netdev_get_tx_queue(pp->dev, txq->id);
2109
2110        __netif_tx_lock(nq, cpu);
2111        ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
2112        if (ret == MVNETA_XDP_TX) {
2113                u64_stats_update_begin(&stats->syncp);
2114                stats->es.ps.tx_bytes += xdpf->len;
2115                stats->es.ps.tx_packets++;
2116                stats->es.ps.xdp_tx++;
2117                u64_stats_update_end(&stats->syncp);
2118
2119                mvneta_txq_pend_desc_add(pp, txq, 0);
2120        } else {
2121                u64_stats_update_begin(&stats->syncp);
2122                stats->es.ps.xdp_tx_err++;
2123                u64_stats_update_end(&stats->syncp);
2124        }
2125        __netif_tx_unlock(nq);
2126
2127        return ret;
2128}
2129
2130static int
2131mvneta_xdp_xmit(struct net_device *dev, int num_frame,
2132                struct xdp_frame **frames, u32 flags)
2133{
2134        struct mvneta_port *pp = netdev_priv(dev);
2135        struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2136        int i, nxmit_byte = 0, nxmit = 0;
2137        int cpu = smp_processor_id();
2138        struct mvneta_tx_queue *txq;
2139        struct netdev_queue *nq;
2140        u32 ret;
2141
2142        if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
2143                return -ENETDOWN;
2144
2145        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2146                return -EINVAL;
2147
2148        txq = &pp->txqs[cpu % txq_number];
2149        nq = netdev_get_tx_queue(pp->dev, txq->id);
2150
2151        __netif_tx_lock(nq, cpu);
2152        for (i = 0; i < num_frame; i++) {
2153                ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
2154                if (ret != MVNETA_XDP_TX)
2155                        break;
2156
2157                nxmit_byte += frames[i]->len;
2158                nxmit++;
2159        }
2160
2161        if (unlikely(flags & XDP_XMIT_FLUSH))
2162                mvneta_txq_pend_desc_add(pp, txq, 0);
2163        __netif_tx_unlock(nq);
2164
2165        u64_stats_update_begin(&stats->syncp);
2166        stats->es.ps.tx_bytes += nxmit_byte;
2167        stats->es.ps.tx_packets += nxmit;
2168        stats->es.ps.xdp_xmit += nxmit;
2169        stats->es.ps.xdp_xmit_err += num_frame - nxmit;
2170        u64_stats_update_end(&stats->syncp);
2171
2172        return nxmit;
2173}
2174
2175static int
2176mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2177               struct bpf_prog *prog, struct xdp_buff *xdp,
2178               u32 frame_sz, struct mvneta_stats *stats)
2179{
2180        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2181        unsigned int len, data_len, sync;
2182        u32 ret, act;
2183
2184        len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2185        data_len = xdp->data_end - xdp->data;
2186        act = bpf_prog_run_xdp(prog, xdp);
2187
2188        /* Due to xdp_adjust_tail: DMA sync for_device covers max len CPU touched */
2189        sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2190        sync = max(sync, len);
2191
2192        switch (act) {
2193        case XDP_PASS:
2194                stats->xdp_pass++;
2195                return MVNETA_XDP_PASS;
2196        case XDP_REDIRECT: {
2197                int err;
2198
2199                err = xdp_do_redirect(pp->dev, xdp, prog);
2200                if (unlikely(err)) {
2201                        mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
2202                        ret = MVNETA_XDP_DROPPED;
2203                } else {
2204                        ret = MVNETA_XDP_REDIR;
2205                        stats->xdp_redirect++;
2206                }
2207                break;
2208        }
2209        case XDP_TX:
2210                ret = mvneta_xdp_xmit_back(pp, xdp);
2211                if (ret != MVNETA_XDP_TX)
2212                        mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
2213                break;
2214        default:
2215                bpf_warn_invalid_xdp_action(act);
2216                fallthrough;
2217        case XDP_ABORTED:
2218                trace_xdp_exception(pp->dev, prog, act);
2219                fallthrough;
2220        case XDP_DROP:
2221                mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
2222                ret = MVNETA_XDP_DROPPED;
2223                stats->xdp_drop++;
2224                break;
2225        }
2226
2227        stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
2228        stats->rx_packets++;
2229
2230        return ret;
2231}
2232
2233static void
2234mvneta_swbm_rx_frame(struct mvneta_port *pp,
2235                     struct mvneta_rx_desc *rx_desc,
2236                     struct mvneta_rx_queue *rxq,
2237                     struct xdp_buff *xdp, int *size,
2238                     struct page *page)
2239{
2240        unsigned char *data = page_address(page);
2241        int data_len = -MVNETA_MH_SIZE, len;
2242        struct net_device *dev = pp->dev;
2243        enum dma_data_direction dma_dir;
2244        struct skb_shared_info *sinfo;
2245
2246        if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2247                len = MVNETA_MAX_RX_BUF_SIZE;
2248                data_len += len;
2249        } else {
2250                len = *size;
2251                data_len += len - ETH_FCS_LEN;
2252        }
2253        *size = *size - len;
2254
2255        dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2256        dma_sync_single_for_cpu(dev->dev.parent,
2257                                rx_desc->buf_phys_addr,
2258                                len, dma_dir);
2259
2260        rx_desc->buf_phys_addr = 0;
2261
2262        /* Prefetch header */
2263        prefetch(data);
2264        xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
2265                         data_len, false);
2266
2267        sinfo = xdp_get_shared_info_from_buff(xdp);
2268        sinfo->nr_frags = 0;
2269}
2270
2271static void
2272mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2273                            struct mvneta_rx_desc *rx_desc,
2274                            struct mvneta_rx_queue *rxq,
2275                            struct xdp_buff *xdp, int *size,
2276                            struct skb_shared_info *xdp_sinfo,
2277                            struct page *page)
2278{
2279        struct net_device *dev = pp->dev;
2280        enum dma_data_direction dma_dir;
2281        int data_len, len;
2282
2283        if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2284                len = MVNETA_MAX_RX_BUF_SIZE;
2285                data_len = len;
2286        } else {
2287                len = *size;
2288                data_len = len - ETH_FCS_LEN;
2289        }
2290        dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2291        dma_sync_single_for_cpu(dev->dev.parent,
2292                                rx_desc->buf_phys_addr,
2293                                len, dma_dir);
2294        rx_desc->buf_phys_addr = 0;
2295
2296        if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
2297                skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
2298
2299                skb_frag_off_set(frag, pp->rx_offset_correction);
2300                skb_frag_size_set(frag, data_len);
2301                __skb_frag_set_page(frag, page);
2302        } else {
2303                page_pool_put_full_page(rxq->page_pool, page, true);
2304        }
2305
2306        /* last fragment */
2307        if (len == *size) {
2308                struct skb_shared_info *sinfo;
2309
2310                sinfo = xdp_get_shared_info_from_buff(xdp);
2311                sinfo->nr_frags = xdp_sinfo->nr_frags;
2312                memcpy(sinfo->frags, xdp_sinfo->frags,
2313                       sinfo->nr_frags * sizeof(skb_frag_t));
2314        }
2315        *size -= len;
2316}
2317
2318static struct sk_buff *
2319mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
2320                      struct xdp_buff *xdp, u32 desc_status)
2321{
2322        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2323        int i, num_frags = sinfo->nr_frags;
2324        struct sk_buff *skb;
2325
2326        skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
2327        if (!skb)
2328                return ERR_PTR(-ENOMEM);
2329
2330        skb_mark_for_recycle(skb);
2331
2332        skb_reserve(skb, xdp->data - xdp->data_hard_start);
2333        skb_put(skb, xdp->data_end - xdp->data);
2334        skb->ip_summed = mvneta_rx_csum(pp, desc_status);
2335
2336        for (i = 0; i < num_frags; i++) {
2337                skb_frag_t *frag = &sinfo->frags[i];
2338
2339                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2340                                skb_frag_page(frag), skb_frag_off(frag),
2341                                skb_frag_size(frag), PAGE_SIZE);
2342        }
2343
2344        return skb;
2345}
2346
2347/* Main rx processing when using software buffer management */
2348static int mvneta_rx_swbm(struct napi_struct *napi,
2349                          struct mvneta_port *pp, int budget,
2350                          struct mvneta_rx_queue *rxq)
2351{
2352        int rx_proc = 0, rx_todo, refill, size = 0;
2353        struct net_device *dev = pp->dev;
2354        struct skb_shared_info sinfo;
2355        struct mvneta_stats ps = {};
2356        struct bpf_prog *xdp_prog;
2357        u32 desc_status, frame_sz;
2358        struct xdp_buff xdp_buf;
2359
2360        xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
2361        xdp_buf.data_hard_start = NULL;
2362
2363        sinfo.nr_frags = 0;
2364
2365        /* Get number of received packets */
2366        rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2367
2368        xdp_prog = READ_ONCE(pp->xdp_prog);
2369
2370        /* Fairness NAPI loop */
2371        while (rx_proc < budget && rx_proc < rx_todo) {
2372                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2373                u32 rx_status, index;
2374                struct sk_buff *skb;
2375                struct page *page;
2376
2377                index = rx_desc - rxq->descs;
2378                page = (struct page *)rxq->buf_virt_addr[index];
2379
2380                rx_status = rx_desc->status;
2381                rx_proc++;
2382                rxq->refill_num++;
2383
2384                if (rx_status & MVNETA_RXD_FIRST_DESC) {
2385                        /* Check errors only for FIRST descriptor */
2386                        if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
2387                                mvneta_rx_error(pp, rx_desc);
2388                                goto next;
2389                        }
2390
2391                        size = rx_desc->data_size;
2392                        frame_sz = size - ETH_FCS_LEN;
2393                        desc_status = rx_status;
2394
2395                        mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2396                                             &size, page);
2397                } else {
2398                        if (unlikely(!xdp_buf.data_hard_start)) {
2399                                rx_desc->buf_phys_addr = 0;
2400                                page_pool_put_full_page(rxq->page_pool, page,
2401                                                        true);
2402                                goto next;
2403                        }
2404
2405                        mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2406                                                    &size, &sinfo, page);
2407                } /* Middle or Last descriptor */
2408
2409                if (!(rx_status & MVNETA_RXD_LAST_DESC))
2410                        /* no last descriptor this time */
2411                        continue;
2412
2413                if (size) {
2414                        mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
2415                        goto next;
2416                }
2417
2418                if (xdp_prog &&
2419                    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2420                        goto next;
2421
2422                skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
2423                if (IS_ERR(skb)) {
2424                        struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2425
2426                        mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
2427
2428                        u64_stats_update_begin(&stats->syncp);
2429                        stats->es.skb_alloc_error++;
2430                        stats->rx_dropped++;
2431                        u64_stats_update_end(&stats->syncp);
2432
2433                        goto next;
2434                }
2435
2436                ps.rx_bytes += skb->len;
2437                ps.rx_packets++;
2438
2439                skb->protocol = eth_type_trans(skb, dev);
2440                napi_gro_receive(napi, skb);
2441next:
2442                xdp_buf.data_hard_start = NULL;
2443                sinfo.nr_frags = 0;
2444        }
2445
2446        if (xdp_buf.data_hard_start)
2447                mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
2448
2449        if (ps.xdp_redirect)
2450                xdp_do_flush_map();
2451
2452        if (ps.rx_packets)
2453                mvneta_update_stats(pp, &ps);
2454
2455        /* return some buffers to the hardware queue; one at a time is too slow */
2456        refill = mvneta_rx_refill_queue(pp, rxq);
2457
2458        /* Update rxq management counters */
2459        mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2460
2461        return ps.rx_packets;
2462}
2463
2464/* Main rx processing when using hardware buffer management */
2465static int mvneta_rx_hwbm(struct napi_struct *napi,
2466                          struct mvneta_port *pp, int rx_todo,
2467                          struct mvneta_rx_queue *rxq)
2468{
2469        struct net_device *dev = pp->dev;
2470        int rx_done;
2471        u32 rcvd_pkts = 0;
2472        u32 rcvd_bytes = 0;
2473
2474        /* Get number of received packets */
2475        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2476
2477        if (rx_todo > rx_done)
2478                rx_todo = rx_done;
2479
2480        rx_done = 0;
2481
2482        /* Fairness NAPI loop */
2483        while (rx_done < rx_todo) {
2484                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2485                struct mvneta_bm_pool *bm_pool = NULL;
2486                struct sk_buff *skb;
2487                unsigned char *data;
2488                dma_addr_t phys_addr;
2489                u32 rx_status, frag_size;
2490                int rx_bytes, err;
2491                u8 pool_id;
2492
2493                rx_done++;
2494                rx_status = rx_desc->status;
2495                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2496                data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2497                phys_addr = rx_desc->buf_phys_addr;
2498                pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2499                bm_pool = &pp->bm_priv->bm_pools[pool_id];
2500
2501                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2502                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2503err_drop_frame_ret_pool:
2504                        /* Return the buffer to the pool */
2505                        mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2506                                              rx_desc->buf_phys_addr);
2507err_drop_frame:
2508                        mvneta_rx_error(pp, rx_desc);
2509                        /* leave the descriptor untouched */
2510                        continue;
2511                }
2512
2513                if (rx_bytes <= rx_copybreak) {
2514                        /* better copy a small frame and not unmap the DMA region */
2515                        skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2516                        if (unlikely(!skb))
2517                                goto err_drop_frame_ret_pool;
2518
2519                        dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2520                                                      rx_desc->buf_phys_addr,
2521                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
2522                                                      rx_bytes,
2523                                                      DMA_FROM_DEVICE);
2524                        skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2525                                     rx_bytes);
2526
2527                        skb->protocol = eth_type_trans(skb, dev);
2528                        skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2529                        napi_gro_receive(napi, skb);
2530
2531                        rcvd_pkts++;
2532                        rcvd_bytes += rx_bytes;
2533
2534                        /* Return the buffer to the pool */
2535                        mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2536                                              rx_desc->buf_phys_addr);
2537
2538                        /* leave the descriptor and buffer untouched */
2539                        continue;
2540                }
2541
2542                /* Refill processing */
2543                err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2544                if (err) {
2545                        struct mvneta_pcpu_stats *stats;
2546
2547                        netdev_err(dev, "Linux processing - Can't refill\n");
2548
2549                        stats = this_cpu_ptr(pp->stats);
2550                        u64_stats_update_begin(&stats->syncp);
2551                        stats->es.refill_error++;
2552                        u64_stats_update_end(&stats->syncp);
2553
2554                        goto err_drop_frame_ret_pool;
2555                }
2556
2557                frag_size = bm_pool->hwbm_pool.frag_size;
2558
2559                skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2560
2561                /* After refill the old buffer has to be unmapped regardless
2562                 * of whether the skb was successfully built or not.
2563                 */
2564                dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2565                                 bm_pool->buf_size, DMA_FROM_DEVICE);
2566                if (!skb)
2567                        goto err_drop_frame;
2568
2569                rcvd_pkts++;
2570                rcvd_bytes += rx_bytes;
2571
2572                /* Linux processing */
2573                skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2574                skb_put(skb, rx_bytes);
2575
2576                skb->protocol = eth_type_trans(skb, dev);
2577                skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2578
2579                napi_gro_receive(napi, skb);
2580        }
2581
2582        if (rcvd_pkts) {
2583                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2584
2585                u64_stats_update_begin(&stats->syncp);
2586                stats->es.ps.rx_packets += rcvd_pkts;
2587                stats->es.ps.rx_bytes += rcvd_bytes;
2588                u64_stats_update_end(&stats->syncp);
2589        }
2590
2591        /* Update rxq management counters */
2592        mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2593
2594        return rx_done;
2595}
2596
2597static inline void
2598mvneta_tso_put_hdr(struct sk_buff *skb,
2599                   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2600{
2601        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2602        struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2603        struct mvneta_tx_desc *tx_desc;
2604
2605        tx_desc = mvneta_txq_next_desc_get(txq);
2606        tx_desc->data_size = hdr_len;
2607        tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2608        tx_desc->command |= MVNETA_TXD_F_DESC;
2609        tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2610                                 txq->txq_put_index * TSO_HEADER_SIZE;
2611        buf->type = MVNETA_TYPE_SKB;
2612        buf->skb = NULL;
2613
2614        mvneta_txq_inc_put(txq);
2615}
2616
2617static inline int
2618mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2619                    struct sk_buff *skb, char *data, int size,
2620                    bool last_tcp, bool is_last)
2621{
2622        struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2623        struct mvneta_tx_desc *tx_desc;
2624
2625        tx_desc = mvneta_txq_next_desc_get(txq);
2626        tx_desc->data_size = size;
2627        tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2628                                                size, DMA_TO_DEVICE);
2629        if (unlikely(dma_mapping_error(dev->dev.parent,
2630                     tx_desc->buf_phys_addr))) {
2631                mvneta_txq_desc_put(txq);
2632                return -ENOMEM;
2633        }
2634
2635        tx_desc->command = 0;
2636        buf->type = MVNETA_TYPE_SKB;
2637        buf->skb = NULL;
2638
2639        if (last_tcp) {
2640                /* last descriptor in the TCP packet */
2641                tx_desc->command = MVNETA_TXD_L_DESC;
2642
2643                /* last descriptor in SKB */
2644                if (is_last)
2645                        buf->skb = skb;
2646        }
2647        mvneta_txq_inc_put(txq);
2648        return 0;
2649}
2650
2651static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2652                         struct mvneta_tx_queue *txq)
2653{
2654        int hdr_len, total_len, data_left;
2655        int desc_count = 0;
2656        struct mvneta_port *pp = netdev_priv(dev);
2657        struct tso_t tso;
2658        int i;
2659
2660        /* Count needed descriptors */
2661        if ((txq->count + tso_count_descs(skb)) >= txq->size)
2662                return 0;
2663
2664        if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2665                pr_info("*** Is this even possible?\n");
2666                return 0;
2667        }
2668
2669        /* Initialize the TSO handler, and prepare the first payload */
2670        hdr_len = tso_start(skb, &tso);
2671
2672        total_len = skb->len - hdr_len;
2673        while (total_len > 0) {
2674                char *hdr;
2675
2676                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2677                total_len -= data_left;
2678                desc_count++;
2679
2680                /* prepare packet headers: MAC + IP + TCP */
2681                hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2682                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2683
2684                mvneta_tso_put_hdr(skb, pp, txq);
2685
2686                while (data_left > 0) {
2687                        int size;
2688                        desc_count++;
2689
2690                        size = min_t(int, tso.size, data_left);
2691
2692                        if (mvneta_tso_put_data(dev, txq, skb,
2693                                                 tso.data, size,
2694                                                 size == data_left,
2695                                                 total_len == 0))
2696                                goto err_release;
2697                        data_left -= size;
2698
2699                        tso_build_data(skb, &tso, size);
2700                }
2701        }
2702
2703        return desc_count;
2704
2705err_release:
2706        /* Release all used data descriptors; header descriptors must not
2707         * be DMA-unmapped.
2708         */
2709        for (i = desc_count - 1; i >= 0; i--) {
2710                struct mvneta_tx_desc *tx_desc = txq->descs + i;
2711                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2712                        dma_unmap_single(pp->dev->dev.parent,
2713                                         tx_desc->buf_phys_addr,
2714                                         tx_desc->data_size,
2715                                         DMA_TO_DEVICE);
2716                mvneta_txq_desc_put(txq);
2717        }
2718        return 0;
2719}
2720
2721/* Handle tx fragmentation processing */
2722static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2723                                  struct mvneta_tx_queue *txq)
2724{
2725        struct mvneta_tx_desc *tx_desc;
2726        int i, nr_frags = skb_shinfo(skb)->nr_frags;
2727
2728        for (i = 0; i < nr_frags; i++) {
2729                struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2730                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2731                void *addr = skb_frag_address(frag);
2732
2733                tx_desc = mvneta_txq_next_desc_get(txq);
2734                tx_desc->data_size = skb_frag_size(frag);
2735
2736                tx_desc->buf_phys_addr =
2737                        dma_map_single(pp->dev->dev.parent, addr,
2738                                       tx_desc->data_size, DMA_TO_DEVICE);
2739
2740                if (dma_mapping_error(pp->dev->dev.parent,
2741                                      tx_desc->buf_phys_addr)) {
2742                        mvneta_txq_desc_put(txq);
2743                        goto error;
2744                }
2745
2746                if (i == nr_frags - 1) {
2747                        /* Last descriptor */
2748                        tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2749                        buf->skb = skb;
2750                } else {
2751                        /* Descriptor in the middle: Not First, Not Last */
2752                        tx_desc->command = 0;
2753                        buf->skb = NULL;
2754                }
2755                buf->type = MVNETA_TYPE_SKB;
2756                mvneta_txq_inc_put(txq);
2757        }
2758
2759        return 0;
2760
2761error:
2762        /* Release all descriptors that were used to map fragments of
2763         * this packet, as well as the corresponding DMA mappings
2764         */
2765        for (i = i - 1; i >= 0; i--) {
2766                tx_desc = txq->descs + i;
2767                dma_unmap_single(pp->dev->dev.parent,
2768                                 tx_desc->buf_phys_addr,
2769                                 tx_desc->data_size,
2770                                 DMA_TO_DEVICE);
2771                mvneta_txq_desc_put(txq);
2772        }
2773
2774        return -ENOMEM;
2775}
2776
2777/* Main tx processing */
2778static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2779{
2780        struct mvneta_port *pp = netdev_priv(dev);
2781        u16 txq_id = skb_get_queue_mapping(skb);
2782        struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2783        struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2784        struct mvneta_tx_desc *tx_desc;
2785        int len = skb->len;
2786        int frags = 0;
2787        u32 tx_cmd;
2788
2789        if (!netif_running(dev))
2790                goto out;
2791
2792        if (skb_is_gso(skb)) {
2793                frags = mvneta_tx_tso(skb, dev, txq);
2794                goto out;
2795        }
2796
2797        frags = skb_shinfo(skb)->nr_frags + 1;
2798
2799        /* Get a descriptor for the first part of the packet */
2800        tx_desc = mvneta_txq_next_desc_get(txq);
2801
2802        tx_cmd = mvneta_skb_tx_csum(pp, skb);
2803
2804        tx_desc->data_size = skb_headlen(skb);
2805
2806        tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2807                                                tx_desc->data_size,
2808                                                DMA_TO_DEVICE);
2809        if (unlikely(dma_mapping_error(dev->dev.parent,
2810                                       tx_desc->buf_phys_addr))) {
2811                mvneta_txq_desc_put(txq);
2812                frags = 0;
2813                goto out;
2814        }
2815
2816        buf->type = MVNETA_TYPE_SKB;
2817        if (frags == 1) {
2818                /* First and Last descriptor */
2819                tx_cmd |= MVNETA_TXD_FLZ_DESC;
2820                tx_desc->command = tx_cmd;
2821                buf->skb = skb;
2822                mvneta_txq_inc_put(txq);
2823        } else {
2824                /* First but not Last */
2825                tx_cmd |= MVNETA_TXD_F_DESC;
2826                buf->skb = NULL;
2827                mvneta_txq_inc_put(txq);
2828                tx_desc->command = tx_cmd;
2829                /* Continue with other skb fragments */
2830                if (mvneta_tx_frag_process(pp, skb, txq)) {
2831                        dma_unmap_single(dev->dev.parent,
2832                                         tx_desc->buf_phys_addr,
2833                                         tx_desc->data_size,
2834                                         DMA_TO_DEVICE);
2835                        mvneta_txq_desc_put(txq);
2836                        frags = 0;
2837                        goto out;
2838                }
2839        }
2840
2841out:
2842        if (frags > 0) {
2843                struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2844                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2845
2846                netdev_tx_sent_queue(nq, len);
2847
2848                txq->count += frags;
2849                if (txq->count >= txq->tx_stop_threshold)
2850                        netif_tx_stop_queue(nq);
2851
2852                if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2853                    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2854                        mvneta_txq_pend_desc_add(pp, txq, frags);
2855                else
2856                        txq->pending += frags;
2857
2858                u64_stats_update_begin(&stats->syncp);
2859                stats->es.ps.tx_bytes += len;
2860                stats->es.ps.tx_packets++;
2861                u64_stats_update_end(&stats->syncp);
2862        } else {
2863                dev->stats.tx_dropped++;
2864                dev_kfree_skb_any(skb);
2865        }
2866
2867        return NETDEV_TX_OK;
2868}
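/* Note (added for clarity, not upstream text): under netdev_xmit_more()
 * batching, descriptors accumulate in txq->pending and are only handed to
 * the hardware via mvneta_txq_pend_desc_add() when the stack ends the burst,
 * the queue is stopped, or the pending count would exceed
 * MVNETA_TXQ_DEC_SENT_MASK.
 */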
2869
2870
2871/* Free tx resources when resetting a port */
2872static void mvneta_txq_done_force(struct mvneta_port *pp,
2873                                  struct mvneta_tx_queue *txq)
2874
2875{
2876        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2877        int tx_done = txq->count;
2878
2879        mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
2880
2881        /* reset txq */
2882        txq->count = 0;
2883        txq->txq_put_index = 0;
2884        txq->txq_get_index = 0;
2885}
2886
2887/* Handle tx done - called in softirq context. The <cause_tx_done> argument
2888 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2889 */
2890static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2891{
2892        struct mvneta_tx_queue *txq;
2893        struct netdev_queue *nq;
2894        int cpu = smp_processor_id();
2895
2896        while (cause_tx_done) {
2897                txq = mvneta_tx_done_policy(pp, cause_tx_done);
2898
2899                nq = netdev_get_tx_queue(pp->dev, txq->id);
2900                __netif_tx_lock(nq, cpu);
2901
2902                if (txq->count)
2903                        mvneta_txq_done(pp, txq);
2904
2905                __netif_tx_unlock(nq);
2906                cause_tx_done &= ~((1 << txq->id));
2907        }
2908}
2909
2910/* Compute the CRC-8 of the specified address, using an algorithm that is
2911 * specific to the hardware (per the HW spec) and differs from the generic CRC-8.
2912 */
2913static int mvneta_addr_crc(unsigned char *addr)
2914{
2915        int crc = 0;
2916        int i;
2917
2918        for (i = 0; i < ETH_ALEN; i++) {
2919                int j;
2920
2921                crc = (crc ^ addr[i]) << 8;
2922                for (j = 7; j >= 0; j--) {
2923                        if (crc & (0x100 << j))
2924                                crc ^= 0x107 << j;
2925                }
2926        }
2927
2928        return crc;
2929}
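/* Illustrative sketch (not driver code): how the CRC-8 above selects an
 * Other Multicast Table slot.  The offset/lane arithmetic mirrors
 * mvneta_set_other_mcast_addr() below; the helper name and the pr_info()
 * call are made up for illustration, hence the #if 0 guard.
 */
#if 0
static void example_other_mcast_slot(unsigned char *addr)
{
	int crc = mvneta_addr_crc(addr);	/* 8-bit index into the table */
	int tbl_offset = (crc / 4) * 4;		/* register offset from OMC base */
	int reg_offset = crc % 4;		/* byte lane within that register */

	pr_info("crc8=0x%02x -> MVNETA_DA_FILT_OTH_MCAST + 0x%x, lane %d\n",
		crc, tbl_offset, reg_offset);
}
#endif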
2930
2931/* This method controls the net device special MAC multicast support.
2932 * The Special Multicast Table handles MAC addresses of the form
2933 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2934 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2935 * Table entries in the DA-Filter table. This method sets the
2936 * appropriate Special Multicast Table entry.
2937 */
2938static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2939                                          unsigned char last_byte,
2940                                          int queue)
2941{
2942        unsigned int smc_table_reg;
2943        unsigned int tbl_offset;
2944        unsigned int reg_offset;
2945
2946        /* Register offset from SMC table base    */
2947        tbl_offset = (last_byte / 4);
2948        /* Entry offset within the above reg */
2949        reg_offset = last_byte % 4;
2950
2951        smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2952                                        + tbl_offset * 4));
2953
2954        if (queue == -1) {
2955                smc_table_reg &= ~(0xff << (8 * reg_offset));
2956        } else {
2957                smc_table_reg &= ~(0xff << (8 * reg_offset));
2958                smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2959        }
2960
2961        mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2962                    smc_table_reg);
2963}
2964
2965/* This method controls the network device Other MAC multicast support.
2966 * The Other Multicast Table is used for multicast addresses of any other type.
2967 * A CRC-8 is used as an index to the Other Multicast Table entries
2968 * in the DA-Filter table.
2969 * The method gets the CRC-8 value from the calling routine and
2970 * sets the appropriate Other Multicast Table entry according to
2971 * the specified CRC-8.
2972 */
2973static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2974                                        unsigned char crc8,
2975                                        int queue)
2976{
2977        unsigned int omc_table_reg;
2978        unsigned int tbl_offset;
2979        unsigned int reg_offset;
2980
2981        tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2982        reg_offset = crc8 % 4;       /* Entry offset within the above reg   */
2983
2984        omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2985
2986        if (queue == -1) {
2987                /* Clear accepts frame bit at specified Other DA table entry */
2988                omc_table_reg &= ~(0xff << (8 * reg_offset));
2989        } else {
2990                omc_table_reg &= ~(0xff << (8 * reg_offset));
2991                omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2992        }
2993
2994        mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2995}
2996
2997/* The network device supports multicast using two tables:
2998 *    1) Special Multicast Table for MAC addresses of the form
2999 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3000 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
3001 *       Table entries in the DA-Filter table.
3002 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
3003 *       is used as an index to the Other Multicast Table entries in the
3004 *       DA-Filter table.
3005 */
3006static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
3007                                 int queue)
3008{
3009        unsigned char crc_result = 0;
3010
3011        if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
3012                mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3013                return 0;
3014        }
3015
3016        crc_result = mvneta_addr_crc(p_addr);
3017        if (queue == -1) {
3018                if (pp->mcast_count[crc_result] == 0) {
3019                        netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3020                                    crc_result);
3021                        return -EINVAL;
3022                }
3023
3024                pp->mcast_count[crc_result]--;
3025                if (pp->mcast_count[crc_result] != 0) {
3026                        netdev_info(pp->dev,
3027                                    "After delete there are %d valid Mcast for crc8=0x%02x\n",
3028                                    pp->mcast_count[crc_result], crc_result);
3029                        return -EINVAL;
3030                }
3031        } else
3032                pp->mcast_count[crc_result]++;
3033
3034        mvneta_set_other_mcast_addr(pp, crc_result, queue);
3035
3036        return 0;
3037}
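/* Illustrative sketch (not driver code): which filter table a multicast
 * address ends up in.  The addresses are arbitrary examples and the helper
 * name is made up, hence the #if 0 guard.
 */
#if 0
static void example_mcast_dispatch(struct mvneta_port *pp)
{
	/* Matches the 01:00:5e:00:00:xx prefix -> Special table, indexed
	 * directly by the last byte (0x2a).
	 */
	unsigned char ipv4_local[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x2a };
	/* Any other multicast address -> Other table, indexed by the CRC-8
	 * computed in mvneta_addr_crc().
	 */
	unsigned char ipv6_all_nodes[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };

	mvneta_mcast_addr_set(pp, ipv4_local, pp->rxq_def);
	mvneta_mcast_addr_set(pp, ipv6_all_nodes, pp->rxq_def);
}
#endif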
3038
3039/* Configure the filtering mode of the Ethernet port */
3040static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3041                                          int is_promisc)
3042{
3043        u32 port_cfg_reg, val;
3044
3045        port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3046
3047        val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3048
3049        /* Set / Clear UPM bit in port configuration register */
3050        if (is_promisc) {
3051                /* Accept all Unicast addresses */
3052                port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
3053                val |= MVNETA_FORCE_UNI;
3054                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3055                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3056        } else {
3057                /* Reject all Unicast addresses */
3058                port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
3059                val &= ~MVNETA_FORCE_UNI;
3060        }
3061
3062        mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3063        mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3064}
3065
3066/* register unicast and multicast addresses */
3067static void mvneta_set_rx_mode(struct net_device *dev)
3068{
3069        struct mvneta_port *pp = netdev_priv(dev);
3070        struct netdev_hw_addr *ha;
3071
3072        if (dev->flags & IFF_PROMISC) {
3073                /* Accept all: Multicast + Unicast */
3074                mvneta_rx_unicast_promisc_set(pp, 1);
3075                mvneta_set_ucast_table(pp, pp->rxq_def);
3076                mvneta_set_special_mcast_table(pp, pp->rxq_def);
3077                mvneta_set_other_mcast_table(pp, pp->rxq_def);
3078        } else {
3079                /* Accept single Unicast */
3080                mvneta_rx_unicast_promisc_set(pp, 0);
3081                mvneta_set_ucast_table(pp, -1);
3082                mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3083
3084                if (dev->flags & IFF_ALLMULTI) {
3085                        /* Accept all multicast */
3086                        mvneta_set_special_mcast_table(pp, pp->rxq_def);
3087                        mvneta_set_other_mcast_table(pp, pp->rxq_def);
3088                } else {
3089                        /* Accept only initialized multicast */
3090                        mvneta_set_special_mcast_table(pp, -1);
3091                        mvneta_set_other_mcast_table(pp, -1);
3092
3093                        if (!netdev_mc_empty(dev)) {
3094                                netdev_for_each_mc_addr(ha, dev) {
3095                                        mvneta_mcast_addr_set(pp, ha->addr,
3096                                                              pp->rxq_def);
3097                                }
3098                        }
3099                }
3100        }
3101}
3102
3103/* Interrupt handling - the callback for request_irq() */
3104static irqreturn_t mvneta_isr(int irq, void *dev_id)
3105{
3106        struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3107
3108        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3109        napi_schedule(&pp->napi);
3110
3111        return IRQ_HANDLED;
3112}
3113
3114/* Interrupt handling - the callback for request_percpu_irq() */
3115static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
3116{
3117        struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
3118
3119        disable_percpu_irq(port->pp->dev->irq);
3120        napi_schedule(&port->napi);
3121
3122        return IRQ_HANDLED;
3123}
3124
3125static void mvneta_link_change(struct mvneta_port *pp)
3126{
3127        u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3128
3129        phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3130}
3131
3132/* NAPI handler
3133 * Bits 0 - 7 of the causeRxTx register indicate that packets were
3134 * transmitted on the corresponding TXQ (bit 0 is for TX queue 0).
3135 * Bits 8 - 15 of the causeRxTx register indicate that packets were
3136 * received on the corresponding RXQ (bit 8 is for RX queue 0).
3137 * Each CPU has its own causeRxTx register.
3138 */
3139static int mvneta_poll(struct napi_struct *napi, int budget)
3140{
3141        int rx_done = 0;
3142        u32 cause_rx_tx;
3143        int rx_queue;
3144        struct mvneta_port *pp = netdev_priv(napi->dev);
3145        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3146
3147        if (!netif_running(pp->dev)) {
3148                napi_complete(napi);
3149                return rx_done;
3150        }
3151
3152        /* Read cause register */
3153        cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3154        if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
3155                u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3156
3157                mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3158
3159                if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3160                                  MVNETA_CAUSE_LINK_CHANGE))
3161                        mvneta_link_change(pp);
3162        }
3163
3164        /* Release Tx descriptors */
3165        if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3166                mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3167                cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
3168        }
3169
3170        /* For the case where the last mvneta_poll did not process all
3171         * RX packets
3172         */
3173        cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3174                port->cause_rx_tx;
3175
3176        rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3177        if (rx_queue) {
3178                rx_queue = rx_queue - 1;
3179                if (pp->bm_priv)
3180                        rx_done = mvneta_rx_hwbm(napi, pp, budget,
3181                                                 &pp->rxqs[rx_queue]);
3182                else
3183                        rx_done = mvneta_rx_swbm(napi, pp, budget,
3184                                                 &pp->rxqs[rx_queue]);
3185        }
3186
3187        if (rx_done < budget) {
3188                cause_rx_tx = 0;
3189                napi_complete_done(napi, rx_done);
3190
3191                if (pp->neta_armada3700) {
3192                        unsigned long flags;
3193
3194                        local_irq_save(flags);
3195                        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3196                                    MVNETA_RX_INTR_MASK(rxq_number) |
3197                                    MVNETA_TX_INTR_MASK(txq_number) |
3198                                    MVNETA_MISCINTR_INTR_MASK);
3199                        local_irq_restore(flags);
3200                } else {
3201                        enable_percpu_irq(pp->dev->irq, 0);
3202                }
3203        }
3204
3205        if (pp->neta_armada3700)
3206                pp->cause_rx_tx = cause_rx_tx;
3207        else
3208                port->cause_rx_tx = cause_rx_tx;
3209
3210        return rx_done;
3211}
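/* Illustrative sketch (not driver code): decoding a causeRxTx value the same
 * way mvneta_poll() does.  The value and the helper name are invented for the
 * example, hence the #if 0 guard.
 */
#if 0
static void example_decode_cause(u32 cause_rx_tx)
{
	u32 tx_cause = cause_rx_tx & MVNETA_TX_INTR_MASK_ALL;	/* bits 0-7  */
	int rx_queue = fls((cause_rx_tx >> 8) & 0xff);		/* bits 8-15 */

	/* e.g. cause_rx_tx == 0x0302: the low byte (0x02) reports TX-done
	 * work, and fls(0x03) == 2 selects RXQ 1 as the receive queue to
	 * service in this poll.
	 */
	if (rx_queue)
		pr_info("service RXQ %d, tx cause %#x\n", rx_queue - 1, tx_cause);
}
#endif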
3212
3213static int mvneta_create_page_pool(struct mvneta_port *pp,
3214                                   struct mvneta_rx_queue *rxq, int size)
3215{
3216        struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3217        struct page_pool_params pp_params = {
3218                .order = 0,
3219                .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
3220                .pool_size = size,
3221                .nid = NUMA_NO_NODE,
3222                .dev = pp->dev->dev.parent,
3223                .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
3224                .offset = pp->rx_offset_correction,
3225                .max_len = MVNETA_MAX_RX_BUF_SIZE,
3226        };
3227        int err;
3228
3229        rxq->page_pool = page_pool_create(&pp_params);
3230        if (IS_ERR(rxq->page_pool)) {
3231                err = PTR_ERR(rxq->page_pool);
3232                rxq->page_pool = NULL;
3233                return err;
3234        }
3235
3236        err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
3237        if (err < 0)
3238                goto err_free_pp;
3239
3240        err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3241                                         rxq->page_pool);
3242        if (err)
3243                goto err_unregister_rxq;
3244
3245        return 0;
3246
3247err_unregister_rxq:
3248        xdp_rxq_info_unreg(&rxq->xdp_rxq);
3249err_free_pp:
3250        page_pool_destroy(rxq->page_pool);
3251        rxq->page_pool = NULL;
3252        return err;
3253}
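/* Note (added for clarity, not upstream text): the pool maps pages
 * DMA_BIDIRECTIONAL only while an XDP program is attached, since XDP_TX may
 * transmit straight out of RX pages; without XDP the device only ever writes
 * into them, so DMA_FROM_DEVICE is sufficient.
 */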
3254
3255/* Handle rxq fill: allocates rxq buffers; called when initializing a port */
3256static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3257                           int num)
3258{
3259        int i, err;
3260
3261        err = mvneta_create_page_pool(pp, rxq, num);
3262        if (err < 0)
3263                return err;
3264
3265        for (i = 0; i < num; i++) {
3266                memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3267                if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3268                                     GFP_KERNEL) != 0) {
3269                        netdev_err(pp->dev,
3270                                   "%s:rxq %d, %d of %d buffs  filled\n",
3271                                   __func__, rxq->id, i, num);
3272                        break;
3273                }
3274        }
3275
3276        /* Add this number of RX descriptors as non occupied (ready to
3277         * get packets)
3278         */
3279        mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3280
3281        return i;
3282}
3283
3284/* Free all packets pending transmit from all TXQs and reset TX port */
3285static void mvneta_tx_reset(struct mvneta_port *pp)
3286{
3287        int queue;
3288
3289        /* free the skbs in the tx ring */
3290        for (queue = 0; queue < txq_number; queue++)
3291                mvneta_txq_done_force(pp, &pp->txqs[queue]);
3292
3293        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3294        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3295}
3296
3297static void mvneta_rx_reset(struct mvneta_port *pp)
3298{
3299        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3300        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3301}
3302
3303/* Rx/Tx queue initialization/cleanup methods */
3304
3305static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3306                              struct mvneta_rx_queue *rxq)
3307{
3308        rxq->size = pp->rx_ring_size;
3309
3310        /* Allocate memory for RX descriptors */
3311        rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3312                                        rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3313                                        &rxq->descs_phys, GFP_KERNEL);
3314        if (!rxq->descs)
3315                return -ENOMEM;
3316
3317        rxq->last_desc = rxq->size - 1;
3318
3319        return 0;
3320}
3321
3322static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3323                               struct mvneta_rx_queue *rxq)
3324{
3325        /* Set Rx descriptors queue starting address */
3326        mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3327        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3328
3329        /* Set coalescing pkts and time */
3330        mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3331        mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3332
3333        if (!pp->bm_priv) {
3334                /* Set Offset */
3335                mvneta_rxq_offset_set(pp, rxq, 0);
3336                mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3337                                        MVNETA_MAX_RX_BUF_SIZE :
3338                                        MVNETA_RX_BUF_SIZE(pp->pkt_size));
3339                mvneta_rxq_bm_disable(pp, rxq);
3340                mvneta_rxq_fill(pp, rxq, rxq->size);
3341        } else {
3342                /* Set Offset */
3343                mvneta_rxq_offset_set(pp, rxq,
3344                                      NET_SKB_PAD - pp->rx_offset_correction);
3345
3346                mvneta_rxq_bm_enable(pp, rxq);
3347                /* Fill RXQ with buffers from RX pool */
3348                mvneta_rxq_long_pool_set(pp, rxq);
3349                mvneta_rxq_short_pool_set(pp, rxq);
3350                mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3351        }
3352}
3353
3354/* Create a specified RX queue */
3355static int mvneta_rxq_init(struct mvneta_port *pp,
3356                           struct mvneta_rx_queue *rxq)
3357
3358{
3359        int ret;
3360
3361        ret = mvneta_rxq_sw_init(pp, rxq);
3362        if (ret < 0)
3363                return ret;
3364
3365        mvneta_rxq_hw_init(pp, rxq);
3366
3367        return 0;
3368}
3369
3370/* Cleanup Rx queue */
3371static void mvneta_rxq_deinit(struct mvneta_port *pp,
3372                              struct mvneta_rx_queue *rxq)
3373{
3374        mvneta_rxq_drop_pkts(pp, rxq);
3375
3376        if (rxq->descs)
3377                dma_free_coherent(pp->dev->dev.parent,
3378                                  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3379                                  rxq->descs,
3380                                  rxq->descs_phys);
3381
3382        rxq->descs             = NULL;
3383        rxq->last_desc         = 0;
3384        rxq->next_desc_to_proc = 0;
3385        rxq->descs_phys        = 0;
3386        rxq->first_to_refill   = 0;
3387        rxq->refill_num        = 0;
3388}
3389
3390static int mvneta_txq_sw_init(struct mvneta_port *pp,
3391                              struct mvneta_tx_queue *txq)
3392{
3393        int cpu;
3394
3395        txq->size = pp->tx_ring_size;
3396
3397        /* A queue must always have room for at least one skb.
3398         * Therefore, stop the queue when the number of free entries
3399         * reaches the maximum number of descriptors per skb.
3400         */
3401        txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3402        txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
3403
3404        /* Allocate memory for TX descriptors */
3405        txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3406                                        txq->size * MVNETA_DESC_ALIGNED_SIZE,
3407                                        &txq->descs_phys, GFP_KERNEL);
3408        if (!txq->descs)
3409                return -ENOMEM;
3410
3411        txq->last_desc = txq->size - 1;
3412
3413        txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3414        if (!txq->buf)
3415                return -ENOMEM;
3416
3417        /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3418        txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3419                                           txq->size * TSO_HEADER_SIZE,
3420                                           &txq->tso_hdrs_phys, GFP_KERNEL);
3421        if (!txq->tso_hdrs)
3422                return -ENOMEM;
3423
3424        /* Setup XPS mapping */
3425        if (pp->neta_armada3700)
3426                cpu = 0;
3427        else if (txq_number > 1)
3428                cpu = txq->id % num_present_cpus();
3429        else
3430                cpu = pp->rxq_def % num_present_cpus();
3431        cpumask_set_cpu(cpu, &txq->affinity_mask);
3432        netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3433
3434        return 0;
3435}
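/* Note (added for clarity, not upstream text): the queue is stopped once
 * fewer than MVNETA_MAX_SKB_DESCS free descriptors remain, so even a
 * maximally fragmented skb still fits; it is woken again only when the
 * in-flight count drops to half the stop threshold, providing hysteresis.
 */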
3436
3437static void mvneta_txq_hw_init(struct mvneta_port *pp,
3438                               struct mvneta_tx_queue *txq)
3439{
3440        /* Set maximum bandwidth for enabled TXQs */
3441        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3442        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3443
3444        /* Set Tx descriptors queue starting address */
3445        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3446        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3447
3448        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3449}
3450
3451/* Create and initialize a tx queue */
3452static int mvneta_txq_init(struct mvneta_port *pp,
3453                           struct mvneta_tx_queue *txq)
3454{
3455        int ret;
3456
3457        ret = mvneta_txq_sw_init(pp, txq);
3458        if (ret < 0)
3459                return ret;
3460
3461        mvneta_txq_hw_init(pp, txq);
3462
3463        return 0;
3464}
3465
3466/* Free a TX queue's software resources (also used when mvneta_txq_init() fails) */
3467static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3468                                 struct mvneta_tx_queue *txq)
3469{
3470        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3471
3472        kfree(txq->buf);
3473
3474        if (txq->tso_hdrs)
3475                dma_free_coherent(pp->dev->dev.parent,
3476                                  txq->size * TSO_HEADER_SIZE,
3477                                  txq->tso_hdrs, txq->tso_hdrs_phys);
3478        if (txq->descs)
3479                dma_free_coherent(pp->dev->dev.parent,
3480                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
3481                                  txq->descs, txq->descs_phys);
3482
3483        netdev_tx_reset_queue(nq);
3484
3485        txq->descs             = NULL;
3486        txq->last_desc         = 0;
3487        txq->next_desc_to_proc = 0;
3488        txq->descs_phys        = 0;
3489}
3490
3491static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3492                                 struct mvneta_tx_queue *txq)
3493{
3494        /* Set minimum bandwidth for disabled TXQs */
3495        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3496        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3497
3498        /* Set Tx descriptors queue starting address and size */
3499        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3500        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3501}
3502
3503static void mvneta_txq_deinit(struct mvneta_port *pp,
3504                              struct mvneta_tx_queue *txq)
3505{
3506        mvneta_txq_sw_deinit(pp, txq);
3507        mvneta_txq_hw_deinit(pp, txq);
3508}
3509
3510/* Cleanup all Tx queues */
3511static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3512{
3513        int queue;
3514
3515        for (queue = 0; queue < txq_number; queue++)
3516                mvneta_txq_deinit(pp, &pp->txqs[queue]);
3517}
3518
3519/* Cleanup all Rx queues */
3520static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3521{
3522        int queue;
3523
3524        for (queue = 0; queue < rxq_number; queue++)
3525                mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3526}
3527
3528
3529/* Init all Rx queues */
3530static int mvneta_setup_rxqs(struct mvneta_port *pp)
3531{
3532        int queue;
3533
3534        for (queue = 0; queue < rxq_number; queue++) {
3535                int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3536
3537                if (err) {
3538                        netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3539                                   __func__, queue);
3540                        mvneta_cleanup_rxqs(pp);
3541                        return err;
3542                }
3543        }
3544
3545        return 0;
3546}
3547
3548/* Init all tx queues */
3549static int mvneta_setup_txqs(struct mvneta_port *pp)
3550{
3551        int queue;
3552
3553        for (queue = 0; queue < txq_number; queue++) {
3554                int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3555                if (err) {
3556                        netdev_err(pp->dev, "%s: can't create txq=%d\n",
3557                                   __func__, queue);
3558                        mvneta_cleanup_txqs(pp);
3559                        return err;
3560                }
3561        }
3562
3563        return 0;
3564}
3565
3566static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3567{
3568        int ret;
3569
3570        ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3571        if (ret)
3572                return ret;
3573
3574        return phy_power_on(pp->comphy);
3575}
3576
3577static int mvneta_config_interface(struct mvneta_port *pp,
3578                                   phy_interface_t interface)
3579{
3580        int ret = 0;
3581
3582        if (pp->comphy) {
3583                if (interface == PHY_INTERFACE_MODE_SGMII ||
3584                    interface == PHY_INTERFACE_MODE_1000BASEX ||
3585                    interface == PHY_INTERFACE_MODE_2500BASEX) {
3586                        ret = mvneta_comphy_init(pp, interface);
3587                }
3588        } else {
3589                switch (interface) {
3590                case PHY_INTERFACE_MODE_QSGMII:
3591                        mvreg_write(pp, MVNETA_SERDES_CFG,
3592                                    MVNETA_QSGMII_SERDES_PROTO);
3593                        break;
3594
3595                case PHY_INTERFACE_MODE_SGMII:
3596                case PHY_INTERFACE_MODE_1000BASEX:
3597                        mvreg_write(pp, MVNETA_SERDES_CFG,
3598                                    MVNETA_SGMII_SERDES_PROTO);
3599                        break;
3600
3601                case PHY_INTERFACE_MODE_2500BASEX:
3602                        mvreg_write(pp, MVNETA_SERDES_CFG,
3603                                    MVNETA_HSGMII_SERDES_PROTO);
3604                        break;
3605                default:
3606                        break;
3607                }
3608        }
3609
3610        pp->phy_interface = interface;
3611
3612        return ret;
3613}
3614
3615static void mvneta_start_dev(struct mvneta_port *pp)
3616{
3617        int cpu;
3618
3619        WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3620
3621        mvneta_max_rx_size_set(pp, pp->pkt_size);
3622        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3623
3624        /* start the Rx/Tx activity */
3625        mvneta_port_enable(pp);
3626
3627        if (!pp->neta_armada3700) {
3628                /* Enable polling on the port */
3629                for_each_online_cpu(cpu) {
3630                        struct mvneta_pcpu_port *port =
3631                                per_cpu_ptr(pp->ports, cpu);
3632
3633                        napi_enable(&port->napi);
3634                }
3635        } else {
3636                napi_enable(&pp->napi);
3637        }
3638
3639        /* Unmask interrupts. It has to be done from each CPU */
3640        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3641
3642        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3643                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3644                    MVNETA_CAUSE_LINK_CHANGE);
3645
3646        phylink_start(pp->phylink);
3647
3648        /* We may have called phylink_speed_down before */
3649        phylink_speed_up(pp->phylink);
3650
3651        netif_tx_start_all_queues(pp->dev);
3652
3653        clear_bit(__MVNETA_DOWN, &pp->state);
3654}
3655
3656static void mvneta_stop_dev(struct mvneta_port *pp)
3657{
3658        unsigned int cpu;
3659
3660        set_bit(__MVNETA_DOWN, &pp->state);
3661
3662        if (device_may_wakeup(&pp->dev->dev))
3663                phylink_speed_down(pp->phylink, false);
3664
3665        phylink_stop(pp->phylink);
3666
3667        if (!pp->neta_armada3700) {
3668                for_each_online_cpu(cpu) {
3669                        struct mvneta_pcpu_port *port =
3670                                per_cpu_ptr(pp->ports, cpu);
3671
3672                        napi_disable(&port->napi);
3673                }
3674        } else {
3675                napi_disable(&pp->napi);
3676        }
3677
3678        netif_carrier_off(pp->dev);
3679
3680        mvneta_port_down(pp);
3681        netif_tx_stop_all_queues(pp->dev);
3682
3683        /* Stop the port activity */
3684        mvneta_port_disable(pp);
3685
3686        /* Clear all ethernet port interrupts */
3687        on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3688
3689        /* Mask all ethernet port interrupts */
3690        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3691
3692        mvneta_tx_reset(pp);
3693        mvneta_rx_reset(pp);
3694
3695        WARN_ON(phy_power_off(pp->comphy));
3696}
3697
3698static void mvneta_percpu_enable(void *arg)
3699{
3700        struct mvneta_port *pp = arg;
3701
3702        enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3703}
3704
3705static void mvneta_percpu_disable(void *arg)
3706{
3707        struct mvneta_port *pp = arg;
3708
3709        disable_percpu_irq(pp->dev->irq);
3710}
3711
3712/* Change the device mtu */
3713static int mvneta_change_mtu(struct net_device *dev, int mtu)
3714{
3715        struct mvneta_port *pp = netdev_priv(dev);
3716        int ret;
3717
3718        if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3719                netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3720                            mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3721                mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3722        }
3723
3724        if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
3725                netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
3726                return -EINVAL;
3727        }
3728
3729        dev->mtu = mtu;
3730
3731        if (!netif_running(dev)) {
3732                if (pp->bm_priv)
3733                        mvneta_bm_update_mtu(pp, mtu);
3734
3735                netdev_update_features(dev);
3736                return 0;
3737        }
3738
3739        /* The interface is running, so we have to force a
3740         * reallocation of the queues
3741         */
3742        mvneta_stop_dev(pp);
3743        on_each_cpu(mvneta_percpu_disable, pp, true);
3744
3745        mvneta_cleanup_txqs(pp);
3746        mvneta_cleanup_rxqs(pp);
3747
3748        if (pp->bm_priv)
3749                mvneta_bm_update_mtu(pp, mtu);
3750
3751        pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3752
3753        ret = mvneta_setup_rxqs(pp);
3754        if (ret) {
3755                netdev_err(dev, "unable to setup rxqs after MTU change\n");
3756                return ret;
3757        }
3758
3759        ret = mvneta_setup_txqs(pp);
3760        if (ret) {
3761                netdev_err(dev, "unable to setup txqs after MTU change\n");
3762                return ret;
3763        }
3764
3765        on_each_cpu(mvneta_percpu_enable, pp, true);
3766        mvneta_start_dev(pp);
3767
3768        netdev_update_features(dev);
3769
3770        return 0;
3771}
3772
3773static netdev_features_t mvneta_fix_features(struct net_device *dev,
3774                                             netdev_features_t features)
3775{
3776        struct mvneta_port *pp = netdev_priv(dev);
3777
3778        if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3779                features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3780                netdev_info(dev,
3781                            "Disable IP checksum for MTU greater than %dB\n",
3782                            pp->tx_csum_limit);
3783        }
3784
3785        return features;
3786}
3787
3788/* Get mac address */
3789static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3790{
3791        u32 mac_addr_l, mac_addr_h;
3792
3793        mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3794        mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3795        addr[0] = (mac_addr_h >> 24) & 0xFF;
3796        addr[1] = (mac_addr_h >> 16) & 0xFF;
3797        addr[2] = (mac_addr_h >> 8) & 0xFF;
3798        addr[3] = mac_addr_h & 0xFF;
3799        addr[4] = (mac_addr_l >> 8) & 0xFF;
3800        addr[5] = mac_addr_l & 0xFF;
3801}
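/* Worked example (added for clarity, value invented): for the MAC address
 * aa:bb:cc:dd:ee:ff the registers read back as
 * MVNETA_MAC_ADDR_HIGH = 0xaabbccdd and MVNETA_MAC_ADDR_LOW[15:0] = 0xeeff,
 * matching the byte extraction above.
 */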
3802
3803/* Handle setting mac address */
3804static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3805{
3806        struct mvneta_port *pp = netdev_priv(dev);
3807        struct sockaddr *sockaddr = addr;
3808        int ret;
3809
3810        ret = eth_prepare_mac_addr_change(dev, addr);
3811        if (ret < 0)
3812                return ret;
3813        /* Remove previous address table entry */
3814        mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3815
3816        /* Set new addr in hw */
3817        mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3818
3819        eth_commit_mac_addr_change(dev, addr);
3820        return 0;
3821}
3822
3823static void mvneta_validate(struct phylink_config *config,
3824                            unsigned long *supported,
3825                            struct phylink_link_state *state)
3826{
3827        struct net_device *ndev = to_net_dev(config->dev);
3828        struct mvneta_port *pp = netdev_priv(ndev);
3829        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3830
3831        /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
3832         * When in 802.3z mode, we must have AN enabled:
3833         * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
3834         * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
3835         */
3836        if (phy_interface_mode_is_8023z(state->interface)) {
3837                if (!phylink_test(state->advertising, Autoneg)) {
3838                        bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3839                        return;
3840                }
3841        } else if (state->interface != PHY_INTERFACE_MODE_NA &&
3842                   state->interface != PHY_INTERFACE_MODE_QSGMII &&
3843                   state->interface != PHY_INTERFACE_MODE_SGMII &&
3844                   !phy_interface_mode_is_rgmii(state->interface)) {
3845                bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3846                return;
3847        }
3848
3849        /* Allow all the expected bits */
3850        phylink_set(mask, Autoneg);
3851        phylink_set_port_modes(mask);
3852
3853        /* Asymmetric pause is unsupported */
3854        phylink_set(mask, Pause);
3855
3856        /* Half-duplex at speeds higher than 100Mbit is unsupported */
3857        if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
3858                phylink_set(mask, 1000baseT_Full);
3859                phylink_set(mask, 1000baseX_Full);
3860        }
3861        if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3862                phylink_set(mask, 2500baseT_Full);
3863                phylink_set(mask, 2500baseX_Full);
3864        }
3865
3866        if (!phy_interface_mode_is_8023z(state->interface)) {
3867                /* 10M and 100M are only supported in non-802.3z mode */
3868                phylink_set(mask, 10baseT_Half);
3869                phylink_set(mask, 10baseT_Full);
3870                phylink_set(mask, 100baseT_Half);
3871                phylink_set(mask, 100baseT_Full);
3872        }
3873
3874        bitmap_and(supported, supported, mask,
3875                   __ETHTOOL_LINK_MODE_MASK_NBITS);
3876        bitmap_and(state->advertising, state->advertising, mask,
3877                   __ETHTOOL_LINK_MODE_MASK_NBITS);
3878
3879        /* We can only operate at 2500BaseX or 1000BaseX.  If requested
3880         * to advertise both, only report advertising at 2500BaseX.
3881         */
3882        phylink_helper_basex_speed(state);
3883}
3884
3885static void mvneta_mac_pcs_get_state(struct phylink_config *config,
3886                                     struct phylink_link_state *state)
3887{
3888        struct net_device *ndev = to_net_dev(config->dev);
3889        struct mvneta_port *pp = netdev_priv(ndev);
3890        u32 gmac_stat;
3891
3892        gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3893
3894        if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3895                state->speed =
3896                        state->interface == PHY_INTERFACE_MODE_2500BASEX ?
3897                        SPEED_2500 : SPEED_1000;
3898        else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3899                state->speed = SPEED_100;
3900        else
3901                state->speed = SPEED_10;
3902
3903        state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3904        state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3905        state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3906
3907        state->pause = 0;
3908        if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3909                state->pause |= MLO_PAUSE_RX;
3910        if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3911                state->pause |= MLO_PAUSE_TX;
3912}
3913
3914static void mvneta_mac_an_restart(struct phylink_config *config)
3915{
3916        struct net_device *ndev = to_net_dev(config->dev);
3917        struct mvneta_port *pp = netdev_priv(ndev);
3918        u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3919
3920        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3921                    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3922        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3923                    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3924}
3925
3926static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
3927                              const struct phylink_link_state *state)
3928{
3929        struct net_device *ndev = to_net_dev(config->dev);
3930        struct mvneta_port *pp = netdev_priv(ndev);
3931        u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3932        u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3933        u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
3934        u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3935        u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3936
3937        new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3938        new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3939                                   MVNETA_GMAC2_PORT_RESET);
3940        new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
3941        new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3942        new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3943                             MVNETA_GMAC_INBAND_RESTART_AN |
3944                             MVNETA_GMAC_AN_SPEED_EN |
3945                             MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3946                             MVNETA_GMAC_AN_FLOW_CTRL_EN |
3947                             MVNETA_GMAC_AN_DUPLEX_EN);
3948
3949        /* Even though it might look weird, when we're configured in
3950         * SGMII or QSGMII mode, the RGMII bit needs to be set.
3951         */
3952        new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3953
3954        if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3955            state->interface == PHY_INTERFACE_MODE_SGMII ||
3956            phy_interface_mode_is_8023z(state->interface))
3957                new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3958
3959        if (phylink_test(state->advertising, Pause))
3960                new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3961
3962        if (!phylink_autoneg_inband(mode)) {
3963                /* Phy or fixed speed - nothing to do, leave the
3964                 * configured speed, duplex and flow control as-is.
3965                 */
3966        } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
3967                /* SGMII mode receives the state from the PHY */
3968                new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3969                new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3970                new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3971                                     MVNETA_GMAC_FORCE_LINK_PASS |
3972                                     MVNETA_GMAC_CONFIG_MII_SPEED |
3973                                     MVNETA_GMAC_CONFIG_GMII_SPEED |
3974                                     MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
3975                         MVNETA_GMAC_INBAND_AN_ENABLE |
3976                         MVNETA_GMAC_AN_SPEED_EN |
3977                         MVNETA_GMAC_AN_DUPLEX_EN;
3978        } else {
3979                /* 802.3z negotiation - only 1000base-X */
3980                new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3981                new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3982                new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3983                                     MVNETA_GMAC_FORCE_LINK_PASS |
3984                                     MVNETA_GMAC_CONFIG_MII_SPEED)) |
3985                         MVNETA_GMAC_INBAND_AN_ENABLE |
3986                         MVNETA_GMAC_CONFIG_GMII_SPEED |
3987                         /* The MAC only supports FD mode */
3988                         MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3989
3990                if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3991                        new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
3992        }
3993
3994        /* Armada 370 documentation says we can only change the port mode
3995         * and in-band enable when the link is down, so force the link down
3996         * while making these changes; the same applies to GMAC_CTRL2.
3997         */
3998        if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3999            (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
4000            (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
4001                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4002                            (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
4003                            MVNETA_GMAC_FORCE_LINK_DOWN);
4004        }
4005
4006
4007        /* When at 2.5G, the link partner can send frames with shortened
4008         * preambles.
4009         */
4010        if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
4011                new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
4012
4013        if (pp->phy_interface != state->interface) {
4014                if (pp->comphy)
4015                        WARN_ON(phy_power_off(pp->comphy));
4016                WARN_ON(mvneta_config_interface(pp, state->interface));
4017        }
4018
4019        if (new_ctrl0 != gmac_ctrl0)
4020                mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
4021        if (new_ctrl2 != gmac_ctrl2)
4022                mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4023        if (new_ctrl4 != gmac_ctrl4)
4024                mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4025        if (new_clk != gmac_clk)
4026                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
4027        if (new_an != gmac_an)
4028                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
4029
4030        if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
4031                while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4032                        MVNETA_GMAC2_PORT_RESET) != 0)
4033                        continue;
4034        }
4035}
4036
4037static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
4038{
4039        u32 lpi_ctl1;
4040
4041        lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4042        if (enable)
4043                lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
4044        else
4045                lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
4046        mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
4047}
4048
4049static void mvneta_mac_link_down(struct phylink_config *config,
4050                                 unsigned int mode, phy_interface_t interface)
4051{
4052        struct net_device *ndev = to_net_dev(config->dev);
4053        struct mvneta_port *pp = netdev_priv(ndev);
4054        u32 val;
4055
4056        mvneta_port_down(pp);
4057
4058        if (!phylink_autoneg_inband(mode)) {
4059                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4060                val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
4061                val |= MVNETA_GMAC_FORCE_LINK_DOWN;
4062                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4063        }
4064
4065        pp->eee_active = false;
4066        mvneta_set_eee(pp, false);
4067}
4068
4069static void mvneta_mac_link_up(struct phylink_config *config,
4070                               struct phy_device *phy,
4071                               unsigned int mode, phy_interface_t interface,
4072                               int speed, int duplex,
4073                               bool tx_pause, bool rx_pause)
4074{
4075        struct net_device *ndev = to_net_dev(config->dev);
4076        struct mvneta_port *pp = netdev_priv(ndev);
4077        u32 val;
4078
4079        if (!phylink_autoneg_inband(mode)) {
4080                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4081                val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
4082                         MVNETA_GMAC_CONFIG_MII_SPEED |
4083                         MVNETA_GMAC_CONFIG_GMII_SPEED |
4084                         MVNETA_GMAC_CONFIG_FLOW_CTRL |
4085                         MVNETA_GMAC_CONFIG_FULL_DUPLEX);
4086                val |= MVNETA_GMAC_FORCE_LINK_PASS;
4087
4088                if (speed == SPEED_1000 || speed == SPEED_2500)
4089                        val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
4090                else if (speed == SPEED_100)
4091                        val |= MVNETA_GMAC_CONFIG_MII_SPEED;
4092
4093                if (duplex == DUPLEX_FULL)
4094                        val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4095
4096                if (tx_pause || rx_pause)
4097                        val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4098
4099                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4100        } else {
4101                /* When inband doesn't cover flow control or flow control is
4102                 * disabled, we need to manually configure it. This bit will
4103                 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
4104                 */
4105                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4106                val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
4107
4108                if (tx_pause || rx_pause)
4109                        val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4110
4111                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4112        }
4113
4114        mvneta_port_up(pp);
4115
4116        if (phy && pp->eee_enabled) {
4117                pp->eee_active = phy_init_eee(phy, 0) >= 0;
4118                mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
4119        }
4120}
4121
4122static const struct phylink_mac_ops mvneta_phylink_ops = {
4123        .validate = mvneta_validate,
4124        .mac_pcs_get_state = mvneta_mac_pcs_get_state,
4125        .mac_an_restart = mvneta_mac_an_restart,
4126        .mac_config = mvneta_mac_config,
4127        .mac_link_down = mvneta_mac_link_down,
4128        .mac_link_up = mvneta_mac_link_up,
4129};
4130
4131static int mvneta_mdio_probe(struct mvneta_port *pp)
4132{
4133        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
4134        int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4135
4136        if (err)
4137                netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4138
4139        phylink_ethtool_get_wol(pp->phylink, &wol);
4140        device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4141
4142        /* PHY WoL may be enabled but device wakeup disabled */
4143        if (wol.supported)
4144                device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4145
4146        return err;
4147}
4148
4149static void mvneta_mdio_remove(struct mvneta_port *pp)
4150{
4151        phylink_disconnect_phy(pp->phylink);
4152}
4153
4154/* Electing a CPU must be done atomically: it must not run concurrently
4155 * with the removal or insertion of a CPU, and this function is not
4156 * reentrant.
4157 */
4158static void mvneta_percpu_elect(struct mvneta_port *pp)
4159{
4160        int elected_cpu = 0, max_cpu, cpu, i = 0;
4161
4162        /* Use the CPU associated with the default rxq when it is online;
4163         * in all other cases use CPU 0, which can't be offline.
4164         */
4165        if (cpu_online(pp->rxq_def))
4166                elected_cpu = pp->rxq_def;
4167
4168        max_cpu = num_present_cpus();
4169
4170        for_each_online_cpu(cpu) {
4171                int rxq_map = 0, txq_map = 0;
4172                int rxq;
4173
4174                for (rxq = 0; rxq < rxq_number; rxq++)
4175                        if ((rxq % max_cpu) == cpu)
4176                                rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
4177
4178                if (cpu == elected_cpu)
4179                        /* Map the default receive queue to the elected CPU */
4180                        rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4181
4182                /* We update the TX queue map only if we have a single
4183                 * queue. In that case we associate the TX queue with
4184                 * the CPU bound to the default RX queue.
4185                 */
4186                if (txq_number == 1)
4187                        txq_map = (cpu == elected_cpu) ?
4188                                MVNETA_CPU_TXQ_ACCESS(1) : 0;
4189                else
4190                        txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4191                                MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
4192
4193                mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4194
4195                /* Update the interrupt mask on each CPU according to
4196                 * the new mapping
4197                 */
4198                smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
4199                                         pp, true);
4200                i++;
4201
4202        }
4203}
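/* Worked example (added for clarity, values invented): with rxq_number == 8
 * and four CPUs (all present and online), the rxq % max_cpu test gives
 * CPU0 -> RXQs {0,4}, CPU1 -> {1,5}, CPU2 -> {2,6}, CPU3 -> {3,7}; the
 * elected CPU additionally gets rxq_def mapped to it, and with a single TX
 * queue only the elected CPU is given TX queue access.
 */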
4204
4205static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
4206{
4207        int other_cpu;
4208        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4209                                                  node_online);
4210        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4211
4212        /* Armada 3700's per-cpu interrupt for mvneta is broken: all interrupts
4213         * are routed to CPU 0, so we don't need all the cpu-hotplug support.
4214         */
4215        if (pp->neta_armada3700)
4216                return 0;
4217
4218        spin_lock(&pp->lock);
4219        /*
4220         * Configuring the driver for a new CPU while the driver is
4221         * stopping is racy, so just avoid it.
4222         */
4223        if (pp->is_stopped) {
4224                spin_unlock(&pp->lock);
4225                return 0;
4226        }
4227        netif_tx_stop_all_queues(pp->dev);
4228
4229        /*
4230         * We have to synchronise on the napi of each CPU except the one
4231         * just being woken up.
4232         */
4233        for_each_online_cpu(other_cpu) {
4234                if (other_cpu != cpu) {
4235                        struct mvneta_pcpu_port *other_port =
4236                                per_cpu_ptr(pp->ports, other_cpu);
4237
4238                        napi_synchronize(&other_port->napi);
4239                }
4240        }
4241
4242        /* Mask all ethernet port interrupts */
4243        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4244        napi_enable(&port->napi);
4245
4246        /*
4247         * Enable per-CPU interrupts on the CPU that is
4248         * brought up.
4249         */
4250        mvneta_percpu_enable(pp);
4251
4252        /*
4253         * Re-elect the CPU that handles the default RX queue and update
4254         * the per-CPU queue/interrupt mapping accordingly.
4255         */
4256        mvneta_percpu_elect(pp);
4257
4258        /* Unmask all ethernet port interrupts */
4259        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4260        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4261                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4262                    MVNETA_CAUSE_LINK_CHANGE);
4263        netif_tx_start_all_queues(pp->dev);
4264        spin_unlock(&pp->lock);
4265        return 0;
4266}
4267
4268static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
4269{
4270        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4271                                                  node_online);
4272        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4273
4274        /*
4275         * Thanks to this lock we are sure that any pending cpu election is
4276         * done.
4277         */
4278        spin_lock(&pp->lock);
4279        /* Mask all ethernet port interrupts */
4280        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4281        spin_unlock(&pp->lock);
4282
4283        napi_synchronize(&port->napi);
4284        napi_disable(&port->napi);
4285        /* Disable per-CPU interrupts on the CPU that is brought down. */
4286        mvneta_percpu_disable(pp);
4287        return 0;
4288}
4289
4290static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
4291{
4292        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4293                                                  node_dead);
4294
4295        /* Check if a new CPU must be elected now that this one is down */
4296        spin_lock(&pp->lock);
4297        mvneta_percpu_elect(pp);
4298        spin_unlock(&pp->lock);
4299        /* Unmask all ethernet port interrupts */
4300        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4301        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4302                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4303                    MVNETA_CAUSE_LINK_CHANGE);
4304        netif_tx_start_all_queues(pp->dev);
4305        return 0;
4306}
4307
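    /* Open the port: allocate the RX/TX queues, request the port interrupt
     * line (per-CPU except on Armada 3700), register the CPU hotplug
     * callbacks, probe the MDIO bus and start the port.
     */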
4308static int mvneta_open(struct net_device *dev)
4309{
4310        struct mvneta_port *pp = netdev_priv(dev);
4311        int ret;
4312
4313        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4314
4315        ret = mvneta_setup_rxqs(pp);
4316        if (ret)
4317                return ret;
4318
4319        ret = mvneta_setup_txqs(pp);
4320        if (ret)
4321                goto err_cleanup_rxqs;
4322
4323        /* Connect to port interrupt line */
4324        if (pp->neta_armada3700)
4325                ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4326                                  dev->name, pp);
4327        else
4328                ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4329                                         dev->name, pp->ports);
4330        if (ret) {
4331                netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4332                goto err_cleanup_txqs;
4333        }
4334
4335        if (!pp->neta_armada3700) {
4336                /* Enable per-CPU interrupts on all CPUs to handle our RX
4337                 * queue interrupts
4338                 */
4339                on_each_cpu(mvneta_percpu_enable, pp, true);
4340
4341                pp->is_stopped = false;
4342                /* Register a CPU notifier to handle the case where our CPU
4343                 * might be taken offline.
4344                 */
4345                ret = cpuhp_state_add_instance_nocalls(online_hpstate,
4346                                                       &pp->node_online);
4347                if (ret)
4348                        goto err_free_irq;
4349
4350                ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4351                                                       &pp->node_dead);
4352                if (ret)
4353                        goto err_free_online_hp;
4354        }
4355
4356        ret = mvneta_mdio_probe(pp);
4357        if (ret < 0) {
4358                netdev_err(dev, "cannot probe MDIO bus\n");
4359                goto err_free_dead_hp;
4360        }
4361
4362        mvneta_start_dev(pp);
4363
4364        return 0;
4365
4366err_free_dead_hp:
4367        if (!pp->neta_armada3700)
4368                cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4369                                                    &pp->node_dead);
4370err_free_online_hp:
4371        if (!pp->neta_armada3700)
4372                cpuhp_state_remove_instance_nocalls(online_hpstate,
4373                                                    &pp->node_online);
4374err_free_irq:
4375        if (pp->neta_armada3700) {
4376                free_irq(pp->dev->irq, pp);
4377        } else {
4378                on_each_cpu(mvneta_percpu_disable, pp, true);
4379                free_percpu_irq(pp->dev->irq, pp->ports);
4380        }
4381err_cleanup_txqs:
4382        mvneta_cleanup_txqs(pp);
4383err_cleanup_rxqs:
4384        mvneta_cleanup_rxqs(pp);
4385        return ret;
4386}
4387
4388/* Stop the port, free port interrupt line */
4389static int mvneta_stop(struct net_device *dev)
4390{
4391        struct mvneta_port *pp = netdev_priv(dev);
4392
4393        if (!pp->neta_armada3700) {
4394                /* Indicate that we are stopping, so we don't want to set up the
4395                 * driver for new CPUs in the notifiers. The code of the
4396                 * notifier for CPU online is protected by the same spinlock,
4397                 * so when we get the lock, the notifier work is done.
4398                 */
4399                spin_lock(&pp->lock);
4400                pp->is_stopped = true;
4401                spin_unlock(&pp->lock);
4402
4403                mvneta_stop_dev(pp);
4404                mvneta_mdio_remove(pp);
4405
4406                cpuhp_state_remove_instance_nocalls(online_hpstate,
4407                                                    &pp->node_online);
4408                cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4409                                                    &pp->node_dead);
4410                on_each_cpu(mvneta_percpu_disable, pp, true);
4411                free_percpu_irq(dev->irq, pp->ports);
4412        } else {
4413                mvneta_stop_dev(pp);
4414                mvneta_mdio_remove(pp);
4415                free_irq(dev->irq, pp);
4416        }
4417
4418        mvneta_cleanup_rxqs(pp);
4419        mvneta_cleanup_txqs(pp);
4420
4421        return 0;
4422}
4423
4424static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4425{
4426        struct mvneta_port *pp = netdev_priv(dev);
4427
4428        return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4429}
4430
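    /* Attach or detach an XDP program. The port is stopped and re-opened
     * when switching between XDP and non-XDP mode so that the RX queues are
     * set up for the new mode.
     */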
4431static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
4432                            struct netlink_ext_ack *extack)
4433{
4434        bool need_update, running = netif_running(dev);
4435        struct mvneta_port *pp = netdev_priv(dev);
4436        struct bpf_prog *old_prog;
4437
4438        if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
4439                NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
4440                return -EOPNOTSUPP;
4441        }
4442
4443        if (pp->bm_priv) {
4444                NL_SET_ERR_MSG_MOD(extack,
4445                                   "Hardware Buffer Management not supported on XDP");
4446                return -EOPNOTSUPP;
4447        }
4448
4449        need_update = !!pp->xdp_prog != !!prog;
4450        if (running && need_update)
4451                mvneta_stop(dev);
4452
4453        old_prog = xchg(&pp->xdp_prog, prog);
4454        if (old_prog)
4455                bpf_prog_put(old_prog);
4456
4457        if (running && need_update)
4458                return mvneta_open(dev);
4459
4460        return 0;
4461}
4462
4463static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4464{
4465        switch (xdp->command) {
4466        case XDP_SETUP_PROG:
4467                return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
4468        default:
4469                return -EINVAL;
4470        }
4471}
4472
4473/* Ethtool methods */
4474
4475/* Set link ksettings (phy address, speed) for ethtool */
4476static int
4477mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
4478                                  const struct ethtool_link_ksettings *cmd)
4479{
4480        struct mvneta_port *pp = netdev_priv(ndev);
4481
4482        return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4483}
4484
4485/* Get link ksettings for ethtool */
4486static int
4487mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
4488                                  struct ethtool_link_ksettings *cmd)
4489{
4490        struct mvneta_port *pp = netdev_priv(ndev);
4491
4492        return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4493}
4494
4495static int mvneta_ethtool_nway_reset(struct net_device *dev)
4496{
4497        struct mvneta_port *pp = netdev_priv(dev);
4498
4499        return phylink_ethtool_nway_reset(pp->phylink);
4500}
4501
4502/* Set interrupt coalescing for ethtool */
4503static int
4504mvneta_ethtool_set_coalesce(struct net_device *dev,
4505                            struct ethtool_coalesce *c,
4506                            struct kernel_ethtool_coalesce *kernel_coal,
4507                            struct netlink_ext_ack *extack)
4508{
4509        struct mvneta_port *pp = netdev_priv(dev);
4510        int queue;
4511
4512        for (queue = 0; queue < rxq_number; queue++) {
4513                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4514                rxq->time_coal = c->rx_coalesce_usecs;
4515                rxq->pkts_coal = c->rx_max_coalesced_frames;
4516                mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4517                mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4518        }
4519
4520        for (queue = 0; queue < txq_number; queue++) {
4521                struct mvneta_tx_queue *txq = &pp->txqs[queue];
4522                txq->done_pkts_coal = c->tx_max_coalesced_frames;
4523                mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4524        }
4525
4526        return 0;
4527}
4528
4529/* Get interrupt coalescing for ethtool */
4530static int
4531mvneta_ethtool_get_coalesce(struct net_device *dev,
4532                            struct ethtool_coalesce *c,
4533                            struct kernel_ethtool_coalesce *kernel_coal,
4534                            struct netlink_ext_ack *extack)
4535{
4536        struct mvneta_port *pp = netdev_priv(dev);
4537
4538        c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
4539        c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
4540
4541        c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4542        return 0;
4543}
4544
4546static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4547                                    struct ethtool_drvinfo *drvinfo)
4548{
4549        strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
4550                sizeof(drvinfo->driver));
4551        strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
4552                sizeof(drvinfo->version));
4553        strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4554                sizeof(drvinfo->bus_info));
4555}
4556
4558static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
4559                                         struct ethtool_ringparam *ring)
4560{
4561        struct mvneta_port *pp = netdev_priv(netdev);
4562
4563        ring->rx_max_pending = MVNETA_MAX_RXD;
4564        ring->tx_max_pending = MVNETA_MAX_TXD;
4565        ring->rx_pending = pp->rx_ring_size;
4566        ring->tx_pending = pp->tx_ring_size;
4567}
4568
4569static int mvneta_ethtool_set_ringparam(struct net_device *dev,
4570                                        struct ethtool_ringparam *ring)
4571{
4572        struct mvneta_port *pp = netdev_priv(dev);
4573
4574        if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4575                return -EINVAL;
4576        pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4577                ring->rx_pending : MVNETA_MAX_RXD;
4578
4579        pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4580                                   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4581        if (pp->tx_ring_size != ring->tx_pending)
4582                netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4583                            pp->tx_ring_size, ring->tx_pending);
4584
4585        if (netif_running(dev)) {
4586                mvneta_stop(dev);
4587                if (mvneta_open(dev)) {
4588                        netdev_err(dev,
4589                                   "error on opening device after ring param change\n");
4590                        return -ENOMEM;
4591                }
4592        }
4593
4594        return 0;
4595}
4596
4597static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4598                                          struct ethtool_pauseparam *pause)
4599{
4600        struct mvneta_port *pp = netdev_priv(dev);
4601
4602        phylink_ethtool_get_pauseparam(pp->phylink, pause);
4603}
4604
4605static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4606                                         struct ethtool_pauseparam *pause)
4607{
4608        struct mvneta_port *pp = netdev_priv(dev);
4609
4610        return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4611}
4612
4613static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4614                                       u8 *data)
4615{
4616        if (sset == ETH_SS_STATS) {
4617                int i;
4618
4619                for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4620                        memcpy(data + i * ETH_GSTRING_LEN,
4621                               mvneta_statistics[i].name, ETH_GSTRING_LEN);
4622        }
4623}
4624
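    /* Aggregate the per-CPU software counters (XDP and allocation errors)
     * into a single mvneta_ethtool_stats structure, using the u64_stats
     * sequence counter to read a consistent snapshot from each CPU.
     */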
4625static void
4626mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
4627                                 struct mvneta_ethtool_stats *es)
4628{
4629        unsigned int start;
4630        int cpu;
4631
4632        for_each_possible_cpu(cpu) {
4633                struct mvneta_pcpu_stats *stats;
4634                u64 skb_alloc_error;
4635                u64 refill_error;
4636                u64 xdp_redirect;
4637                u64 xdp_xmit_err;
4638                u64 xdp_tx_err;
4639                u64 xdp_pass;
4640                u64 xdp_drop;
4641                u64 xdp_xmit;
4642                u64 xdp_tx;
4643
4644                stats = per_cpu_ptr(pp->stats, cpu);
4645                do {
4646                        start = u64_stats_fetch_begin_irq(&stats->syncp);
4647                        skb_alloc_error = stats->es.skb_alloc_error;
4648                        refill_error = stats->es.refill_error;
4649                        xdp_redirect = stats->es.ps.xdp_redirect;
4650                        xdp_pass = stats->es.ps.xdp_pass;
4651                        xdp_drop = stats->es.ps.xdp_drop;
4652                        xdp_xmit = stats->es.ps.xdp_xmit;
4653                        xdp_xmit_err = stats->es.ps.xdp_xmit_err;
4654                        xdp_tx = stats->es.ps.xdp_tx;
4655                        xdp_tx_err = stats->es.ps.xdp_tx_err;
4656                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
4657
4658                es->skb_alloc_error += skb_alloc_error;
4659                es->refill_error += refill_error;
4660                es->ps.xdp_redirect += xdp_redirect;
4661                es->ps.xdp_pass += xdp_pass;
4662                es->ps.xdp_drop += xdp_drop;
4663                es->ps.xdp_xmit += xdp_xmit;
4664                es->ps.xdp_xmit_err += xdp_xmit_err;
4665                es->ps.xdp_tx += xdp_tx;
4666                es->ps.xdp_tx_err += xdp_tx_err;
4667        }
4668}
4669
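    /* Refresh the cached ethtool statistics: accumulate the 32-bit and
     * 64-bit hardware MIB counters and copy in the software counters
     * gathered from the per-CPU statistics and from phylink.
     */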
4670static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4671{
4672        struct mvneta_ethtool_stats stats = {};
4673        const struct mvneta_statistic *s;
4674        void __iomem *base = pp->base;
4675        u32 high, low;
4676        u64 val;
4677        int i;
4678
4679        mvneta_ethtool_update_pcpu_stats(pp, &stats);
4680        for (i = 0, s = mvneta_statistics;
4681             s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4682             s++, i++) {
4683                switch (s->type) {
4684                case T_REG_32:
4685                        val = readl_relaxed(base + s->offset);
4686                        pp->ethtool_stats[i] += val;
4687                        break;
4688                case T_REG_64:
4689                        /* Docs say to read low 32-bit then high */
4690                        low = readl_relaxed(base + s->offset);
4691                        high = readl_relaxed(base + s->offset + 4);
4692                        val = (u64)high << 32 | low;
4693                        pp->ethtool_stats[i] += val;
4694                        break;
4695                case T_SW:
4696                        switch (s->offset) {
4697                        case ETHTOOL_STAT_EEE_WAKEUP:
4698                                val = phylink_get_eee_err(pp->phylink);
4699                                pp->ethtool_stats[i] += val;
4700                                break;
4701                        case ETHTOOL_STAT_SKB_ALLOC_ERR:
4702                                pp->ethtool_stats[i] = stats.skb_alloc_error;
4703                                break;
4704                        case ETHTOOL_STAT_REFILL_ERR:
4705                                pp->ethtool_stats[i] = stats.refill_error;
4706                                break;
4707                        case ETHTOOL_XDP_REDIRECT:
4708                                pp->ethtool_stats[i] = stats.ps.xdp_redirect;
4709                                break;
4710                        case ETHTOOL_XDP_PASS:
4711                                pp->ethtool_stats[i] = stats.ps.xdp_pass;
4712                                break;
4713                        case ETHTOOL_XDP_DROP:
4714                                pp->ethtool_stats[i] = stats.ps.xdp_drop;
4715                                break;
4716                        case ETHTOOL_XDP_TX:
4717                                pp->ethtool_stats[i] = stats.ps.xdp_tx;
4718                                break;
4719                        case ETHTOOL_XDP_TX_ERR:
4720                                pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
4721                                break;
4722                        case ETHTOOL_XDP_XMIT:
4723                                pp->ethtool_stats[i] = stats.ps.xdp_xmit;
4724                                break;
4725                        case ETHTOOL_XDP_XMIT_ERR:
4726                                pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
4727                                break;
4728                        }
4729                        break;
4730                }
4731        }
4732}
4733
4734static void mvneta_ethtool_get_stats(struct net_device *dev,
4735                                     struct ethtool_stats *stats, u64 *data)
4736{
4737        struct mvneta_port *pp = netdev_priv(dev);
4738        int i;
4739
4740        mvneta_ethtool_update_stats(pp);
4741
4742        for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4743                *data++ = pp->ethtool_stats[i];
4744}
4745
4746static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4747{
4748        if (sset == ETH_SS_STATS)
4749                return ARRAY_SIZE(mvneta_statistics);
4750        return -EOPNOTSUPP;
4751}
4752
4753static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4754{
4755        return MVNETA_RSS_LU_TABLE_SIZE;
4756}
4757
4758static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4759                                    struct ethtool_rxnfc *info,
4760                                    u32 *rules __always_unused)
4761{
4762        switch (info->cmd) {
4763        case ETHTOOL_GRXRINGS:
4764                info->data = rxq_number;
4765                return 0;
4766        case ETHTOOL_GRXFH:
4767                return -EOPNOTSUPP;
4768        default:
4769                return -EOPNOTSUPP;
4770        }
4771}
4772
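    /* Apply a new RSS indirection table: quiesce NAPI, make the first entry
     * of the table the new default RX queue, update the unicast mapping and
     * port configuration, then re-elect the CPU handling that queue.
     */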
4773static int mvneta_config_rss(struct mvneta_port *pp)
4774{
4775        int cpu;
4776        u32 val;
4777
4778        netif_tx_stop_all_queues(pp->dev);
4779
4780        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4781
4782        if (!pp->neta_armada3700) {
4783                /* We have to synchronise on the napi of each CPU */
4784                for_each_online_cpu(cpu) {
4785                        struct mvneta_pcpu_port *pcpu_port =
4786                                per_cpu_ptr(pp->ports, cpu);
4787
4788                        napi_synchronize(&pcpu_port->napi);
4789                        napi_disable(&pcpu_port->napi);
4790                }
4791        } else {
4792                napi_synchronize(&pp->napi);
4793                napi_disable(&pp->napi);
4794        }
4795
4796        pp->rxq_def = pp->indir[0];
4797
4798        /* Update unicast mapping */
4799        mvneta_set_rx_mode(pp->dev);
4800
4801        /* Update portCfg so all RX queue types use the new default queue */
4802        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
4803        mvreg_write(pp, MVNETA_PORT_CONFIG, val);
4804
4805        /* Update the elected CPU matching the new rxq_def */
4806        spin_lock(&pp->lock);
4807        mvneta_percpu_elect(pp);
4808        spin_unlock(&pp->lock);
4809
4810        if (!pp->neta_armada3700) {
4811                /* We have to synchronise on the napi of each CPU */
4812                for_each_online_cpu(cpu) {
4813                        struct mvneta_pcpu_port *pcpu_port =
4814                                per_cpu_ptr(pp->ports, cpu);
4815
4816                        napi_enable(&pcpu_port->napi);
4817                }
4818        } else {
4819                napi_enable(&pp->napi);
4820        }
4821
4822        netif_tx_start_all_queues(pp->dev);
4823
4824        return 0;
4825}
4826
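    /* ethtool .set_rxfh: only the RSS indirection table can be changed;
     * setting a hash key or a hash function other than Toeplitz is rejected.
     */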
4827static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4828                                   const u8 *key, const u8 hfunc)
4829{
4830        struct mvneta_port *pp = netdev_priv(dev);
4831
4832        /* Current code for Armada 3700 doesn't support RSS features yet */
4833        if (pp->neta_armada3700)
4834                return -EOPNOTSUPP;
4835
4836        /* We require at least one supported parameter to be changed
4837         * and no change in any of the unsupported parameters
4838         */
4839        if (key ||
4840            (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
4841                return -EOPNOTSUPP;
4842
4843        if (!indir)
4844                return 0;
4845
4846        memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
4847
4848        return mvneta_config_rss(pp);
4849}
4850
4851static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4852                                   u8 *hfunc)
4853{
4854        struct mvneta_port *pp = netdev_priv(dev);
4855
4856        /* Current code for Armada 3700 doesn't support RSS features yet */
4857        if (pp->neta_armada3700)
4858                return -EOPNOTSUPP;
4859
4860        if (hfunc)
4861                *hfunc = ETH_RSS_HASH_TOP;
4862
4863        if (!indir)
4864                return 0;
4865
4866        memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
4867
4868        return 0;
4869}
4870
4871static void mvneta_ethtool_get_wol(struct net_device *dev,
4872                                   struct ethtool_wolinfo *wol)
4873{
4874        struct mvneta_port *pp = netdev_priv(dev);
4875
4876        phylink_ethtool_get_wol(pp->phylink, wol);
4877}
4878
4879static int mvneta_ethtool_set_wol(struct net_device *dev,
4880                                  struct ethtool_wolinfo *wol)
4881{
4882        struct mvneta_port *pp = netdev_priv(dev);
4883        int ret;
4884
4885        ret = phylink_ethtool_set_wol(pp->phylink, wol);
4886        if (!ret)
4887                device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
4888
4889        return ret;
4890}
4891
4892static int mvneta_ethtool_get_eee(struct net_device *dev,
4893                                  struct ethtool_eee *eee)
4894{
4895        struct mvneta_port *pp = netdev_priv(dev);
4896        u32 lpi_ctl0;
4897
4898        lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4899
4900        eee->eee_enabled = pp->eee_enabled;
4901        eee->eee_active = pp->eee_active;
4902        eee->tx_lpi_enabled = pp->tx_lpi_enabled;
4903        eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
4904
4905        return phylink_ethtool_get_eee(pp->phylink, eee);
4906}
4907
4908static int mvneta_ethtool_set_eee(struct net_device *dev,
4909                                  struct ethtool_eee *eee)
4910{
4911        struct mvneta_port *pp = netdev_priv(dev);
4912        u32 lpi_ctl0;
4913
4914        /* The Armada 37x documents do not give limits for this other than
4915         * it being an 8-bit register.
4916         */
4917        if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
4918                return -EINVAL;
4919
4920        lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4921        lpi_ctl0 &= ~(0xff << 8);
4922        lpi_ctl0 |= eee->tx_lpi_timer << 8;
4923        mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
4924
4925        pp->eee_enabled = eee->eee_enabled;
4926        pp->tx_lpi_enabled = eee->tx_lpi_enabled;
4927
4928        mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
4929
4930        return phylink_ethtool_set_eee(pp->phylink, eee);
4931}
4932
4933static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
4934{
4935        mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
4936}
4937
4938static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
4939{
4940        u32 val = 0;
4941        int i;
4942
4943        for (i = 0; i < rxq_number; i++)
4944                val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
4945
4946        mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
4947}
4948
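    /* mqprio offload: map VLAN priorities to RX queues according to
     * qopt->prio_tc_map and program the mapping into the hardware; a zero
     * traffic class count clears the mapping instead.
     */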
4949static int mvneta_setup_mqprio(struct net_device *dev,
4950                               struct tc_mqprio_qopt *qopt)
4951{
4952        struct mvneta_port *pp = netdev_priv(dev);
4953        u8 num_tc;
4954        int i;
4955
4956        qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4957        num_tc = qopt->num_tc;
4958
4959        if (num_tc > rxq_number)
4960                return -EINVAL;
4961
4962        if (!num_tc) {
4963                mvneta_clear_rx_prio_map(pp);
4964                netdev_reset_tc(dev);
4965                return 0;
4966        }
4967
4968        memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
4969
4970        mvneta_setup_rx_prio_map(pp);
4971
4972        netdev_set_num_tc(dev, qopt->num_tc);
4973        for (i = 0; i < qopt->num_tc; i++)
4974                netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
4975
4976        return 0;
4977}
4978
4979static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
4980                           void *type_data)
4981{
4982        switch (type) {
4983        case TC_SETUP_QDISC_MQPRIO:
4984                return mvneta_setup_mqprio(dev, type_data);
4985        default:
4986                return -EOPNOTSUPP;
4987        }
4988}
4989
4990static const struct net_device_ops mvneta_netdev_ops = {
4991        .ndo_open            = mvneta_open,
4992        .ndo_stop            = mvneta_stop,
4993        .ndo_start_xmit      = mvneta_tx,
4994        .ndo_set_rx_mode     = mvneta_set_rx_mode,
4995        .ndo_set_mac_address = mvneta_set_mac_addr,
4996        .ndo_change_mtu      = mvneta_change_mtu,
4997        .ndo_fix_features    = mvneta_fix_features,
4998        .ndo_get_stats64     = mvneta_get_stats64,
4999        .ndo_eth_ioctl       = mvneta_ioctl,
5000        .ndo_bpf             = mvneta_xdp,
5001        .ndo_xdp_xmit        = mvneta_xdp_xmit,
5002        .ndo_setup_tc        = mvneta_setup_tc,
5003};
5004
5005static const struct ethtool_ops mvneta_eth_tool_ops = {
5006        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
5007                                     ETHTOOL_COALESCE_MAX_FRAMES,
5008        .nway_reset     = mvneta_ethtool_nway_reset,
5009        .get_link       = ethtool_op_get_link,
5010        .set_coalesce   = mvneta_ethtool_set_coalesce,
5011        .get_coalesce   = mvneta_ethtool_get_coalesce,
5012        .get_drvinfo    = mvneta_ethtool_get_drvinfo,
5013        .get_ringparam  = mvneta_ethtool_get_ringparam,
5014        .set_ringparam  = mvneta_ethtool_set_ringparam,
5015        .get_pauseparam = mvneta_ethtool_get_pauseparam,
5016        .set_pauseparam = mvneta_ethtool_set_pauseparam,
5017        .get_strings    = mvneta_ethtool_get_strings,
5018        .get_ethtool_stats = mvneta_ethtool_get_stats,
5019        .get_sset_count = mvneta_ethtool_get_sset_count,
5020        .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
5021        .get_rxnfc      = mvneta_ethtool_get_rxnfc,
5022        .get_rxfh       = mvneta_ethtool_get_rxfh,
5023        .set_rxfh       = mvneta_ethtool_set_rxfh,
5024        .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
5025        .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
5026        .get_wol        = mvneta_ethtool_get_wol,
5027        .set_wol        = mvneta_ethtool_set_wol,
5028        .get_eee        = mvneta_ethtool_get_eee,
5029        .set_eee        = mvneta_ethtool_set_eee,
5030};
5031
5032/* Initialize hw */
5033static int mvneta_init(struct device *dev, struct mvneta_port *pp)
5034{
5035        int queue;
5036
5037        /* Disable port */
5038        mvneta_port_disable(pp);
5039
5040        /* Set port default values */
5041        mvneta_defaults_set(pp);
5042
5043        pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
5044        if (!pp->txqs)
5045                return -ENOMEM;
5046
5047        /* Initialize TX descriptor rings */
5048        for (queue = 0; queue < txq_number; queue++) {
5049                struct mvneta_tx_queue *txq = &pp->txqs[queue];
5050                txq->id = queue;
5051                txq->size = pp->tx_ring_size;
5052                txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
5053        }
5054
5055        pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
5056        if (!pp->rxqs)
5057                return -ENOMEM;
5058
5059        /* Create Rx descriptor rings */
5060        for (queue = 0; queue < rxq_number; queue++) {
5061                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5062                rxq->id = queue;
5063                rxq->size = pp->rx_ring_size;
5064                rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
5065                rxq->time_coal = MVNETA_RX_COAL_USEC;
5066                rxq->buf_virt_addr
5067                        = devm_kmalloc_array(pp->dev->dev.parent,
5068                                             rxq->size,
5069                                             sizeof(*rxq->buf_virt_addr),
5070                                             GFP_KERNEL);
5071                if (!rxq->buf_virt_addr)
5072                        return -ENOMEM;
5073        }
5074
5075        return 0;
5076}
5077
5078/* platform glue : initialize decoding windows */
5079static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
5080                                     const struct mbus_dram_target_info *dram)
5081{
5082        u32 win_enable;
5083        u32 win_protect;
5084        int i;
5085
5086        for (i = 0; i < 6; i++) {
5087                mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
5088                mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
5089
5090                if (i < 4)
5091                        mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
5092        }
5093
5094        win_enable = 0x3f;
5095        win_protect = 0;
5096
5097        if (dram) {
5098                for (i = 0; i < dram->num_cs; i++) {
5099                        const struct mbus_dram_window *cs = dram->cs + i;
5100
5101                        mvreg_write(pp, MVNETA_WIN_BASE(i),
5102                                    (cs->base & 0xffff0000) |
5103                                    (cs->mbus_attr << 8) |
5104                                    dram->mbus_dram_target_id);
5105
5106                        mvreg_write(pp, MVNETA_WIN_SIZE(i),
5107                                    (cs->size - 1) & 0xffff0000);
5108
5109                        win_enable &= ~(1 << i);
5110                        win_protect |= 3 << (2 * i);
5111                }
5112        } else {
5113                /* For Armada3700, open a default 4GB Mbus window, leaving
5114                 * arbitration of target/attribute to a different layer
5115                 * of configuration.
5116                 */
5117                mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
5118                win_enable &= ~BIT(0);
5119                win_protect = 3;
5120        }
5121
5122        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
5123        mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
5124}
5125
5126/* Power up the port */
5127static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
5128{
5129        /* MAC Cause register should be cleared */
5130        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
5131
5132        if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
5133            phy_mode != PHY_INTERFACE_MODE_SGMII &&
5134            !phy_interface_mode_is_8023z(phy_mode) &&
5135            !phy_interface_mode_is_rgmii(phy_mode))
5136                return -EINVAL;
5137
5138        return 0;
5139}
5140
5141/* Device initialization routine */
5142static int mvneta_probe(struct platform_device *pdev)
5143{
5144        struct device_node *dn = pdev->dev.of_node;
5145        struct device_node *bm_node;
5146        struct mvneta_port *pp;
5147        struct net_device *dev;
5148        struct phylink *phylink;
5149        struct phy *comphy;
5150        char hw_mac_addr[ETH_ALEN];
5151        phy_interface_t phy_mode;
5152        const char *mac_from;
5153        int tx_csum_limit;
5154        int err;
5155        int cpu;
5156
5157        dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
5158                                      txq_number, rxq_number);
5159        if (!dev)
5160                return -ENOMEM;
5161
5162        dev->irq = irq_of_parse_and_map(dn, 0);
5163        if (dev->irq == 0)
5164                return -EINVAL;
5165
5166        err = of_get_phy_mode(dn, &phy_mode);
5167        if (err) {
5168                dev_err(&pdev->dev, "incorrect phy-mode\n");
5169                goto err_free_irq;
5170        }
5171
5172        comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
5173        if (comphy == ERR_PTR(-EPROBE_DEFER)) {
5174                err = -EPROBE_DEFER;
5175                goto err_free_irq;
5176        } else if (IS_ERR(comphy)) {
5177                comphy = NULL;
5178        }
5179
5180        pp = netdev_priv(dev);
5181        spin_lock_init(&pp->lock);
5182
5183        pp->phylink_config.dev = &dev->dev;
5184        pp->phylink_config.type = PHYLINK_NETDEV;
5185
5186        phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5187                                 phy_mode, &mvneta_phylink_ops);
5188        if (IS_ERR(phylink)) {
5189                err = PTR_ERR(phylink);
5190                goto err_free_irq;
5191        }
5192
5193        dev->tx_queue_len = MVNETA_MAX_TXD;
5194        dev->watchdog_timeo = 5 * HZ;
5195        dev->netdev_ops = &mvneta_netdev_ops;
5196
5197        dev->ethtool_ops = &mvneta_eth_tool_ops;
5198
5199        pp->phylink = phylink;
5200        pp->comphy = comphy;
5201        pp->phy_interface = phy_mode;
5202        pp->dn = dn;
5203
5204        pp->rxq_def = rxq_def;
5205        pp->indir[0] = rxq_def;
5206
5207        /* Get special SoC configurations */
5208        if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
5209                pp->neta_armada3700 = true;
5210
5211        pp->clk = devm_clk_get(&pdev->dev, "core");
5212        if (IS_ERR(pp->clk))
5213                pp->clk = devm_clk_get(&pdev->dev, NULL);
5214        if (IS_ERR(pp->clk)) {
5215                err = PTR_ERR(pp->clk);
5216                goto err_free_phylink;
5217        }
5218
5219        clk_prepare_enable(pp->clk);
5220
5221        pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5222        if (!IS_ERR(pp->clk_bus))
5223                clk_prepare_enable(pp->clk_bus);
5224
5225        pp->base = devm_platform_ioremap_resource(pdev, 0);
5226        if (IS_ERR(pp->base)) {
5227                err = PTR_ERR(pp->base);
5228                goto err_clk;
5229        }
5230
5231        /* Alloc per-cpu port structure */
5232        pp->ports = alloc_percpu(struct mvneta_pcpu_port);
5233        if (!pp->ports) {
5234                err = -ENOMEM;
5235                goto err_clk;
5236        }
5237
5238        /* Alloc per-cpu stats */
5239        pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
5240        if (!pp->stats) {
5241                err = -ENOMEM;
5242                goto err_free_ports;
5243        }
5244
5245        err = of_get_mac_address(dn, dev->dev_addr);
5246        if (!err) {
5247                mac_from = "device tree";
5248        } else {
5249                mvneta_get_mac_addr(pp, hw_mac_addr);
5250                if (is_valid_ether_addr(hw_mac_addr)) {
5251                        mac_from = "hardware";
5252                        memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
5253                } else {
5254                        mac_from = "random";
5255                        eth_hw_addr_random(dev);
5256                }
5257        }
5258
5259        if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
5260                if (tx_csum_limit < 0 ||
5261                    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
5262                        tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5263                        dev_info(&pdev->dev,
5264                                 "Wrong TX csum limit in DT, set to %dB\n",
5265                                 MVNETA_TX_CSUM_DEF_SIZE);
5266                }
5267        } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
5268                tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5269        } else {
5270                tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
5271        }
5272
5273        pp->tx_csum_limit = tx_csum_limit;
5274
5275        pp->dram_target_info = mv_mbus_dram_info();
5276        /* Armada3700 requires setting a default Mbus window
5277         * configuration, but without using a filled
5278         * mbus_dram_target_info structure.
5279         */
5280        if (pp->dram_target_info || pp->neta_armada3700)
5281                mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5282
5283        pp->tx_ring_size = MVNETA_MAX_TXD;
5284        pp->rx_ring_size = MVNETA_MAX_RXD;
5285
5286        pp->dev = dev;
5287        SET_NETDEV_DEV(dev, &pdev->dev);
5288
5289        pp->id = global_port_id++;
5290
5291        /* Obtain access to BM resources if enabled and already initialized */
5292        bm_node = of_parse_phandle(dn, "buffer-manager", 0);
5293        if (bm_node) {
5294                pp->bm_priv = mvneta_bm_get(bm_node);
5295                if (pp->bm_priv) {
5296                        err = mvneta_bm_port_init(pdev, pp);
5297                        if (err < 0) {
5298                                dev_info(&pdev->dev,
5299                                         "use SW buffer management\n");
5300                                mvneta_bm_put(pp->bm_priv);
5301                                pp->bm_priv = NULL;
5302                        }
5303                }
5304                /* Set RX packet offset correction for platforms whose
5305                 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
5306                 * platforms and 0B for 32-bit ones.
5307                 */
5308                pp->rx_offset_correction = max(0,
5309                                               NET_SKB_PAD -
5310                                               MVNETA_RX_PKT_OFFSET_CORRECTION);
5311        }
5312        of_node_put(bm_node);
5313
5314        /* sw buffer management */
5315        if (!pp->bm_priv)
5316                pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5317
5318        err = mvneta_init(&pdev->dev, pp);
5319        if (err < 0)
5320                goto err_netdev;
5321
5322        err = mvneta_port_power_up(pp, pp->phy_interface);
5323        if (err < 0) {
5324                dev_err(&pdev->dev, "can't power up port\n");
5325                goto err_netdev;
5326        }
5327
5328        /* Armada3700 network controller does not support per-cpu
5329         * operation, so only single NAPI should be initialized.
5330         */
5331        if (pp->neta_armada3700) {
5332                netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
5333        } else {
5334                for_each_present_cpu(cpu) {
5335                        struct mvneta_pcpu_port *port =
5336                                per_cpu_ptr(pp->ports, cpu);
5337
5338                        netif_napi_add(dev, &port->napi, mvneta_poll,
5339                                       NAPI_POLL_WEIGHT);
5340                        port->pp = pp;
5341                }
5342        }
5343
5344        dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5345                        NETIF_F_TSO | NETIF_F_RXCSUM;
5346        dev->hw_features |= dev->features;
5347        dev->vlan_features |= dev->features;
5348        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5349        dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
5350
5351        /* MTU range: 68 - 9676 */
5352        dev->min_mtu = ETH_MIN_MTU;
5353        /* 9676 == 9700 - 20 and rounding to 8 */
5354        dev->max_mtu = 9676;
5355
5356        err = register_netdev(dev);
5357        if (err < 0) {
5358                dev_err(&pdev->dev, "failed to register\n");
5359                goto err_netdev;
5360        }
5361
5362        netdev_info(dev, "Using %s mac address %pM\n", mac_from,
5363                    dev->dev_addr);
5364
5365        platform_set_drvdata(pdev, pp->dev);
5366
5367        return 0;
5368
5369err_netdev:
5370        if (pp->bm_priv) {
5371                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5372                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5373                                       1 << pp->id);
5374                mvneta_bm_put(pp->bm_priv);
5375        }
5376        free_percpu(pp->stats);
5377err_free_ports:
5378        free_percpu(pp->ports);
5379err_clk:
5380        clk_disable_unprepare(pp->clk_bus);
5381        clk_disable_unprepare(pp->clk);
5382err_free_phylink:
5383        if (pp->phylink)
5384                phylink_destroy(pp->phylink);
5385err_free_irq:
5386        irq_dispose_mapping(dev->irq);
5387        return err;
5388}
5389
5390/* Device removal routine */
5391static int mvneta_remove(struct platform_device *pdev)
5392{
5393        struct net_device  *dev = platform_get_drvdata(pdev);
5394        struct mvneta_port *pp = netdev_priv(dev);
5395
5396        unregister_netdev(dev);
5397        clk_disable_unprepare(pp->clk_bus);
5398        clk_disable_unprepare(pp->clk);
5399        free_percpu(pp->ports);
5400        free_percpu(pp->stats);
5401        irq_dispose_mapping(dev->irq);
5402        phylink_destroy(pp->phylink);
5403
5404        if (pp->bm_priv) {
5405                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5406                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5407                                       1 << pp->id);
5408                mvneta_bm_put(pp->bm_priv);
5409        }
5410
5411        return 0;
5412}
5413
5414#ifdef CONFIG_PM_SLEEP
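/* Suspend: if the port is running, stop it and drop pending RX packets;
 * then detach the device and gate the clocks.
 */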
5415static int mvneta_suspend(struct device *device)
5416{
5417        int queue;
5418        struct net_device *dev = dev_get_drvdata(device);
5419        struct mvneta_port *pp = netdev_priv(dev);
5420
5421        if (!netif_running(dev))
5422                goto clean_exit;
5423
5424        if (!pp->neta_armada3700) {
5425                spin_lock(&pp->lock);
5426                pp->is_stopped = true;
5427                spin_unlock(&pp->lock);
5428
5429                cpuhp_state_remove_instance_nocalls(online_hpstate,
5430                                                    &pp->node_online);
5431                cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5432                                                    &pp->node_dead);
5433        }
5434
5435        rtnl_lock();
5436        mvneta_stop_dev(pp);
5437        rtnl_unlock();
5438
5439        for (queue = 0; queue < rxq_number; queue++) {
5440                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5441
5442                mvneta_rxq_drop_pkts(pp, rxq);
5443        }
5444
5445        for (queue = 0; queue < txq_number; queue++) {
5446                struct mvneta_tx_queue *txq = &pp->txqs[queue];
5447
5448                mvneta_txq_hw_deinit(pp, txq);
5449        }
5450
5451clean_exit:
5452        netif_device_detach(dev);
5453        clk_disable_unprepare(pp->clk_bus);
5454        clk_disable_unprepare(pp->clk);
5455
5456        return 0;
5457}
5458
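/* Resume: ungate the clocks, restore the Mbus windows, buffer manager
 * and port defaults, re-initialize the RX/TX queue hardware and restart
 * the port if it was running.
 */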
5459static int mvneta_resume(struct device *device)
5460{
5461        struct platform_device *pdev = to_platform_device(device);
5462        struct net_device *dev = dev_get_drvdata(device);
5463        struct mvneta_port *pp = netdev_priv(dev);
5464        int err, queue;
5465
5466        clk_prepare_enable(pp->clk);
5467        if (!IS_ERR(pp->clk_bus))
5468                clk_prepare_enable(pp->clk_bus);
5469        if (pp->dram_target_info || pp->neta_armada3700)
5470                mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5471        if (pp->bm_priv) {
5472                err = mvneta_bm_port_init(pdev, pp);
5473                if (err < 0) {
5474                        dev_info(&pdev->dev, "use SW buffer management\n");
5475                        pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5476                        pp->bm_priv = NULL;
5477                }
5478        }
5479        mvneta_defaults_set(pp);
5480        err = mvneta_port_power_up(pp, pp->phy_interface);
5481        if (err < 0) {
5482                dev_err(device, "can't power up port\n");
5483                return err;
5484        }
5485
5486        netif_device_attach(dev);
5487
5488        if (!netif_running(dev))
5489                return 0;
5490
5491        for (queue = 0; queue < rxq_number; queue++) {
5492                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5493
5494                rxq->next_desc_to_proc = 0;
5495                mvneta_rxq_hw_init(pp, rxq);
5496        }
5497
5498        for (queue = 0; queue < txq_number; queue++) {
5499                struct mvneta_tx_queue *txq = &pp->txqs[queue];
5500
5501                txq->next_desc_to_proc = 0;
5502                mvneta_txq_hw_init(pp, txq);
5503        }
5504
5505        if (!pp->neta_armada3700) {
5506                spin_lock(&pp->lock);
5507                pp->is_stopped = false;
5508                spin_unlock(&pp->lock);
5509                cpuhp_state_add_instance_nocalls(online_hpstate,
5510                                                 &pp->node_online);
5511                cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5512                                                 &pp->node_dead);
5513        }
5514
5515        rtnl_lock();
5516        mvneta_start_dev(pp);
5517        rtnl_unlock();
5518        mvneta_set_rx_mode(dev);
5519
5520        return 0;
5521}
5522#endif
5523
5524static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
5525
5526static const struct of_device_id mvneta_match[] = {
5527        { .compatible = "marvell,armada-370-neta" },
5528        { .compatible = "marvell,armada-xp-neta" },
5529        { .compatible = "marvell,armada-3700-neta" },
5530        { }
5531};
5532MODULE_DEVICE_TABLE(of, mvneta_match);
5533
5534static struct platform_driver mvneta_driver = {
5535        .probe = mvneta_probe,
5536        .remove = mvneta_remove,
5537        .driver = {
5538                .name = MVNETA_DRIVER_NAME,
5539                .of_match_table = mvneta_match,
5540                .pm = &mvneta_pm_ops,
5541        },
5542};
5543
5544static int __init mvneta_driver_init(void)
5545{
5546        int ret;
5547
5548        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
5549                                      mvneta_cpu_online,
5550                                      mvneta_cpu_down_prepare);
5551        if (ret < 0)
5552                goto out;
5553        online_hpstate = ret;
5554        ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
5555                                      NULL, mvneta_cpu_dead);
5556        if (ret)
5557                goto err_dead;
5558
5559        ret = platform_driver_register(&mvneta_driver);
5560        if (ret)
5561                goto err;
5562        return 0;
5563
5564err:
5565        cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5566err_dead:
5567        cpuhp_remove_multi_state(online_hpstate);
5568out:
5569        return ret;
5570}
5571module_init(mvneta_driver_init);
5572
5573static void __exit mvneta_driver_exit(void)
5574{
5575        platform_driver_unregister(&mvneta_driver);
5576        cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5577        cpuhp_remove_multi_state(online_hpstate);
5578}
5579module_exit(mvneta_driver_exit);
5580
5581MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
5582MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
5583MODULE_LICENSE("GPL");
5584
5585module_param(rxq_number, int, 0444);
5586module_param(txq_number, int, 0444);
5587
5588module_param(rxq_def, int, 0444);
5589module_param(rx_copybreak, int, 0644);
5590