linux/drivers/net/ethernet/marvell/mvneta.c
   1/*
   2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
   3 *
   4 * Copyright (C) 2012 Marvell
   5 *
   6 * Rami Rosen <rosenr@marvell.com>
   7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
   8 *
   9 * This file is licensed under the terms of the GNU General Public
  10 * License version 2. This program is licensed "as is" without any
  11 * warranty of any kind, whether express or implied.
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/cpu.h>
  16#include <linux/etherdevice.h>
  17#include <linux/if_vlan.h>
  18#include <linux/inetdevice.h>
  19#include <linux/interrupt.h>
  20#include <linux/io.h>
  21#include <linux/kernel.h>
  22#include <linux/mbus.h>
  23#include <linux/module.h>
  24#include <linux/netdevice.h>
  25#include <linux/of.h>
  26#include <linux/of_address.h>
  27#include <linux/of_irq.h>
  28#include <linux/of_mdio.h>
  29#include <linux/of_net.h>
  30#include <linux/phy.h>
  31#include <linux/phy_fixed.h>
  32#include <linux/platform_device.h>
  33#include <linux/skbuff.h>
  34#include <net/hwbm.h>
  35#include "mvneta_bm.h"
  36#include <net/ip.h>
  37#include <net/ipv6.h>
  38#include <net/tso.h>
  39
  40/* Registers */
  41#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
  42#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
  43#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT     4
  44#define      MVNETA_RXQ_SHORT_POOL_ID_MASK      0x30
  45#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT      6
  46#define      MVNETA_RXQ_LONG_POOL_ID_MASK       0xc0
  47#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
  48#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
  49#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
  50#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
  51#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
  52#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
  53#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
  54#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
  55#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
  56#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
  57#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
  58#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
  59#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
  60#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)    (0x1700 + ((pool) << 2))
  61#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT   3
  62#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK    0xfff8
  63#define MVNETA_PORT_RX_RESET                    0x1cc0
  64#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
  65#define MVNETA_PHY_ADDR                         0x2000
  66#define      MVNETA_PHY_ADDR_MASK               0x1f
  67#define MVNETA_MBUS_RETRY                       0x2010
  68#define MVNETA_UNIT_INTR_CAUSE                  0x2080
  69#define MVNETA_UNIT_CONTROL                     0x20B0
  70#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
  71#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
  72#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
  73#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
  74#define MVNETA_BASE_ADDR_ENABLE                 0x2290
  75#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
  76#define MVNETA_PORT_CONFIG                      0x2400
  77#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
  78#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
  79#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
  80#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
  81#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
  82#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
  83#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
  84#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
  85#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
  86                                                 MVNETA_DEF_RXQ_ARP(q)   | \
  87                                                 MVNETA_DEF_RXQ_TCP(q)   | \
  88                                                 MVNETA_DEF_RXQ_UDP(q)   | \
  89                                                 MVNETA_DEF_RXQ_BPDU(q)  | \
  90                                                 MVNETA_TX_UNSET_ERR_SUM | \
  91                                                 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
  92#define MVNETA_PORT_CONFIG_EXTEND                0x2404
  93#define MVNETA_MAC_ADDR_LOW                      0x2414
  94#define MVNETA_MAC_ADDR_HIGH                     0x2418
  95#define MVNETA_SDMA_CONFIG                       0x241c
  96#define      MVNETA_SDMA_BRST_SIZE_16            4
  97#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
  98#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
  99#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
 100#define      MVNETA_DESC_SWAP                    BIT(6)
 101#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
 102#define MVNETA_PORT_STATUS                       0x2444
 103#define      MVNETA_TX_IN_PRGRS                  BIT(1)
 104#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 105#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 106#define MVNETA_SERDES_CFG                        0x24A0
 107#define      MVNETA_SGMII_SERDES_PROTO           0x0cc7
 108#define      MVNETA_QSGMII_SERDES_PROTO          0x0667
 109#define MVNETA_TYPE_PRIO                         0x24bc
 110#define      MVNETA_FORCE_UNI                    BIT(21)
 111#define MVNETA_TXQ_CMD_1                         0x24e4
 112#define MVNETA_TXQ_CMD                           0x2448
 113#define      MVNETA_TXQ_DISABLE_SHIFT            8
 114#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
 115#define MVNETA_RX_DISCARD_FRAME_COUNT            0x2484
 116#define MVNETA_OVERRUN_FRAME_COUNT               0x2488
 117#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
 118#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 119#define MVNETA_ACC_MODE                          0x2500
 120#define MVNETA_BM_ADDRESS                        0x2504
 121#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 122#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 123#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 124#define      MVNETA_CPU_RXQ_ACCESS(rxq)          BIT(rxq)
 125#define      MVNETA_CPU_TXQ_ACCESS(txq)          BIT(txq + 8)
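/* For example, MVNETA_CPU_RXQ_ACCESS(2) is BIT(2) and MVNETA_CPU_TXQ_ACCESS(2)
 * is BIT(10), matching the per-queue layout of the ALL masks above.
 */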
 126#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
 127
 128/* Exception Interrupt Port/Queue Cause register
 129 *
  130 * Their behavior depends on the mapping done using the PCPX2Q
  131 * registers. For a given CPU, if the bit associated with a queue is
  132 * not set, then reads of the register from this CPU always return 0
  133 * and writes have no effect.
 134 */
 135
 136#define MVNETA_INTR_NEW_CAUSE                    0x25a0
 137#define MVNETA_INTR_NEW_MASK                     0x25a4
 138
 139/* bits  0..7  = TXQ SENT, one bit per queue.
 140 * bits  8..15 = RXQ OCCUP, one bit per queue.
 141 * bits 16..23 = RXQ FREE, one bit per queue.
 142 * bit  29 = OLD_REG_SUM, see old reg ?
 143 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 144 * bit  31 = MISC_SUM,   one bit for 4 ports
 145 */
 146#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
 147#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 148#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 149#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
 150#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
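/* For example, with two TX and two RX queues, MVNETA_TX_INTR_MASK(2)
 * evaluates to 0x3 (bits 0-1) and MVNETA_RX_INTR_MASK(2) to 0x300 (bits 8-9).
 */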
 151
 152#define MVNETA_INTR_OLD_CAUSE                    0x25a8
 153#define MVNETA_INTR_OLD_MASK                     0x25ac
 154
 155/* Data Path Port/Queue Cause Register */
 156#define MVNETA_INTR_MISC_CAUSE                   0x25b0
 157#define MVNETA_INTR_MISC_MASK                    0x25b4
 158
 159#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
 160#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
 161#define      MVNETA_CAUSE_PTP                    BIT(4)
 162
 163#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
 164#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
 165#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
 166#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
 167#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
 168#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
 169#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
 170#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
 171
 172#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
 173#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
 174#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
 175
 176#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
 177#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
 178#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
 179
 180#define MVNETA_INTR_ENABLE                       0x25b8
 181#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
 182#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
 183
 184#define MVNETA_RXQ_CMD                           0x2680
 185#define      MVNETA_RXQ_DISABLE_SHIFT            8
 186#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
 187#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
 188#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
 189#define MVNETA_GMAC_CTRL_0                       0x2c00
 190#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 191#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 192#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 193#define MVNETA_GMAC_CTRL_2                       0x2c08
 194#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 195#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 196#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 197#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 198#define MVNETA_GMAC_STATUS                       0x2c10
 199#define      MVNETA_GMAC_LINK_UP                 BIT(0)
 200#define      MVNETA_GMAC_SPEED_1000              BIT(1)
 201#define      MVNETA_GMAC_SPEED_100               BIT(2)
 202#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
 203#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
 204#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 205#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 206#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
 207#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 208#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 209#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 210#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
 211#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 212#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 213#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 214#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 215#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 216#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 217#define MVNETA_MIB_COUNTERS_BASE                 0x3000
 218#define      MVNETA_MIB_LATE_COLLISION           0x7c
 219#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 220#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
 221#define MVNETA_DA_FILT_UCAST_BASE                0x3600
 222#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
 223#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
 224#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
 225#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
 226#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
 227#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
 228#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
 229#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
 230#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
 231#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
 232#define MVNETA_PORT_TX_RESET                     0x3cf0
 233#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
 234#define MVNETA_TX_MTU                            0x3e0c
 235#define MVNETA_TX_TOKEN_SIZE                     0x3e14
 236#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
 237#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 238#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 239
 240#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK      0xff
 241
 242/* Descriptor ring Macros */
 243#define MVNETA_QUEUE_NEXT_DESC(q, index)        \
 244        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
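/* For example, with last_desc == 127, index 126 advances to 127 while
 * index 127 wraps back to 0.
 */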
 245
 246/* Various constants */
 247
 248/* Coalescing */
 249#define MVNETA_TXDONE_COAL_PKTS         0       /* interrupt per packet */
 250#define MVNETA_RX_COAL_PKTS             32
 251#define MVNETA_RX_COAL_USEC             100
 252
  253/* The two-byte Marvell header. It either contains a special value
  254 * used by Marvell switches when a specific hardware mode is enabled
  255 * (not supported by this driver) or is filled with zeroes by the
  256 * hardware on the RX side. Because those two bytes sit at the front
  257 * of the Ethernet header, they automatically align the IP header on
  258 * a 4-byte boundary: the hardware skips those two bytes on its
  259 * own.
  260 */
 261#define MVNETA_MH_SIZE                  2
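/* The 2-byte MH plus the 14-byte Ethernet header places the IP header
 * 16 bytes into the buffer, i.e. on a 4-byte boundary.
 */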
 262
 263#define MVNETA_VLAN_TAG_LEN             4
 264
 265#define MVNETA_TX_CSUM_DEF_SIZE         1600
 266#define MVNETA_TX_CSUM_MAX_SIZE         9800
 267#define MVNETA_ACC_MODE_EXT1            1
 268#define MVNETA_ACC_MODE_EXT2            2
 269
 270#define MVNETA_MAX_DECODE_WIN           6
 271
 272/* Timeout constants */
 273#define MVNETA_TX_DISABLE_TIMEOUT_MSEC  1000
 274#define MVNETA_RX_DISABLE_TIMEOUT_MSEC  1000
 275#define MVNETA_TX_FIFO_EMPTY_TIMEOUT    10000
 276
 277#define MVNETA_TX_MTU_MAX               0x3ffff
 278
  279/* The RSS lookup table actually has 256 entries, but only one entry
  280 * is used for now.
 281 */
 282#define MVNETA_RSS_LU_TABLE_SIZE        1
 283
 284/* Max number of Rx descriptors */
 285#define MVNETA_MAX_RXD 128
 286
 287/* Max number of Tx descriptors */
 288#define MVNETA_MAX_TXD 532
 289
 290/* Max number of allowed TCP segments for software TSO */
 291#define MVNETA_MAX_TSO_SEGS 100
 292
 293#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
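/* Each TSO segment consumes one descriptor for its header and at least one
 * for its payload, hence the factor of two in the worst-case bound above.
 */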
 294
 295/* descriptor aligned size */
 296#define MVNETA_DESC_ALIGNED_SIZE        32
 297
  298/* Number of bytes taken into account by the HW when placing incoming data
  299 * into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum packet
 300 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 301 */
 302#define MVNETA_RX_PKT_OFFSET_CORRECTION         64
 303
 304#define MVNETA_RX_PKT_SIZE(mtu) \
 305        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
 306              ETH_HLEN + ETH_FCS_LEN,                        \
 307              cache_line_size())
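/* Worked example, assuming a 64-byte cache line and an MTU of 1500:
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1524,
 * which ALIGN() rounds up to 1536.
 */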
 308
 309#define IS_TSO_HEADER(txq, addr) \
 310        ((addr >= txq->tso_hdrs_phys) && \
 311         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
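/* IS_TSO_HEADER() tells whether a DMA address falls inside the queue's
 * pre-allocated TSO header area, so such addresses can be skipped when
 * unmapping regular fragment buffers on TX completion.
 */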
 312
 313#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 314        (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 315
 316struct mvneta_statistic {
 317        unsigned short offset;
 318        unsigned short type;
 319        const char name[ETH_GSTRING_LEN];
 320};
 321
 322#define T_REG_32        32
 323#define T_REG_64        64
 324
 325static const struct mvneta_statistic mvneta_statistics[] = {
 326        { 0x3000, T_REG_64, "good_octets_received", },
 327        { 0x3010, T_REG_32, "good_frames_received", },
 328        { 0x3008, T_REG_32, "bad_octets_received", },
 329        { 0x3014, T_REG_32, "bad_frames_received", },
 330        { 0x3018, T_REG_32, "broadcast_frames_received", },
 331        { 0x301c, T_REG_32, "multicast_frames_received", },
 332        { 0x3050, T_REG_32, "unrec_mac_control_received", },
 333        { 0x3058, T_REG_32, "good_fc_received", },
 334        { 0x305c, T_REG_32, "bad_fc_received", },
 335        { 0x3060, T_REG_32, "undersize_received", },
 336        { 0x3064, T_REG_32, "fragments_received", },
 337        { 0x3068, T_REG_32, "oversize_received", },
 338        { 0x306c, T_REG_32, "jabber_received", },
 339        { 0x3070, T_REG_32, "mac_receive_error", },
 340        { 0x3074, T_REG_32, "bad_crc_event", },
 341        { 0x3078, T_REG_32, "collision", },
 342        { 0x307c, T_REG_32, "late_collision", },
 343        { 0x2484, T_REG_32, "rx_discard", },
 344        { 0x2488, T_REG_32, "rx_overrun", },
 345        { 0x3020, T_REG_32, "frames_64_octets", },
 346        { 0x3024, T_REG_32, "frames_65_to_127_octets", },
 347        { 0x3028, T_REG_32, "frames_128_to_255_octets", },
 348        { 0x302c, T_REG_32, "frames_256_to_511_octets", },
 349        { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
 350        { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
 351        { 0x3038, T_REG_64, "good_octets_sent", },
 352        { 0x3040, T_REG_32, "good_frames_sent", },
 353        { 0x3044, T_REG_32, "excessive_collision", },
 354        { 0x3048, T_REG_32, "multicast_frames_sent", },
 355        { 0x304c, T_REG_32, "broadcast_frames_sent", },
 356        { 0x3054, T_REG_32, "fc_sent", },
 357        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
 358};
 359
 360struct mvneta_pcpu_stats {
 361        struct  u64_stats_sync syncp;
 362        u64     rx_packets;
 363        u64     rx_bytes;
 364        u64     tx_packets;
 365        u64     tx_bytes;
 366};
 367
 368struct mvneta_pcpu_port {
 369        /* Pointer to the shared port */
 370        struct mvneta_port      *pp;
 371
 372        /* Pointer to the CPU-local NAPI struct */
 373        struct napi_struct      napi;
 374
 375        /* Cause of the previous interrupt */
 376        u32                     cause_rx_tx;
 377};
 378
 379struct mvneta_port {
 380        u8 id;
 381        struct mvneta_pcpu_port __percpu        *ports;
 382        struct mvneta_pcpu_stats __percpu       *stats;
 383
 384        int pkt_size;
 385        unsigned int frag_size;
 386        void __iomem *base;
 387        struct mvneta_rx_queue *rxqs;
 388        struct mvneta_tx_queue *txqs;
 389        struct net_device *dev;
 390        struct hlist_node node_online;
 391        struct hlist_node node_dead;
 392        int rxq_def;
 393        /* Protect the access to the percpu interrupt registers,
 394         * ensuring that the configuration remains coherent.
 395         */
 396        spinlock_t lock;
 397        bool is_stopped;
 398
 399        u32 cause_rx_tx;
 400        struct napi_struct napi;
 401
 402        /* Core clock */
 403        struct clk *clk;
 404        /* AXI clock */
 405        struct clk *clk_bus;
 406        u8 mcast_count[256];
 407        u16 tx_ring_size;
 408        u16 rx_ring_size;
 409
 410        struct mii_bus *mii_bus;
 411        phy_interface_t phy_interface;
 412        struct device_node *phy_node;
 413        unsigned int link;
 414        unsigned int duplex;
 415        unsigned int speed;
 416        unsigned int tx_csum_limit;
 417        unsigned int use_inband_status:1;
 418
 419        struct mvneta_bm *bm_priv;
 420        struct mvneta_bm_pool *pool_long;
 421        struct mvneta_bm_pool *pool_short;
 422        int bm_win_id;
 423
 424        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 425
 426        u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 427
 428        /* Flags for special SoC configurations */
 429        bool neta_armada3700;
 430        u16 rx_offset_correction;
 431        const struct mbus_dram_target_info *dram_target_info;
 432};
 433
 434/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 435 * layout of the transmit and reception DMA descriptors, and their
 436 * layout is therefore defined by the hardware design
 437 */
 438
 439#define MVNETA_TX_L3_OFF_SHIFT  0
 440#define MVNETA_TX_IP_HLEN_SHIFT 8
 441#define MVNETA_TX_L4_UDP        BIT(16)
 442#define MVNETA_TX_L3_IP6        BIT(17)
 443#define MVNETA_TXD_IP_CSUM      BIT(18)
 444#define MVNETA_TXD_Z_PAD        BIT(19)
 445#define MVNETA_TXD_L_DESC       BIT(20)
 446#define MVNETA_TXD_F_DESC       BIT(21)
 447#define MVNETA_TXD_FLZ_DESC     (MVNETA_TXD_Z_PAD  | \
 448                                 MVNETA_TXD_L_DESC | \
 449                                 MVNETA_TXD_F_DESC)
 450#define MVNETA_TX_L4_CSUM_FULL  BIT(30)
 451#define MVNETA_TX_L4_CSUM_NOT   BIT(31)
 452
 453#define MVNETA_RXD_ERR_CRC              0x0
 454#define MVNETA_RXD_BM_POOL_SHIFT        13
 455#define MVNETA_RXD_BM_POOL_MASK         (BIT(13) | BIT(14))
 456#define MVNETA_RXD_ERR_SUMMARY          BIT(16)
 457#define MVNETA_RXD_ERR_OVERRUN          BIT(17)
 458#define MVNETA_RXD_ERR_LEN              BIT(18)
 459#define MVNETA_RXD_ERR_RESOURCE         (BIT(17) | BIT(18))
 460#define MVNETA_RXD_ERR_CODE_MASK        (BIT(17) | BIT(18))
 461#define MVNETA_RXD_L3_IP4               BIT(25)
 462#define MVNETA_RXD_FIRST_LAST_DESC      (BIT(26) | BIT(27))
 463#define MVNETA_RXD_L4_CSUM_OK           BIT(30)
 464
 465#if defined(__LITTLE_ENDIAN)
 466struct mvneta_tx_desc {
 467        u32  command;           /* Options used by HW for packet transmitting.*/
 468        u16  reserverd1;        /* csum_l4 (for future use)             */
 469        u16  data_size;         /* Data size of transmitted packet in bytes */
 470        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 471        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 472        u32  reserved3[4];      /* Reserved - (for future use)          */
 473};
 474
 475struct mvneta_rx_desc {
 476        u32  status;            /* Info about received packet           */
 477        u16  reserved1;         /* pnc_info - (for future use, PnC)     */
 478        u16  data_size;         /* Size of received packet in bytes     */
 479
 480        u32  buf_phys_addr;     /* Physical address of the buffer       */
 481        u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 482
 483        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
 484        u16  reserved3;         /* prefetch_cmd, for future use         */
 485        u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 486
 487        u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
 488        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 489};
 490#else
 491struct mvneta_tx_desc {
 492        u16  data_size;         /* Data size of transmitted packet in bytes */
 493        u16  reserverd1;        /* csum_l4 (for future use)             */
 494        u32  command;           /* Options used by HW for packet transmitting.*/
 495        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 496        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 497        u32  reserved3[4];      /* Reserved - (for future use)          */
 498};
 499
 500struct mvneta_rx_desc {
 501        u16  data_size;         /* Size of received packet in bytes     */
 502        u16  reserved1;         /* pnc_info - (for future use, PnC)     */
 503        u32  status;            /* Info about received packet           */
 504
 505        u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 506        u32  buf_phys_addr;     /* Physical address of the buffer       */
 507
 508        u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 509        u16  reserved3;         /* prefetch_cmd, for future use         */
 510        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
 511
 512        u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
 513        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 514};
 515#endif
 516
 517struct mvneta_tx_queue {
 518        /* Number of this TX queue, in the range 0-7 */
 519        u8 id;
 520
 521        /* Number of TX DMA descriptors in the descriptor ring */
 522        int size;
 523
 524        /* Number of currently used TX DMA descriptor in the
 525         * descriptor ring
 526         */
 527        int count;
 528        int pending;
 529        int tx_stop_threshold;
 530        int tx_wake_threshold;
 531
 532        /* Array of transmitted skb */
 533        struct sk_buff **tx_skb;
 534
 535        /* Index of last TX DMA descriptor that was inserted */
 536        int txq_put_index;
 537
 538        /* Index of the TX DMA descriptor to be cleaned up */
 539        int txq_get_index;
 540
 541        u32 done_pkts_coal;
 542
 543        /* Virtual address of the TX DMA descriptors array */
 544        struct mvneta_tx_desc *descs;
 545
 546        /* DMA address of the TX DMA descriptors array */
 547        dma_addr_t descs_phys;
 548
 549        /* Index of the last TX DMA descriptor */
 550        int last_desc;
 551
 552        /* Index of the next TX DMA descriptor to process */
 553        int next_desc_to_proc;
 554
 555        /* DMA buffers for TSO headers */
 556        char *tso_hdrs;
 557
 558        /* DMA address of TSO headers */
 559        dma_addr_t tso_hdrs_phys;
 560
 561        /* Affinity mask for CPUs*/
 562        cpumask_t affinity_mask;
 563};
 564
 565struct mvneta_rx_queue {
 566        /* rx queue number, in the range 0-7 */
 567        u8 id;
 568
 569        /* num of rx descriptors in the rx descriptor ring */
 570        int size;
 571
 572        /* counter of times when mvneta_refill() failed */
 573        int missed;
 574
 575        u32 pkts_coal;
 576        u32 time_coal;
 577
 578        /* Virtual address of the RX buffer */
 579        void  **buf_virt_addr;
 580
 581        /* Virtual address of the RX DMA descriptors array */
 582        struct mvneta_rx_desc *descs;
 583
 584        /* DMA address of the RX DMA descriptors array */
 585        dma_addr_t descs_phys;
 586
 587        /* Index of the last RX DMA descriptor */
 588        int last_desc;
 589
 590        /* Index of the next RX DMA descriptor to process */
 591        int next_desc_to_proc;
 592};
 593
 594static enum cpuhp_state online_hpstate;
  595/* The hardware supports eight (8) RX queues. All of them are allocated,
  596 * even though by default incoming traffic is steered to rxq_def.
 597 */
 598static int rxq_number = 8;
 599static int txq_number = 8;
 600
 601static int rxq_def;
 602
 603static int rx_copybreak __read_mostly = 256;
 604
  605/* HW BM requires that each port be identified by a unique ID */
 606static int global_port_id;
 607
 608#define MVNETA_DRIVER_NAME "mvneta"
 609#define MVNETA_DRIVER_VERSION "1.0"
 610
 611/* Utility/helper methods */
 612
 613/* Write helper method */
 614static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
 615{
 616        writel(data, pp->base + offset);
 617}
 618
 619/* Read helper method */
 620static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
 621{
 622        return readl(pp->base + offset);
 623}
 624
 625/* Increment txq get counter */
 626static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
 627{
 628        txq->txq_get_index++;
 629        if (txq->txq_get_index == txq->size)
 630                txq->txq_get_index = 0;
 631}
 632
 633/* Increment txq put counter */
 634static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
 635{
 636        txq->txq_put_index++;
 637        if (txq->txq_put_index == txq->size)
 638                txq->txq_put_index = 0;
 639}
 640
 641
 642/* Clear all MIB counters */
 643static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 644{
 645        int i;
 646        u32 dummy;
 647
  648        /* Perform dummy reads from the MIB counters, which are clear-on-read */
 649        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
 650                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
 651        dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
 652        dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
 653}
 654
 655/* Get System Network Statistics */
 656static void
 657mvneta_get_stats64(struct net_device *dev,
 658                   struct rtnl_link_stats64 *stats)
 659{
 660        struct mvneta_port *pp = netdev_priv(dev);
 661        unsigned int start;
 662        int cpu;
 663
 664        for_each_possible_cpu(cpu) {
 665                struct mvneta_pcpu_stats *cpu_stats;
 666                u64 rx_packets;
 667                u64 rx_bytes;
 668                u64 tx_packets;
 669                u64 tx_bytes;
 670
 671                cpu_stats = per_cpu_ptr(pp->stats, cpu);
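                /* Snapshot the per-CPU counters; retry if a writer updated
                 * them concurrently (u64_stats seqcount protocol).
                 */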
 672                do {
 673                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 674                        rx_packets = cpu_stats->rx_packets;
 675                        rx_bytes   = cpu_stats->rx_bytes;
 676                        tx_packets = cpu_stats->tx_packets;
 677                        tx_bytes   = cpu_stats->tx_bytes;
 678                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 679
 680                stats->rx_packets += rx_packets;
 681                stats->rx_bytes   += rx_bytes;
 682                stats->tx_packets += tx_packets;
 683                stats->tx_bytes   += tx_bytes;
 684        }
 685
 686        stats->rx_errors        = dev->stats.rx_errors;
 687        stats->rx_dropped       = dev->stats.rx_dropped;
 688
 689        stats->tx_dropped       = dev->stats.tx_dropped;
 690}
 691
 692/* Rx descriptors helper methods */
 693
 694/* Checks whether the RX descriptor having this status is both the first
 695 * and the last descriptor for the RX packet. Each RX packet is currently
  696 * received through a single RX descriptor, so a descriptor without
  697 * both its first and last bits set indicates an error
 698 */
 699static int mvneta_rxq_desc_is_first_last(u32 status)
 700{
 701        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 702                MVNETA_RXD_FIRST_LAST_DESC;
 703}
 704
 705/* Add number of descriptors ready to receive new packets */
 706static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
 707                                          struct mvneta_rx_queue *rxq,
 708                                          int ndescs)
 709{
 710        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
 711         * be added at once
 712         */
 713        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
 714                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 715                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
 716                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 717                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
 718        }
 719
 720        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 721                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 722}
 723
 724/* Get number of RX descriptors occupied by received packets */
 725static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
 726                                        struct mvneta_rx_queue *rxq)
 727{
 728        u32 val;
 729
 730        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
 731        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
 732}
 733
  734/* Update the number of RX descriptors; called upon return from the RX
  735 * path or from mvneta_rxq_drop_pkts().
 736 */
 737static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
 738                                       struct mvneta_rx_queue *rxq,
 739                                       int rx_done, int rx_filled)
 740{
 741        u32 val;
 742
 743        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
 744                val = rx_done |
 745                  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
 746                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 747                return;
 748        }
 749
 750        /* Only 255 descriptors can be added at once */
 751        while ((rx_done > 0) || (rx_filled > 0)) {
 752                if (rx_done <= 0xff) {
 753                        val = rx_done;
 754                        rx_done = 0;
 755                } else {
 756                        val = 0xff;
 757                        rx_done -= 0xff;
 758                }
 759                if (rx_filled <= 0xff) {
 760                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 761                        rx_filled = 0;
 762                } else {
 763                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 764                        rx_filled -= 0xff;
 765                }
 766                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 767        }
 768}
 769
 770/* Get pointer to next RX descriptor to be processed by SW */
 771static struct mvneta_rx_desc *
 772mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
 773{
 774        int rx_desc = rxq->next_desc_to_proc;
 775
 776        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
 777        prefetch(rxq->descs + rxq->next_desc_to_proc);
 778        return rxq->descs + rx_desc;
 779}
 780
 781/* Change maximum receive size of the port. */
 782static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
 783{
 784        u32 val;
 785
 786        val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 787        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
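        /* The maximum RX size field is programmed in 2-byte units and
         * excludes the 2-byte Marvell header, hence the subtraction and
         * division by two below.
         */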
 788        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
 789                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
 790        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 791}
 792
 793
 794/* Set rx queue offset */
 795static void mvneta_rxq_offset_set(struct mvneta_port *pp,
 796                                  struct mvneta_rx_queue *rxq,
 797                                  int offset)
 798{
 799        u32 val;
 800
 801        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 802        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
 803
  804        /* Offset is programmed in units of 8 bytes */
 805        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
 806        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 807}
 808
 809
 810/* Tx descriptors helper methods */
 811
 812/* Update HW with number of TX descriptors to be sent */
 813static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 814                                     struct mvneta_tx_queue *txq,
 815                                     int pend_desc)
 816{
 817        u32 val;
 818
  819        /* Only 255 descriptors can be added at once; assume the caller
  820         * processes TX descriptors in quanta of less than 256
 821         */
 822        val = pend_desc + txq->pending;
 823        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 824        txq->pending = 0;
 825}
 826
 827/* Get pointer to next TX descriptor to be processed (send) by HW */
 828static struct mvneta_tx_desc *
 829mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
 830{
 831        int tx_desc = txq->next_desc_to_proc;
 832
 833        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
 834        return txq->descs + tx_desc;
 835}
 836
 837/* Release the last allocated TX descriptor. Useful to handle DMA
 838 * mapping failures in the TX path.
 839 */
 840static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
 841{
 842        if (txq->next_desc_to_proc == 0)
 843                txq->next_desc_to_proc = txq->last_desc - 1;
 844        else
 845                txq->next_desc_to_proc--;
 846}
 847
 848/* Set rxq buf size */
 849static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
 850                                    struct mvneta_rx_queue *rxq,
 851                                    int buf_size)
 852{
 853        u32 val;
 854
 855        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
 856
 857        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
 858        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
 859
 860        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
 861}
 862
 863/* Disable buffer management (BM) */
 864static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 865                                  struct mvneta_rx_queue *rxq)
 866{
 867        u32 val;
 868
 869        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 870        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
 871        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 872}
 873
 874/* Enable buffer management (BM) */
 875static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
 876                                 struct mvneta_rx_queue *rxq)
 877{
 878        u32 val;
 879
 880        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 881        val |= MVNETA_RXQ_HW_BUF_ALLOC;
 882        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 883}
 884
 885/* Notify HW about port's assignment of pool for bigger packets */
 886static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
 887                                     struct mvneta_rx_queue *rxq)
 888{
 889        u32 val;
 890
 891        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 892        val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
 893        val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
 894
 895        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 896}
 897
 898/* Notify HW about port's assignment of pool for smaller packets */
 899static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
 900                                      struct mvneta_rx_queue *rxq)
 901{
 902        u32 val;
 903
 904        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 905        val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
 906        val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
 907
 908        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 909}
 910
 911/* Set port's receive buffer size for assigned BM pool */
 912static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
 913                                              int buf_size,
 914                                              u8 pool_id)
 915{
 916        u32 val;
 917
 918        if (!IS_ALIGNED(buf_size, 8)) {
 919                dev_warn(pp->dev->dev.parent,
 920                         "illegal buf_size value %d, round to %d\n",
 921                         buf_size, ALIGN(buf_size, 8));
 922                buf_size = ALIGN(buf_size, 8);
 923        }
 924
 925        val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
 926        val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
 927        mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
 928}
 929
  930/* Configure an MBUS window in order to enable access to the BM internal SRAM */
 931static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
 932                                  u8 target, u8 attr)
 933{
 934        u32 win_enable, win_protect;
 935        int i;
 936
 937        win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
 938
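        /* Note: in MVNETA_BASE_ADDR_ENABLE a set bit means the window is
         * disabled, so a set bit marks a free window here; the chosen bit
         * is cleared at the end of this function to enable the window.
         */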
 939        if (pp->bm_win_id < 0) {
 940                /* Find first not occupied window */
 941                for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
 942                        if (win_enable & (1 << i)) {
 943                                pp->bm_win_id = i;
 944                                break;
 945                        }
 946                }
 947                if (i == MVNETA_MAX_DECODE_WIN)
 948                        return -ENOMEM;
 949        } else {
 950                i = pp->bm_win_id;
 951        }
 952
 953        mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
 954        mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
 955
 956        if (i < 4)
 957                mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
 958
 959        mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
 960                    (attr << 8) | target);
 961
 962        mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
 963
 964        win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
 965        win_protect |= 3 << (2 * i);
 966        mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
 967
 968        win_enable &= ~(1 << i);
 969        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
 970
 971        return 0;
 972}
 973
  974static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
 975{
 976        u32 wsize;
 977        u8 target, attr;
 978        int err;
 979
 980        /* Get BM window information */
 981        err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
 982                                         &target, &attr);
 983        if (err < 0)
 984                return err;
 985
 986        pp->bm_win_id = -1;
 987
 988        /* Open NETA -> BM window */
 989        err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
 990                                     target, attr);
 991        if (err < 0) {
 992                netdev_info(pp->dev, "fail to configure mbus window to BM\n");
 993                return err;
 994        }
 995        return 0;
 996}
 997
  998/* Assign and initialize pools for the port. In case of failure,
  999 * the buffer manager will remain disabled for the current port.
1000 */
1001static int mvneta_bm_port_init(struct platform_device *pdev,
1002                               struct mvneta_port *pp)
1003{
1004        struct device_node *dn = pdev->dev.of_node;
1005        u32 long_pool_id, short_pool_id;
1006
1007        if (!pp->neta_armada3700) {
1008                int ret;
1009
1010                ret = mvneta_bm_port_mbus_init(pp);
1011                if (ret)
1012                        return ret;
1013        }
1014
1015        if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1016                netdev_info(pp->dev, "missing long pool id\n");
1017                return -EINVAL;
1018        }
1019
1020        /* Create port's long pool depending on mtu */
1021        pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1022                                           MVNETA_BM_LONG, pp->id,
1023                                           MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1024        if (!pp->pool_long) {
1025                netdev_info(pp->dev, "fail to obtain long pool for port\n");
1026                return -ENOMEM;
1027        }
1028
1029        pp->pool_long->port_map |= 1 << pp->id;
1030
1031        mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1032                                   pp->pool_long->id);
1033
1034        /* If short pool id is not defined, assume using single pool */
1035        if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1036                short_pool_id = long_pool_id;
1037
1038        /* Create port's short pool */
1039        pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1040                                            MVNETA_BM_SHORT, pp->id,
1041                                            MVNETA_BM_SHORT_PKT_SIZE);
1042        if (!pp->pool_short) {
1043                netdev_info(pp->dev, "fail to obtain short pool for port\n");
1044                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1045                return -ENOMEM;
1046        }
1047
1048        if (short_pool_id != long_pool_id) {
1049                pp->pool_short->port_map |= 1 << pp->id;
1050                mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1051                                           pp->pool_short->id);
1052        }
1053
1054        return 0;
1055}
1056
1057/* Update settings of a pool for bigger packets */
1058static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1059{
1060        struct mvneta_bm_pool *bm_pool = pp->pool_long;
1061        struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1062        int num;
1063
1064        /* Release all buffers from long pool */
1065        mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1066        if (hwbm_pool->buf_num) {
1067                WARN(1, "cannot free all buffers in pool %d\n",
1068                     bm_pool->id);
1069                goto bm_mtu_err;
1070        }
1071
1072        bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1073        bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1074        hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1075                        SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1076
1077        /* Fill entire long pool */
1078        num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
1079        if (num != hwbm_pool->size) {
1080                WARN(1, "pool %d: %d of %d allocated\n",
1081                     bm_pool->id, num, hwbm_pool->size);
1082                goto bm_mtu_err;
1083        }
1084        mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1085
1086        return;
1087
1088bm_mtu_err:
1089        mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1090        mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1091
1092        pp->bm_priv = NULL;
1093        mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1094        netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1095}
1096
1097/* Start the Ethernet port RX and TX activity */
1098static void mvneta_port_up(struct mvneta_port *pp)
1099{
1100        int queue;
1101        u32 q_map;
1102
1103        /* Enable all initialized TXs. */
1104        q_map = 0;
1105        for (queue = 0; queue < txq_number; queue++) {
1106                struct mvneta_tx_queue *txq = &pp->txqs[queue];
1107                if (txq->descs)
1108                        q_map |= (1 << queue);
1109        }
1110        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1111
1112        /* Enable all initialized RXQs. */
1113        for (queue = 0; queue < rxq_number; queue++) {
1114                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1115
1116                if (rxq->descs)
1117                        q_map |= (1 << queue);
1118        }
1119        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1120}
1121
1122/* Stop the Ethernet port activity */
1123static void mvneta_port_down(struct mvneta_port *pp)
1124{
1125        u32 val;
1126        int count;
1127
1128        /* Stop Rx port activity. Check port Rx activity. */
1129        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1130
1131        /* Issue stop command for active channels only */
1132        if (val != 0)
1133                mvreg_write(pp, MVNETA_RXQ_CMD,
1134                            val << MVNETA_RXQ_DISABLE_SHIFT);
1135
1136        /* Wait for all Rx activity to terminate. */
1137        count = 0;
1138        do {
1139                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1140                        netdev_warn(pp->dev,
1141                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1142                                    val);
1143                        break;
1144                }
1145                mdelay(1);
1146
1147                val = mvreg_read(pp, MVNETA_RXQ_CMD);
1148        } while (val & MVNETA_RXQ_ENABLE_MASK);
1149
1150        /* Stop Tx port activity. Check port Tx activity. Issue stop
1151         * command for active channels only
1152         */
1153        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1154
1155        if (val != 0)
1156                mvreg_write(pp, MVNETA_TXQ_CMD,
1157                            (val << MVNETA_TXQ_DISABLE_SHIFT));
1158
1159        /* Wait for all Tx activity to terminate. */
1160        count = 0;
1161        do {
1162                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1163                        netdev_warn(pp->dev,
1164                                    "TIMEOUT for TX stopped status=0x%08x\n",
1165                                    val);
1166                        break;
1167                }
1168                mdelay(1);
1169
1170                /* Check TX Command reg that all Txqs are stopped */
1171                val = mvreg_read(pp, MVNETA_TXQ_CMD);
1172
1173        } while (val & MVNETA_TXQ_ENABLE_MASK);
1174
1175        /* Double check to verify that TX FIFO is empty */
1176        count = 0;
1177        do {
1178                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1179                        netdev_warn(pp->dev,
1180                                    "TX FIFO empty timeout status=0x%08x\n",
1181                                    val);
1182                        break;
1183                }
1184                mdelay(1);
1185
1186                val = mvreg_read(pp, MVNETA_PORT_STATUS);
1187        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1188                 (val & MVNETA_TX_IN_PRGRS));
1189
1190        udelay(200);
1191}
1192
1193/* Enable the port by setting the port enable bit of the MAC control register */
1194static void mvneta_port_enable(struct mvneta_port *pp)
1195{
1196        u32 val;
1197
1198        /* Enable port */
1199        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1200        val |= MVNETA_GMAC0_PORT_ENABLE;
1201        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1202}
1203
 1204/* Disable the port and wait for about 200 usec before returning */
1205static void mvneta_port_disable(struct mvneta_port *pp)
1206{
1207        u32 val;
1208
1209        /* Reset the Enable bit in the Serial Control Register */
1210        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1211        val &= ~MVNETA_GMAC0_PORT_ENABLE;
1212        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1213
1214        udelay(200);
1215}
1216
1217/* Multicast tables methods */
1218
1219/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1220static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1221{
1222        int offset;
1223        u32 val;
1224
1225        if (queue == -1) {
1226                val = 0;
1227        } else {
1228                val = 0x1 | (queue << 1);
1229                val |= (val << 24) | (val << 16) | (val << 8);
1230        }
1231
1232        for (offset = 0; offset <= 0xc; offset += 4)
1233                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1234}
1235
1236/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1237static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1238{
1239        int offset;
1240        u32 val;
1241
1242        if (queue == -1) {
1243                val = 0;
1244        } else {
1245                val = 0x1 | (queue << 1);
1246                val |= (val << 24) | (val << 16) | (val << 8);
1247        }
1248
1249        for (offset = 0; offset <= 0xfc; offset += 4)
1250                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1251
1252}
1253
1254/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1255static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1256{
1257        int offset;
1258        u32 val;
1259
1260        if (queue == -1) {
1261                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1262                val = 0;
1263        } else {
1264                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1265                val = 0x1 | (queue << 1);
1266                val |= (val << 24) | (val << 16) | (val << 8);
1267        }
1268
1269        for (offset = 0; offset <= 0xfc; offset += 4)
1270                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1271}
1272
1273static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
1274{
1275        u32 val;
1276
1277        if (enable) {
1278                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1279                val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
1280                         MVNETA_GMAC_FORCE_LINK_DOWN |
1281                         MVNETA_GMAC_AN_FLOW_CTRL_EN);
1282                val |= MVNETA_GMAC_INBAND_AN_ENABLE |
1283                       MVNETA_GMAC_AN_SPEED_EN |
1284                       MVNETA_GMAC_AN_DUPLEX_EN;
1285                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1286
1287                val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
1288                val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
1289                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
1290
1291                val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
1292                val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
1293                mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
1294        } else {
1295                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1296                val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
1297                       MVNETA_GMAC_AN_SPEED_EN |
1298                       MVNETA_GMAC_AN_DUPLEX_EN);
1299                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1300
1301                val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
1302                val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
1303                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
1304
1305                val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
1306                val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
1307                mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
1308        }
1309}
1310
1311static void mvneta_percpu_unmask_interrupt(void *arg)
1312{
1313        struct mvneta_port *pp = arg;
1314
 1315        /* All the queues are unmasked, but actually only the ones
1316         * mapped to this CPU will be unmasked
1317         */
1318        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1319                    MVNETA_RX_INTR_MASK_ALL |
1320                    MVNETA_TX_INTR_MASK_ALL |
1321                    MVNETA_MISCINTR_INTR_MASK);
1322}
1323
1324static void mvneta_percpu_mask_interrupt(void *arg)
1325{
1326        struct mvneta_port *pp = arg;
1327
 1328        /* All the queues are masked, but actually only the ones
1329         * mapped to this CPU will be masked
1330         */
1331        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1332        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1333        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1334}
1335
1336static void mvneta_percpu_clear_intr_cause(void *arg)
1337{
1338        struct mvneta_port *pp = arg;
1339
 1340        /* All the queues are cleared, but actually only the ones
1341         * mapped to this CPU will be cleared
1342         */
1343        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1344        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1345        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1346}
1347
1348/* This method sets defaults to the NETA port:
1349 *      Clears interrupt Cause and Mask registers.
1350 *      Clears all MAC tables.
1351 *      Sets defaults to all registers.
1352 *      Resets RX and TX descriptor rings.
1353 *      Resets PHY.
1354 * This method can be called after mvneta_port_down() to return the port
1355 *      settings to defaults.
1356 */
1357static void mvneta_defaults_set(struct mvneta_port *pp)
1358{
1359        int cpu;
1360        int queue;
1361        u32 val;
1362        int max_cpu = num_present_cpus();
1363
1364        /* Clear all Cause registers */
1365        on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1366
1367        /* Mask all interrupts */
1368        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1369        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1370
1371        /* Enable MBUS Retry bit16 */
1372        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1373
1374        /* Set CPU queue access map. CPUs are assigned to the RX and
1375         * TX queues modulo their number. If there is only one TX
  376         * queue then it is assigned to the CPU associated with the
1377         * default RX queue.
1378         */
1379        for_each_present_cpu(cpu) {
1380                int rxq_map = 0, txq_map = 0;
1381                int rxq, txq;
1382                if (!pp->neta_armada3700) {
1383                        for (rxq = 0; rxq < rxq_number; rxq++)
1384                                if ((rxq % max_cpu) == cpu)
1385                                        rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1386
1387                        for (txq = 0; txq < txq_number; txq++)
1388                                if ((txq % max_cpu) == cpu)
1389                                        txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1390
1391                        /* With only one TX queue we configure a special
1392                         * case which allows getting all the IRQs on a
1393                         * single CPU
1394                         */
1395                        if (txq_number == 1)
1396                                txq_map = (cpu == pp->rxq_def) ?
1397                                        MVNETA_CPU_TXQ_ACCESS(1) : 0;
1398
1399                } else {
1400                        txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1401                        rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1402                }
1403
1404                mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1405        }
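            /* Illustrative example (annotation, not part of the original
             * source): with two present CPUs and eight RX/TX queues, the
             * modulo assignment above gives CPU0 the even-numbered queues
             * (0, 2, 4, 6) and CPU1 the odd-numbered ones (1, 3, 5, 7),
             * e.g. rxq_map for CPU0 would be MVNETA_CPU_RXQ_ACCESS(0) |
             * MVNETA_CPU_RXQ_ACCESS(2) | ...
             */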
1406
1407        /* Reset RX and TX DMAs */
1408        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1409        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1410
1411        /* Disable Legacy WRR, Disable EJP, Release from reset */
1412        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1413        for (queue = 0; queue < txq_number; queue++) {
1414                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1415                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1416        }
1417
1418        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1419        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1420
1421        /* Set Port Acceleration Mode */
1422        if (pp->bm_priv)
1423                /* HW buffer management + legacy parser */
1424                val = MVNETA_ACC_MODE_EXT2;
1425        else
1426                /* SW buffer management + legacy parser */
1427                val = MVNETA_ACC_MODE_EXT1;
1428        mvreg_write(pp, MVNETA_ACC_MODE, val);
1429
1430        if (pp->bm_priv)
1431                mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1432
1433        /* Update val of portCfg register according to all RxQueue types */
1434        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1435        mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1436
1437        val = 0;
1438        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1439        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1440
1441        /* Build PORT_SDMA_CONFIG_REG */
1442        val = 0;
1443
1444        /* Default burst size */
1445        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1446        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1447        val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1448
1449#if defined(__BIG_ENDIAN)
1450        val |= MVNETA_DESC_SWAP;
1451#endif
1452
1453        /* Assign port SDMA configuration */
1454        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1455
1456        /* Disable PHY polling in hardware, since we're using the
1457         * kernel phylib to do this.
1458         */
1459        val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1460        val &= ~MVNETA_PHY_POLLING_ENABLE;
1461        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1462
1463        mvneta_set_autoneg(pp, pp->use_inband_status);
1464        mvneta_set_ucast_table(pp, -1);
1465        mvneta_set_special_mcast_table(pp, -1);
1466        mvneta_set_other_mcast_table(pp, -1);
1467
1468        /* Set port interrupt enable register - default enable all */
1469        mvreg_write(pp, MVNETA_INTR_ENABLE,
1470                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1471                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1472
1473        mvneta_mib_counters_clear(pp);
1474}
1475
1476/* Set max sizes for tx queues */
1477static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1478
1479{
1480        u32 val, size, mtu;
1481        int queue;
1482
1483        mtu = max_tx_size * 8;
1484        if (mtu > MVNETA_TX_MTU_MAX)
1485                mtu = MVNETA_TX_MTU_MAX;
1486
1487        /* Set MTU */
1488        val = mvreg_read(pp, MVNETA_TX_MTU);
1489        val &= ~MVNETA_TX_MTU_MAX;
1490        val |= mtu;
1491        mvreg_write(pp, MVNETA_TX_MTU, val);
1492
1493        /* TX token size and all TXQs token size must be larger than MTU */
1494        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1495
1496        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1497        if (size < mtu) {
1498                size = mtu;
1499                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1500                val |= size;
1501                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1502        }
1503        for (queue = 0; queue < txq_number; queue++) {
1504                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1505
1506                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1507                if (size < mtu) {
1508                        size = mtu;
1509                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1510                        val |= size;
1511                        mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1512                }
1513        }
1514}
1515
1516/* Set unicast address */
1517static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1518                                  int queue)
1519{
1520        unsigned int unicast_reg;
1521        unsigned int tbl_offset;
1522        unsigned int reg_offset;
1523
1524        /* Locate the Unicast table entry */
1525        last_nibble = (0xf & last_nibble);
1526
1527        /* offset from unicast tbl base */
1528        tbl_offset = (last_nibble / 4) * 4;
1529
1530        /* offset within the above reg  */
1531        reg_offset = last_nibble % 4;
1532
1533        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1534
1535        if (queue == -1) {
1536                /* Clear accepts frame bit at specified unicast DA tbl entry */
1537                unicast_reg &= ~(0xff << (8 * reg_offset));
1538        } else {
1539                unicast_reg &= ~(0xff << (8 * reg_offset));
1540                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1541        }
1542
1543        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1544}
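    /* Worked example (annotation, not part of the driver): for last_nibble
     * 0x9 the code above computes tbl_offset = (9 / 4) * 4 = 8 and
     * reg_offset = 1, so byte 1 (bits 15:8) of the register at
     * MVNETA_DA_FILT_UCAST_BASE + 8 is programmed with 0x01 | (queue << 1),
     * i.e. the "accept" bit plus the target RX queue.
     */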
1545
1546/* Set mac address */
1547static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1548                                int queue)
1549{
1550        unsigned int mac_h;
1551        unsigned int mac_l;
1552
1553        if (queue != -1) {
1554                mac_l = (addr[4] << 8) | (addr[5]);
1555                mac_h = (addr[0] << 24) | (addr[1] << 16) |
1556                        (addr[2] << 8) | (addr[3] << 0);
1557
1558                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1559                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1560        }
1561
1562        /* Accept frames of this address */
1563        mvneta_set_ucast_addr(pp, addr[5], queue);
1564}
1565
1566/* Set the number of packets that will be received before an RX interrupt
1567 * is generated by the HW.
1568 */
1569static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1570                                    struct mvneta_rx_queue *rxq, u32 value)
1571{
1572        mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1573                    value | MVNETA_RXQ_NON_OCCUPIED(0));
1574        rxq->pkts_coal = value;
1575}
1576
1577/* Set the time delay in usec before an RX interrupt is generated by
1578 * the HW.
1579 */
1580static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1581                                    struct mvneta_rx_queue *rxq, u32 value)
1582{
1583        u32 val;
1584        unsigned long clk_rate;
1585
1586        clk_rate = clk_get_rate(pp->clk);
1587        val = (clk_rate / 1000000) * value;
1588
1589        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1590        rxq->time_coal = value;
1591}
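    /* Worked example (annotation, assuming a hypothetical 250 MHz core
     * clock): for value = 100 usec the register written above becomes
     *
     *     val = (250000000 / 1000000) * 100 = 25000 clock cycles
     */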
1592
1593/* Set threshold for TX_DONE pkts coalescing */
1594static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1595                                         struct mvneta_tx_queue *txq, u32 value)
1596{
1597        u32 val;
1598
1599        val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1600
1601        val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1602        val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1603
1604        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1605
1606        txq->done_pkts_coal = value;
1607}
1608
1609/* Handle rx descriptor fill by setting buf_phys_addr and buf_virt_addr */
1610static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1611                                u32 phys_addr, void *virt_addr,
1612                                struct mvneta_rx_queue *rxq)
1613{
1614        int i;
1615
1616        rx_desc->buf_phys_addr = phys_addr;
1617        i = rx_desc - rxq->descs;
1618        rxq->buf_virt_addr[i] = virt_addr;
1619}
1620
1621/* Decrement sent descriptors counter */
1622static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1623                                     struct mvneta_tx_queue *txq,
1624                                     int sent_desc)
1625{
1626        u32 val;
1627
1628        /* Only 255 TX descriptors can be updated at once */
1629        while (sent_desc > 0xff) {
1630                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1631                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1632                sent_desc = sent_desc - 0xff;
1633        }
1634
1635        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1636        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1637}
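    /* Worked example (annotation, not part of the driver): for
     * sent_desc = 300 the loop above first writes 255 (0xff) to
     * MVNETA_TXQ_UPDATE_REG, leaving 45, which fits in the field and is
     * written by the final statement.
     */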
1638
1639/* Get number of TX descriptors already sent by HW */
1640static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1641                                        struct mvneta_tx_queue *txq)
1642{
1643        u32 val;
1644        int sent_desc;
1645
1646        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1647        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1648                MVNETA_TXQ_SENT_DESC_SHIFT;
1649
1650        return sent_desc;
1651}
1652
1653/* Get number of sent descriptors and decrement counter.
1654 *  The number of sent descriptors is returned.
1655 */
1656static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1657                                     struct mvneta_tx_queue *txq)
1658{
1659        int sent_desc;
1660
1661        /* Get number of sent descriptors */
1662        sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1663
1664        /* Decrement sent descriptors counter */
1665        if (sent_desc)
1666                mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1667
1668        return sent_desc;
1669}
1670
1671/* Set TXQ descriptor fields relevant for checksum calculation */
1672static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1673                                int ip_hdr_len, int l4_proto)
1674{
1675        u32 command;
1676
1677        /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1678         * G_L4_chk, L4_type; required only for checksum
1679         * calculation
1680         */
1681        command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1682        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1683
1684        if (l3_proto == htons(ETH_P_IP))
1685                command |= MVNETA_TXD_IP_CSUM;
1686        else
1687                command |= MVNETA_TX_L3_IP6;
1688
1689        if (l4_proto == IPPROTO_TCP)
1690                command |=  MVNETA_TX_L4_CSUM_FULL;
1691        else if (l4_proto == IPPROTO_UDP)
1692                command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1693        else
1694                command |= MVNETA_TX_L4_CSUM_NOT;
1695
1696        return command;
1697}
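    /* Usage sketch (annotation, hypothetical caller context): for a TCP
     * segment in an untagged IPv4 frame the command word would be built
     * roughly as
     *
     *     u32 cmd = mvneta_txq_desc_csum(ETH_HLEN, htons(ETH_P_IP),
     *                                    ip_hdr(skb)->ihl, IPPROTO_TCP);
     *
     * which mirrors what mvneta_skb_tx_csum() below does with values taken
     * from the skb headers.
     */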
1698
1699
1700/* Display more error info */
1701static void mvneta_rx_error(struct mvneta_port *pp,
1702                            struct mvneta_rx_desc *rx_desc)
1703{
1704        u32 status = rx_desc->status;
1705
1706        if (!mvneta_rxq_desc_is_first_last(status)) {
1707                netdev_err(pp->dev,
1708                           "bad rx status %08x (buffer oversize), size=%d\n",
1709                           status, rx_desc->data_size);
1710                return;
1711        }
1712
1713        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1714        case MVNETA_RXD_ERR_CRC:
1715                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1716                           status, rx_desc->data_size);
1717                break;
1718        case MVNETA_RXD_ERR_OVERRUN:
1719                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1720                           status, rx_desc->data_size);
1721                break;
1722        case MVNETA_RXD_ERR_LEN:
1723                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1724                           status, rx_desc->data_size);
1725                break;
1726        case MVNETA_RXD_ERR_RESOURCE:
1727                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1728                           status, rx_desc->data_size);
1729                break;
1730        }
1731}
1732
1733/* Handle RX checksum offload based on the descriptor's status */
1734static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1735                           struct sk_buff *skb)
1736{
1737        if ((status & MVNETA_RXD_L3_IP4) &&
1738            (status & MVNETA_RXD_L4_CSUM_OK)) {
1739                skb->csum = 0;
1740                skb->ip_summed = CHECKSUM_UNNECESSARY;
1741                return;
1742        }
1743
1744        skb->ip_summed = CHECKSUM_NONE;
1745}
1746
1747/* Return a tx queue pointer (find last set bit) according to <cause> returned
1748 * from the tx_done reg. <cause> must not be zero. The return value is always a
1749 * valid queue, matching the highest set bit found in <cause>.
1750 */
1751static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1752                                                     u32 cause)
1753{
1754        int queue = fls(cause) - 1;
1755
1756        return &pp->txqs[queue];
1757}
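    /* Worked example (annotation, not part of the driver): if <cause> is
     * 0x0a (TXQs 1 and 3 pending), fls(0x0a) - 1 = 3, so the queue for
     * TXQ 3 is returned first; the caller clears that bit and calls again
     * to get TXQ 1.
     */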
1758
1759/* Free tx queue skbuffs */
1760static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1761                                 struct mvneta_tx_queue *txq, int num,
1762                                 struct netdev_queue *nq)
1763{
1764        unsigned int bytes_compl = 0, pkts_compl = 0;
1765        int i;
1766
1767        for (i = 0; i < num; i++) {
1768                struct mvneta_tx_desc *tx_desc = txq->descs +
1769                        txq->txq_get_index;
1770                struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1771
1772                if (skb) {
1773                        bytes_compl += skb->len;
1774                        pkts_compl++;
1775                }
1776
1777                mvneta_txq_inc_get(txq);
1778
1779                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1780                        dma_unmap_single(pp->dev->dev.parent,
1781                                         tx_desc->buf_phys_addr,
1782                                         tx_desc->data_size, DMA_TO_DEVICE);
1783                if (!skb)
1784                        continue;
1785                dev_kfree_skb_any(skb);
1786        }
1787
1788        netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1789}
1790
1791/* Handle end of transmission */
1792static void mvneta_txq_done(struct mvneta_port *pp,
1793                           struct mvneta_tx_queue *txq)
1794{
1795        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1796        int tx_done;
1797
1798        tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1799        if (!tx_done)
1800                return;
1801
1802        mvneta_txq_bufs_free(pp, txq, tx_done, nq);
1803
1804        txq->count -= tx_done;
1805
1806        if (netif_tx_queue_stopped(nq)) {
1807                if (txq->count <= txq->tx_wake_threshold)
1808                        netif_tx_wake_queue(nq);
1809        }
1810}
1811
1812void *mvneta_frag_alloc(unsigned int frag_size)
1813{
1814        if (likely(frag_size <= PAGE_SIZE))
1815                return netdev_alloc_frag(frag_size);
1816        else
1817                return kmalloc(frag_size, GFP_ATOMIC);
1818}
1819EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
1820
1821void mvneta_frag_free(unsigned int frag_size, void *data)
1822{
1823        if (likely(frag_size <= PAGE_SIZE))
1824                skb_free_frag(data);
1825        else
1826                kfree(data);
1827}
1828EXPORT_SYMBOL_GPL(mvneta_frag_free);
1829
1830/* Refill processing for SW buffer management */
1831static int mvneta_rx_refill(struct mvneta_port *pp,
1832                            struct mvneta_rx_desc *rx_desc,
1833                            struct mvneta_rx_queue *rxq)
1834
1835{
1836        dma_addr_t phys_addr;
1837        void *data;
1838
1839        data = mvneta_frag_alloc(pp->frag_size);
1840        if (!data)
1841                return -ENOMEM;
1842
1843        phys_addr = dma_map_single(pp->dev->dev.parent, data,
1844                                   MVNETA_RX_BUF_SIZE(pp->pkt_size),
1845                                   DMA_FROM_DEVICE);
1846        if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1847                mvneta_frag_free(pp->frag_size, data);
1848                return -ENOMEM;
1849        }
1850
1851        phys_addr += pp->rx_offset_correction;
1852        mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
1853        return 0;
1854}
1855
1856/* Handle tx checksum */
1857static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1858{
1859        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1860                int ip_hdr_len = 0;
1861                __be16 l3_proto = vlan_get_protocol(skb);
1862                u8 l4_proto;
1863
1864                if (l3_proto == htons(ETH_P_IP)) {
1865                        struct iphdr *ip4h = ip_hdr(skb);
1866
1867                        /* Calculate IPv4 checksum and L4 checksum */
1868                        ip_hdr_len = ip4h->ihl;
1869                        l4_proto = ip4h->protocol;
1870                } else if (l3_proto == htons(ETH_P_IPV6)) {
1871                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
1872
1873                        /* Read l4_proto from one of the IPv6 extension headers */
1874                        if (skb_network_header_len(skb) > 0)
1875                                ip_hdr_len = (skb_network_header_len(skb) >> 2);
1876                        l4_proto = ip6h->nexthdr;
1877                } else
1878                        return MVNETA_TX_L4_CSUM_NOT;
1879
1880                return mvneta_txq_desc_csum(skb_network_offset(skb),
1881                                            l3_proto, ip_hdr_len, l4_proto);
1882        }
1883
1884        return MVNETA_TX_L4_CSUM_NOT;
1885}
1886
1887/* Drop packets received by the RXQ and free buffers */
1888static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1889                                 struct mvneta_rx_queue *rxq)
1890{
1891        int rx_done, i;
1892
1893        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1894        if (rx_done)
1895                mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1896
1897        if (pp->bm_priv) {
1898                for (i = 0; i < rx_done; i++) {
1899                        struct mvneta_rx_desc *rx_desc =
1900                                                  mvneta_rxq_next_desc_get(rxq);
1901                        u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1902                        struct mvneta_bm_pool *bm_pool;
1903
1904                        bm_pool = &pp->bm_priv->bm_pools[pool_id];
1905                        /* Return dropped buffer to the pool */
1906                        mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1907                                              rx_desc->buf_phys_addr);
1908                }
1909                return;
1910        }
1911
1912        for (i = 0; i < rxq->size; i++) {
1913                struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1914                void *data = rxq->buf_virt_addr[i];
1915
1916                dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1917                                 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1918                mvneta_frag_free(pp->frag_size, data);
1919        }
1920}
1921
1922/* Main rx processing when using software buffer management */
1923static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
1924                          struct mvneta_rx_queue *rxq)
1925{
1926        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
1927        struct net_device *dev = pp->dev;
1928        int rx_done;
1929        u32 rcvd_pkts = 0;
1930        u32 rcvd_bytes = 0;
1931
1932        /* Get number of received packets */
1933        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1934
1935        if (rx_todo > rx_done)
1936                rx_todo = rx_done;
1937
1938        rx_done = 0;
1939
1940        /* Fairness NAPI loop */
1941        while (rx_done < rx_todo) {
1942                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1943                struct sk_buff *skb;
1944                unsigned char *data;
1945                dma_addr_t phys_addr;
1946                u32 rx_status, frag_size;
1947                int rx_bytes, err, index;
1948
1949                rx_done++;
1950                rx_status = rx_desc->status;
1951                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1952                index = rx_desc - rxq->descs;
1953                data = rxq->buf_virt_addr[index];
1954                phys_addr = rx_desc->buf_phys_addr;
1955
1956                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1957                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1958err_drop_frame:
1959                        dev->stats.rx_errors++;
1960                        mvneta_rx_error(pp, rx_desc);
1961                        /* leave the descriptor untouched */
1962                        continue;
1963                }
1964
1965                if (rx_bytes <= rx_copybreak) {
1966                        /* better copy a small frame and not unmap the DMA region */
1967                        skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1968                        if (unlikely(!skb))
1969                                goto err_drop_frame;
1970
1971                        dma_sync_single_range_for_cpu(dev->dev.parent,
1972                                                      phys_addr,
1973                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
1974                                                      rx_bytes,
1975                                                      DMA_FROM_DEVICE);
1976                        skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
1977                                     rx_bytes);
1978
1979                        skb->protocol = eth_type_trans(skb, dev);
1980                        mvneta_rx_csum(pp, rx_status, skb);
1981                        napi_gro_receive(&port->napi, skb);
1982
1983                        rcvd_pkts++;
1984                        rcvd_bytes += rx_bytes;
1985
1986                        /* leave the descriptor and buffer untouched */
1987                        continue;
1988                }
1989
1990                /* Refill processing */
1991                err = mvneta_rx_refill(pp, rx_desc, rxq);
1992                if (err) {
1993                        netdev_err(dev, "Linux processing - Can't refill\n");
1994                        rxq->missed++;
1995                        goto err_drop_frame;
1996                }
1997
1998                frag_size = pp->frag_size;
1999
2000                skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2001
2002                /* After refill the old buffer has to be unmapped regardless
2003                 * of whether the skb was successfully built or not.
2004                 */
2005                dma_unmap_single(dev->dev.parent, phys_addr,
2006                                 MVNETA_RX_BUF_SIZE(pp->pkt_size),
2007                                 DMA_FROM_DEVICE);
2008
2009                if (!skb)
2010                        goto err_drop_frame;
2011
2012                rcvd_pkts++;
2013                rcvd_bytes += rx_bytes;
2014
2015                /* Linux processing */
2016                skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2017                skb_put(skb, rx_bytes);
2018
2019                skb->protocol = eth_type_trans(skb, dev);
2020
2021                mvneta_rx_csum(pp, rx_status, skb);
2022
2023                napi_gro_receive(&port->napi, skb);
2024        }
2025
2026        if (rcvd_pkts) {
2027                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2028
2029                u64_stats_update_begin(&stats->syncp);
2030                stats->rx_packets += rcvd_pkts;
2031                stats->rx_bytes   += rcvd_bytes;
2032                u64_stats_update_end(&stats->syncp);
2033        }
2034
2035        /* Update rxq management counters */
2036        mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2037
2038        return rx_done;
2039}
2040
2041/* Main rx processing when using hardware buffer management */
2042static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
2043                          struct mvneta_rx_queue *rxq)
2044{
2045        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2046        struct net_device *dev = pp->dev;
2047        int rx_done;
2048        u32 rcvd_pkts = 0;
2049        u32 rcvd_bytes = 0;
2050
2051        /* Get number of received packets */
2052        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2053
2054        if (rx_todo > rx_done)
2055                rx_todo = rx_done;
2056
2057        rx_done = 0;
2058
2059        /* Fairness NAPI loop */
2060        while (rx_done < rx_todo) {
2061                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2062                struct mvneta_bm_pool *bm_pool = NULL;
2063                struct sk_buff *skb;
2064                unsigned char *data;
2065                dma_addr_t phys_addr;
2066                u32 rx_status, frag_size;
2067                int rx_bytes, err;
2068                u8 pool_id;
2069
2070                rx_done++;
2071                rx_status = rx_desc->status;
2072                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2073                data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2074                phys_addr = rx_desc->buf_phys_addr;
2075                pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2076                bm_pool = &pp->bm_priv->bm_pools[pool_id];
2077
2078                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2079                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2080err_drop_frame_ret_pool:
2081                        /* Return the buffer to the pool */
2082                        mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2083                                              rx_desc->buf_phys_addr);
2084err_drop_frame:
2085                        dev->stats.rx_errors++;
2086                        mvneta_rx_error(pp, rx_desc);
2087                        /* leave the descriptor untouched */
2088                        continue;
2089                }
2090
2091                if (rx_bytes <= rx_copybreak) {
2092                        /* better copy a small frame and not unmap the DMA region */
2093                        skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2094                        if (unlikely(!skb))
2095                                goto err_drop_frame_ret_pool;
2096
2097                        dma_sync_single_range_for_cpu(dev->dev.parent,
2098                                                      rx_desc->buf_phys_addr,
2099                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
2100                                                      rx_bytes,
2101                                                      DMA_FROM_DEVICE);
2102                        skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2103                                     rx_bytes);
2104
2105                        skb->protocol = eth_type_trans(skb, dev);
2106                        mvneta_rx_csum(pp, rx_status, skb);
2107                        napi_gro_receive(&port->napi, skb);
2108
2109                        rcvd_pkts++;
2110                        rcvd_bytes += rx_bytes;
2111
2112                        /* Return the buffer to the pool */
2113                        mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2114                                              rx_desc->buf_phys_addr);
2115
2116                        /* leave the descriptor and buffer untouched */
2117                        continue;
2118                }
2119
2120                /* Refill processing */
2121                err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2122                if (err) {
2123                        netdev_err(dev, "Linux processing - Can't refill\n");
2124                        rxq->missed++;
2125                        goto err_drop_frame_ret_pool;
2126                }
2127
2128                frag_size = bm_pool->hwbm_pool.frag_size;
2129
2130                skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2131
2132                /* After refill the old buffer has to be unmapped regardless
2133                 * of whether the skb was successfully built or not.
2134                 */
2135                dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2136                                 bm_pool->buf_size, DMA_FROM_DEVICE);
2137                if (!skb)
2138                        goto err_drop_frame;
2139
2140                rcvd_pkts++;
2141                rcvd_bytes += rx_bytes;
2142
2143                /* Linux processing */
2144                skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2145                skb_put(skb, rx_bytes);
2146
2147                skb->protocol = eth_type_trans(skb, dev);
2148
2149                mvneta_rx_csum(pp, rx_status, skb);
2150
2151                napi_gro_receive(&port->napi, skb);
2152        }
2153
2154        if (rcvd_pkts) {
2155                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2156
2157                u64_stats_update_begin(&stats->syncp);
2158                stats->rx_packets += rcvd_pkts;
2159                stats->rx_bytes   += rcvd_bytes;
2160                u64_stats_update_end(&stats->syncp);
2161        }
2162
2163        /* Update rxq management counters */
2164        mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2165
2166        return rx_done;
2167}
2168
2169static inline void
2170mvneta_tso_put_hdr(struct sk_buff *skb,
2171                   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2172{
2173        struct mvneta_tx_desc *tx_desc;
2174        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2175
2176        txq->tx_skb[txq->txq_put_index] = NULL;
2177        tx_desc = mvneta_txq_next_desc_get(txq);
2178        tx_desc->data_size = hdr_len;
2179        tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2180        tx_desc->command |= MVNETA_TXD_F_DESC;
2181        tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2182                                 txq->txq_put_index * TSO_HEADER_SIZE;
2183        mvneta_txq_inc_put(txq);
2184}
2185
2186static inline int
2187mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2188                    struct sk_buff *skb, char *data, int size,
2189                    bool last_tcp, bool is_last)
2190{
2191        struct mvneta_tx_desc *tx_desc;
2192
2193        tx_desc = mvneta_txq_next_desc_get(txq);
2194        tx_desc->data_size = size;
2195        tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2196                                                size, DMA_TO_DEVICE);
2197        if (unlikely(dma_mapping_error(dev->dev.parent,
2198                     tx_desc->buf_phys_addr))) {
2199                mvneta_txq_desc_put(txq);
2200                return -ENOMEM;
2201        }
2202
2203        tx_desc->command = 0;
2204        txq->tx_skb[txq->txq_put_index] = NULL;
2205
2206        if (last_tcp) {
2207                /* last descriptor in the TCP packet */
2208                tx_desc->command = MVNETA_TXD_L_DESC;
2209
2210                /* last descriptor in SKB */
2211                if (is_last)
2212                        txq->tx_skb[txq->txq_put_index] = skb;
2213        }
2214        mvneta_txq_inc_put(txq);
2215        return 0;
2216}
2217
2218static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2219                         struct mvneta_tx_queue *txq)
2220{
2221        int total_len, data_left;
2222        int desc_count = 0;
2223        struct mvneta_port *pp = netdev_priv(dev);
2224        struct tso_t tso;
2225        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2226        int i;
2227
2228        /* Count needed descriptors */
2229        if ((txq->count + tso_count_descs(skb)) >= txq->size)
2230                return 0;
2231
2232        if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2233                pr_info("*** Is this even  possible???!?!?\n");
2234                return 0;
2235        }
2236
2237        /* Initialize the TSO handler, and prepare the first payload */
2238        tso_start(skb, &tso);
2239
2240        total_len = skb->len - hdr_len;
2241        while (total_len > 0) {
2242                char *hdr;
2243
2244                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2245                total_len -= data_left;
2246                desc_count++;
2247
2248                /* prepare packet headers: MAC + IP + TCP */
2249                hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2250                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2251
2252                mvneta_tso_put_hdr(skb, pp, txq);
2253
2254                while (data_left > 0) {
2255                        int size;
2256                        desc_count++;
2257
2258                        size = min_t(int, tso.size, data_left);
2259
2260                        if (mvneta_tso_put_data(dev, txq, skb,
2261                                                 tso.data, size,
2262                                                 size == data_left,
2263                                                 total_len == 0))
2264                                goto err_release;
2265                        data_left -= size;
2266
2267                        tso_build_data(skb, &tso, size);
2268                }
2269        }
2270
2271        return desc_count;
2272
2273err_release:
2274        /* Release all used data descriptors; header descriptors must not
2275         * be DMA-unmapped.
2276         */
2277        for (i = desc_count - 1; i >= 0; i--) {
2278                struct mvneta_tx_desc *tx_desc = txq->descs + i;
2279                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2280                        dma_unmap_single(pp->dev->dev.parent,
2281                                         tx_desc->buf_phys_addr,
2282                                         tx_desc->data_size,
2283                                         DMA_TO_DEVICE);
2284                mvneta_txq_desc_put(txq);
2285        }
2286        return 0;
2287}
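    /* Descriptor layout produced above (annotation, not part of the
     * driver): each TSO segment consumes one header descriptor flagged
     * MVNETA_TXD_F_DESC (built by mvneta_tso_put_hdr()) followed by one or
     * more payload descriptors, the last of which carries MVNETA_TXD_L_DESC;
     * only the final descriptor of the whole skb keeps a pointer to the skb
     * so it can be freed on tx-done.
     */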
2288
2289/* Handle tx fragmentation processing */
2290static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2291                                  struct mvneta_tx_queue *txq)
2292{
2293        struct mvneta_tx_desc *tx_desc;
2294        int i, nr_frags = skb_shinfo(skb)->nr_frags;
2295
2296        for (i = 0; i < nr_frags; i++) {
2297                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2298                void *addr = page_address(frag->page.p) + frag->page_offset;
2299
2300                tx_desc = mvneta_txq_next_desc_get(txq);
2301                tx_desc->data_size = frag->size;
2302
2303                tx_desc->buf_phys_addr =
2304                        dma_map_single(pp->dev->dev.parent, addr,
2305                                       tx_desc->data_size, DMA_TO_DEVICE);
2306
2307                if (dma_mapping_error(pp->dev->dev.parent,
2308                                      tx_desc->buf_phys_addr)) {
2309                        mvneta_txq_desc_put(txq);
2310                        goto error;
2311                }
2312
2313                if (i == nr_frags - 1) {
2314                        /* Last descriptor */
2315                        tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2316                        txq->tx_skb[txq->txq_put_index] = skb;
2317                } else {
2318                        /* Descriptor in the middle: Not First, Not Last */
2319                        tx_desc->command = 0;
2320                        txq->tx_skb[txq->txq_put_index] = NULL;
2321                }
2322                mvneta_txq_inc_put(txq);
2323        }
2324
2325        return 0;
2326
2327error:
2328        /* Release all descriptors that were used to map fragments of
2329         * this packet, as well as the corresponding DMA mappings
2330         */
2331        for (i = i - 1; i >= 0; i--) {
2332                tx_desc = txq->descs + i;
2333                dma_unmap_single(pp->dev->dev.parent,
2334                                 tx_desc->buf_phys_addr,
2335                                 tx_desc->data_size,
2336                                 DMA_TO_DEVICE);
2337                mvneta_txq_desc_put(txq);
2338        }
2339
2340        return -ENOMEM;
2341}
2342
2343/* Main tx processing */
2344static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2345{
2346        struct mvneta_port *pp = netdev_priv(dev);
2347        u16 txq_id = skb_get_queue_mapping(skb);
2348        struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2349        struct mvneta_tx_desc *tx_desc;
2350        int len = skb->len;
2351        int frags = 0;
2352        u32 tx_cmd;
2353
2354        if (!netif_running(dev))
2355                goto out;
2356
2357        if (skb_is_gso(skb)) {
2358                frags = mvneta_tx_tso(skb, dev, txq);
2359                goto out;
2360        }
2361
2362        frags = skb_shinfo(skb)->nr_frags + 1;
2363
2364        /* Get a descriptor for the first part of the packet */
2365        tx_desc = mvneta_txq_next_desc_get(txq);
2366
2367        tx_cmd = mvneta_skb_tx_csum(pp, skb);
2368
2369        tx_desc->data_size = skb_headlen(skb);
2370
2371        tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2372                                                tx_desc->data_size,
2373                                                DMA_TO_DEVICE);
2374        if (unlikely(dma_mapping_error(dev->dev.parent,
2375                                       tx_desc->buf_phys_addr))) {
2376                mvneta_txq_desc_put(txq);
2377                frags = 0;
2378                goto out;
2379        }
2380
2381        if (frags == 1) {
2382                /* First and Last descriptor */
2383                tx_cmd |= MVNETA_TXD_FLZ_DESC;
2384                tx_desc->command = tx_cmd;
2385                txq->tx_skb[txq->txq_put_index] = skb;
2386                mvneta_txq_inc_put(txq);
2387        } else {
2388                /* First but not Last */
2389                tx_cmd |= MVNETA_TXD_F_DESC;
2390                txq->tx_skb[txq->txq_put_index] = NULL;
2391                mvneta_txq_inc_put(txq);
2392                tx_desc->command = tx_cmd;
2393                /* Continue with other skb fragments */
2394                if (mvneta_tx_frag_process(pp, skb, txq)) {
2395                        dma_unmap_single(dev->dev.parent,
2396                                         tx_desc->buf_phys_addr,
2397                                         tx_desc->data_size,
2398                                         DMA_TO_DEVICE);
2399                        mvneta_txq_desc_put(txq);
2400                        frags = 0;
2401                        goto out;
2402                }
2403        }
2404
2405out:
2406        if (frags > 0) {
2407                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2408                struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2409
2410                netdev_tx_sent_queue(nq, len);
2411
2412                txq->count += frags;
2413                if (txq->count >= txq->tx_stop_threshold)
2414                        netif_tx_stop_queue(nq);
2415
2416                if (!skb->xmit_more || netif_xmit_stopped(nq) ||
2417                    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2418                        mvneta_txq_pend_desc_add(pp, txq, frags);
2419                else
2420                        txq->pending += frags;
2421
2422                u64_stats_update_begin(&stats->syncp);
2423                stats->tx_packets++;
2424                stats->tx_bytes  += len;
2425                u64_stats_update_end(&stats->syncp);
2426        } else {
2427                dev->stats.tx_dropped++;
2428                dev_kfree_skb_any(skb);
2429        }
2430
2431        return NETDEV_TX_OK;
2432}
2433
2434
2435/* Free tx resources when resetting a port */
2436static void mvneta_txq_done_force(struct mvneta_port *pp,
2437                                  struct mvneta_tx_queue *txq)
2438
2439{
2440        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2441        int tx_done = txq->count;
2442
2443        mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2444
2445        /* reset txq */
2446        txq->count = 0;
2447        txq->txq_put_index = 0;
2448        txq->txq_get_index = 0;
2449}
2450
2451/* Handle tx done - called in softirq context. The <cause_tx_done> argument
2452 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2453 */
2454static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2455{
2456        struct mvneta_tx_queue *txq;
2457        struct netdev_queue *nq;
2458
2459        while (cause_tx_done) {
2460                txq = mvneta_tx_done_policy(pp, cause_tx_done);
2461
2462                nq = netdev_get_tx_queue(pp->dev, txq->id);
2463                __netif_tx_lock(nq, smp_processor_id());
2464
2465                if (txq->count)
2466                        mvneta_txq_done(pp, txq);
2467
2468                __netif_tx_unlock(nq);
2469                cause_tx_done &= ~((1 << txq->id));
2470        }
2471}
2472
2473/* Compute the CRC-8 of the specified address, using an algorithm unique to
2474 * this hardware (per the HW spec), different from the generic CRC-8 algorithm.
2475 */
2476static int mvneta_addr_crc(unsigned char *addr)
2477{
2478        int crc = 0;
2479        int i;
2480
2481        for (i = 0; i < ETH_ALEN; i++) {
2482                int j;
2483
2484                crc = (crc ^ addr[i]) << 8;
2485                for (j = 7; j >= 0; j--) {
2486                        if (crc & (0x100 << j))
2487                                crc ^= 0x107 << j;
2488                }
2489        }
2490
2491        return crc;
2492}
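    /* Annotation (not part of the driver): the loop above implements a
     * bitwise CRC-8 with polynomial 0x107 (x^8 + x^2 + x + 1) over the six
     * address bytes; mvneta_mcast_addr_set() below uses the result as the
     * index into the Other Multicast table.
     */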
2493
2494/* This method controls the net device special MAC multicast support.
2495 * The Special Multicast Table for MAC addresses supports MAC addresses of
2496 * the form 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2497 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2498 * Table entries in the DA-Filter table. This method sets the appropriate
2499 * Special Multicast Table entry.
2500 */
2501static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2502                                          unsigned char last_byte,
2503                                          int queue)
2504{
2505        unsigned int smc_table_reg;
2506        unsigned int tbl_offset;
2507        unsigned int reg_offset;
2508
2509        /* Register offset from SMC table base    */
2510        tbl_offset = (last_byte / 4);
2511        /* Entry offset within the above reg */
2512        reg_offset = last_byte % 4;
2513
2514        smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2515                                        + tbl_offset * 4));
2516
2517        if (queue == -1)
2518                smc_table_reg &= ~(0xff << (8 * reg_offset));
2519        else {
2520                smc_table_reg &= ~(0xff << (8 * reg_offset));
2521                smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2522        }
2523
2524        mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2525                    smc_table_reg);
2526}
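    /* Worked example (annotation, not part of the driver): for
     * last_byte = 0x0a the code above computes tbl_offset = 10 / 4 = 2 and
     * reg_offset = 2, so byte 2 (bits 23:16) of the register at
     * MVNETA_DA_FILT_SPEC_MCAST + 2 * 4 is programmed with
     * 0x01 | (queue << 1).
     */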
2527
2528/* This method controls the network device Other MAC multicast support.
2529 * The Other Multicast Table is used for multicast of another type.
2530 * A CRC-8 is used as an index to the Other Multicast Table entries
2531 * in the DA-Filter table.
2532 * The method gets the CRC-8 value from the calling routine and sets
2533 * the appropriate Other Multicast Table entry according to the
2534 * specified CRC-8.
2535 */
2536static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2537                                        unsigned char crc8,
2538                                        int queue)
2539{
2540        unsigned int omc_table_reg;
2541        unsigned int tbl_offset;
2542        unsigned int reg_offset;
2543
2544        tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2545        reg_offset = crc8 % 4;       /* Entry offset within the above reg   */
2546
2547        omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2548
2549        if (queue == -1) {
2550                /* Clear accepts frame bit at specified Other DA table entry */
2551                omc_table_reg &= ~(0xff << (8 * reg_offset));
2552        } else {
2553                omc_table_reg &= ~(0xff << (8 * reg_offset));
2554                omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2555        }
2556
2557        mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2558}
2559
2560/* The network device supports multicast using two tables:
2561 *    1) Special Multicast Table for MAC addresses of the form
2562 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2563 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2564 *       Table entries in the DA-Filter table.
2565 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
2566 *       is used as an index to the Other Multicast Table entries in the
2567 *       DA-Filter table.
2568 */
2569static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2570                                 int queue)
2571{
2572        unsigned char crc_result = 0;
2573
2574        if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2575                mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2576                return 0;
2577        }
2578
2579        crc_result = mvneta_addr_crc(p_addr);
2580        if (queue == -1) {
2581                if (pp->mcast_count[crc_result] == 0) {
2582                        netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2583                                    crc_result);
2584                        return -EINVAL;
2585                }
2586
2587                pp->mcast_count[crc_result]--;
2588                if (pp->mcast_count[crc_result] != 0) {
2589                        netdev_info(pp->dev,
2590                                    "After delete there are %d valid Mcast for crc8=0x%02x\n",
2591                                    pp->mcast_count[crc_result], crc_result);
2592                        return -EINVAL;
2593                }
2594        } else
2595                pp->mcast_count[crc_result]++;
2596
2597        mvneta_set_other_mcast_addr(pp, crc_result, queue);
2598
2599        return 0;
2600}
2601
2602/* Configure the filtering mode of the Ethernet port */
2603static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2604                                          int is_promisc)
2605{
2606        u32 port_cfg_reg, val;
2607
2608        port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2609
2610        val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2611
2612        /* Set / Clear UPM bit in port configuration register */
2613        if (is_promisc) {
2614                /* Accept all Unicast addresses */
2615                port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2616                val |= MVNETA_FORCE_UNI;
2617                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2618                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2619        } else {
2620                /* Reject all Unicast addresses */
2621                port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2622                val &= ~MVNETA_FORCE_UNI;
2623        }
2624
2625        mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2626        mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2627}
2628
2629/* register unicast and multicast addresses */
2630static void mvneta_set_rx_mode(struct net_device *dev)
2631{
2632        struct mvneta_port *pp = netdev_priv(dev);
2633        struct netdev_hw_addr *ha;
2634
2635        if (dev->flags & IFF_PROMISC) {
2636                /* Accept all: Multicast + Unicast */
2637                mvneta_rx_unicast_promisc_set(pp, 1);
2638                mvneta_set_ucast_table(pp, pp->rxq_def);
2639                mvneta_set_special_mcast_table(pp, pp->rxq_def);
2640                mvneta_set_other_mcast_table(pp, pp->rxq_def);
2641        } else {
2642                /* Accept single Unicast */
2643                mvneta_rx_unicast_promisc_set(pp, 0);
2644                mvneta_set_ucast_table(pp, -1);
2645                mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2646
2647                if (dev->flags & IFF_ALLMULTI) {
2648                        /* Accept all multicast */
2649                        mvneta_set_special_mcast_table(pp, pp->rxq_def);
2650                        mvneta_set_other_mcast_table(pp, pp->rxq_def);
2651                } else {
2652                        /* Accept only initialized multicast */
2653                        mvneta_set_special_mcast_table(pp, -1);
2654                        mvneta_set_other_mcast_table(pp, -1);
2655
2656                        if (!netdev_mc_empty(dev)) {
2657                                netdev_for_each_mc_addr(ha, dev) {
2658                                        mvneta_mcast_addr_set(pp, ha->addr,
2659                                                              pp->rxq_def);
2660                                }
2661                        }
2662                }
2663        }
2664}
2665
2666/* Interrupt handling - the callback for request_irq() */
2667static irqreturn_t mvneta_isr(int irq, void *dev_id)
2668{
2669        struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2670
2671        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2672        napi_schedule(&pp->napi);
2673
2674        return IRQ_HANDLED;
2675}
2676
2677/* Interrupt handling - the callback for request_percpu_irq() */
2678static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2679{
2680        struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2681
2682        disable_percpu_irq(port->pp->dev->irq);
2683        napi_schedule(&port->napi);
2684
2685        return IRQ_HANDLED;
2686}
2687
2688static int mvneta_fixed_link_update(struct mvneta_port *pp,
2689                                    struct phy_device *phy)
2690{
2691        struct fixed_phy_status status;
2692        struct fixed_phy_status changed = {};
2693        u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2694
2695        status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2696        if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2697                status.speed = SPEED_1000;
2698        else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2699                status.speed = SPEED_100;
2700        else
2701                status.speed = SPEED_10;
2702        status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2703        changed.link = 1;
2704        changed.speed = 1;
2705        changed.duplex = 1;
2706        fixed_phy_update_state(phy, &status, &changed);
2707        return 0;
2708}
2709
2710/* NAPI handler
2711 * Bits 0-7 of the causeRxTx register indicate that packets were transmitted
2712 * on the corresponding TXQ (bit 0 is for TX queue 1).
2713 * Bits 8-15 of the causeRxTx register indicate that packets were received
2714 * on the corresponding RXQ (bit 8 is for RX queue 0).
2715 * Each CPU has its own causeRxTx register.
2716 */
2717static int mvneta_poll(struct napi_struct *napi, int budget)
2718{
2719        int rx_done = 0;
2720        u32 cause_rx_tx;
2721        int rx_queue;
2722        struct mvneta_port *pp = netdev_priv(napi->dev);
2723        struct net_device *ndev = pp->dev;
2724        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2725
2726        if (!netif_running(pp->dev)) {
2727                napi_complete(napi);
2728                return rx_done;
2729        }
2730
2731        /* Read cause register */
2732        cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2733        if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2734                u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2735
2736                mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2737                if (pp->use_inband_status && (cause_misc &
2738                                (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2739                                 MVNETA_CAUSE_LINK_CHANGE |
2740                                 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2741                        mvneta_fixed_link_update(pp, ndev->phydev);
2742                }
2743        }
2744
2745        /* Release Tx descriptors */
2746        if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2747                mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2748                cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2749        }
2750
2751        /* For the case where the last mvneta_poll did not process all
2752         * RX packets
2753         */
2754        rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2755
2756        cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2757                port->cause_rx_tx;
2758
2759        if (rx_queue) {
2760                rx_queue = rx_queue - 1;
2761                if (pp->bm_priv)
2762                        rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
2763                else
2764                        rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
2765        }
2766
2767        if (rx_done < budget) {
2768                cause_rx_tx = 0;
2769                napi_complete_done(napi, rx_done);
2770
2771                if (pp->neta_armada3700) {
2772                        unsigned long flags;
2773
2774                        local_irq_save(flags);
2775                        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2776                                    MVNETA_RX_INTR_MASK(rxq_number) |
2777                                    MVNETA_TX_INTR_MASK(txq_number) |
2778                                    MVNETA_MISCINTR_INTR_MASK);
2779                        local_irq_restore(flags);
2780                } else {
2781                        enable_percpu_irq(pp->dev->irq, 0);
2782                }
2783        }
2784
2785        if (pp->neta_armada3700)
2786                pp->cause_rx_tx = cause_rx_tx;
2787        else
2788                port->cause_rx_tx = cause_rx_tx;
2789
2790        return rx_done;
2791}
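    /* Worked example (annotation, not part of the driver): a causeRxTx
     * value of 0x0100 has only bit 8 set, so rx_queue = fls(0x01) = 1
     * above and RX queue 0 is polled; any TX bits (0-7) would have been
     * handled first by mvneta_tx_done_gbe().
     */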
2792
2793/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2794static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2795                           int num)
2796{
2797        int i;
2798
2799        for (i = 0; i < num; i++) {
2800                memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2801                if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
2802                        netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs  filled\n",
2803                                __func__, rxq->id, i, num);
2804                        break;
2805                }
2806        }
2807
2808        /* Add this number of RX descriptors as non-occupied (ready to
2809         * get packets)
2810         */
2811        mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2812
2813        return i;
2814}
2815
2816/* Free all packets pending transmit from all TXQs and reset TX port */
2817static void mvneta_tx_reset(struct mvneta_port *pp)
2818{
2819        int queue;
2820
2821        /* free the skb's in the tx ring */
2822        for (queue = 0; queue < txq_number; queue++)
2823                mvneta_txq_done_force(pp, &pp->txqs[queue]);
2824
2825        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2826        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2827}
2828
2829static void mvneta_rx_reset(struct mvneta_port *pp)
2830{
2831        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2832        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2833}
2834
2835/* Rx/Tx queue initialization/cleanup methods */
2836
2837/* Create a specified RX queue */
2838static int mvneta_rxq_init(struct mvneta_port *pp,
2839                           struct mvneta_rx_queue *rxq)
2840
2841{
2842        rxq->size = pp->rx_ring_size;
2843
2844        /* Allocate memory for RX descriptors */
2845        rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2846                                        rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2847                                        &rxq->descs_phys, GFP_KERNEL);
2848        if (!rxq->descs)
2849                return -ENOMEM;
2850
2851        rxq->last_desc = rxq->size - 1;
2852
2853        /* Set Rx descriptors queue starting address */
2854        mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2855        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2856
2857        /* Set Offset */
2858        mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
2859
2860        /* Set coalescing pkts and time */
2861        mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2862        mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2863
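            /* Two buffer management flavours follow: software buffer
             * management (the driver fills the ring itself via
             * mvneta_rxq_fill()) or the hardware buffer manager, which
             * hands out buffers from its long/short pools.
             */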
2864        if (!pp->bm_priv) {
2865                /* Fill RXQ with buffers from RX pool */
2866                mvneta_rxq_buf_size_set(pp, rxq,
2867                                        MVNETA_RX_BUF_SIZE(pp->pkt_size));
2868                mvneta_rxq_bm_disable(pp, rxq);
2869                mvneta_rxq_fill(pp, rxq, rxq->size);
2870        } else {
2871                mvneta_rxq_bm_enable(pp, rxq);
2872                mvneta_rxq_long_pool_set(pp, rxq);
2873                mvneta_rxq_short_pool_set(pp, rxq);
2874                mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
2875        }
2876
2877        return 0;
2878}
2879
2880/* Cleanup Rx queue */
2881static void mvneta_rxq_deinit(struct mvneta_port *pp,
2882                              struct mvneta_rx_queue *rxq)
2883{
2884        mvneta_rxq_drop_pkts(pp, rxq);
2885
2886        if (rxq->descs)
2887                dma_free_coherent(pp->dev->dev.parent,
2888                                  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2889                                  rxq->descs,
2890                                  rxq->descs_phys);
2891
2892        rxq->descs             = NULL;
2893        rxq->last_desc         = 0;
2894        rxq->next_desc_to_proc = 0;
2895        rxq->descs_phys        = 0;
2896}
2897
2898/* Create and initialize a tx queue */
2899static int mvneta_txq_init(struct mvneta_port *pp,
2900                           struct mvneta_tx_queue *txq)
2901{
2902        int cpu;
2903
2904        txq->size = pp->tx_ring_size;
2905
2906        /* A queue must always have room for at least one skb.
2907         * Therefore, stop the queue when the number of free entries
2908         * reaches the maximum number of descriptors per skb.
2909         */
2910        txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2911        txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
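            /* The wake threshold is half the stop threshold, which adds
             * hysteresis: the queue is only restarted once a fair number
             * of descriptors has been reclaimed, avoiding rapid
             * stop/wake cycling.
             */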
2912
2913
2914        /* Allocate memory for TX descriptors */
2915        txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2916                                        txq->size * MVNETA_DESC_ALIGNED_SIZE,
2917                                        &txq->descs_phys, GFP_KERNEL);
2918        if (!txq->descs)
2919                return -ENOMEM;
2920
2921        txq->last_desc = txq->size - 1;
2922
2923        /* Set maximum bandwidth for enabled TXQs */
2924        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2925        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2926
2927        /* Set Tx descriptors queue starting address */
2928        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2929        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2930
2931        txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
2932                                    GFP_KERNEL);
2933        if (!txq->tx_skb) {
2934                dma_free_coherent(pp->dev->dev.parent,
2935                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2936                                  txq->descs, txq->descs_phys);
2937                return -ENOMEM;
2938        }
2939
2940        /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2941        txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2942                                           txq->size * TSO_HEADER_SIZE,
2943                                           &txq->tso_hdrs_phys, GFP_KERNEL);
2944        if (!txq->tso_hdrs) {
2945                kfree(txq->tx_skb);
2946                dma_free_coherent(pp->dev->dev.parent,
2947                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2948                                  txq->descs, txq->descs_phys);
2949                return -ENOMEM;
2950        }
2951        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2952
2953        /* Setup XPS mapping */
2954        if (txq_number > 1)
2955                cpu = txq->id % num_present_cpus();
2956        else
2957                cpu = pp->rxq_def % num_present_cpus();
2958        cpumask_set_cpu(cpu, &txq->affinity_mask);
2959        netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
2960
2961        return 0;
2962}
2963
2964/* Free all the resources allocated for a TX queue */
2965static void mvneta_txq_deinit(struct mvneta_port *pp,
2966                              struct mvneta_tx_queue *txq)
2967{
2968        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2969
2970        kfree(txq->tx_skb);
2971
2972        if (txq->tso_hdrs)
2973                dma_free_coherent(pp->dev->dev.parent,
2974                                  txq->size * TSO_HEADER_SIZE,
2975                                  txq->tso_hdrs, txq->tso_hdrs_phys);
2976        if (txq->descs)
2977                dma_free_coherent(pp->dev->dev.parent,
2978                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2979                                  txq->descs, txq->descs_phys);
2980
2981        netdev_tx_reset_queue(nq);
2982
2983        txq->descs             = NULL;
2984        txq->last_desc         = 0;
2985        txq->next_desc_to_proc = 0;
2986        txq->descs_phys        = 0;
2987
2988        /* Set minimum bandwidth for disabled TXQs */
2989        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2990        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2991
2992        /* Set Tx descriptors queue starting address and size */
2993        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2994        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2995}
2996
2997/* Cleanup all Tx queues */
2998static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2999{
3000        int queue;
3001
3002        for (queue = 0; queue < txq_number; queue++)
3003                mvneta_txq_deinit(pp, &pp->txqs[queue]);
3004}
3005
3006/* Cleanup all Rx queues */
3007static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3008{
3009        int queue;
3010
3011        for (queue = 0; queue < rxq_number; queue++)
3012                mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3013}
3014
3015
3016/* Init all Rx queues */
3017static int mvneta_setup_rxqs(struct mvneta_port *pp)
3018{
3019        int queue;
3020
3021        for (queue = 0; queue < rxq_number; queue++) {
3022                int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3023
3024                if (err) {
3025                        netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3026                                   __func__, queue);
3027                        mvneta_cleanup_rxqs(pp);
3028                        return err;
3029                }
3030        }
3031
3032        return 0;
3033}
3034
3035/* Init all tx queues */
3036static int mvneta_setup_txqs(struct mvneta_port *pp)
3037{
3038        int queue;
3039
3040        for (queue = 0; queue < txq_number; queue++) {
3041                int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3042                if (err) {
3043                        netdev_err(pp->dev, "%s: can't create txq=%d\n",
3044                                   __func__, queue);
3045                        mvneta_cleanup_txqs(pp);
3046                        return err;
3047                }
3048        }
3049
3050        return 0;
3051}
3052
3053static void mvneta_start_dev(struct mvneta_port *pp)
3054{
3055        int cpu;
3056        struct net_device *ndev = pp->dev;
3057
3058        mvneta_max_rx_size_set(pp, pp->pkt_size);
3059        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3060
3061        /* start the Rx/Tx activity */
3062        mvneta_port_enable(pp);
3063
3064        if (!pp->neta_armada3700) {
3065                /* Enable polling on the port */
3066                for_each_online_cpu(cpu) {
3067                        struct mvneta_pcpu_port *port =
3068                                per_cpu_ptr(pp->ports, cpu);
3069
3070                        napi_enable(&port->napi);
3071                }
3072        } else {
3073                napi_enable(&pp->napi);
3074        }
3075
3076        /* Unmask interrupts. It has to be done from each CPU */
3077        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3078
3079        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3080                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3081                    MVNETA_CAUSE_LINK_CHANGE |
3082                    MVNETA_CAUSE_PSC_SYNC_CHANGE);
3083
3084        phy_start(ndev->phydev);
3085        netif_tx_start_all_queues(pp->dev);
3086}
3087
3088static void mvneta_stop_dev(struct mvneta_port *pp)
3089{
3090        unsigned int cpu;
3091        struct net_device *ndev = pp->dev;
3092
3093        phy_stop(ndev->phydev);
3094
3095        if (!pp->neta_armada3700) {
3096                for_each_online_cpu(cpu) {
3097                        struct mvneta_pcpu_port *port =
3098                                per_cpu_ptr(pp->ports, cpu);
3099
3100                        napi_disable(&port->napi);
3101                }
3102        } else {
3103                napi_disable(&pp->napi);
3104        }
3105
3106        netif_carrier_off(pp->dev);
3107
3108        mvneta_port_down(pp);
3109        netif_tx_stop_all_queues(pp->dev);
3110
3111        /* Stop the port activity */
3112        mvneta_port_disable(pp);
3113
3114        /* Clear all ethernet port interrupts */
3115        on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3116
3117        /* Mask all ethernet port interrupts */
3118        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3119
3120        mvneta_tx_reset(pp);
3121        mvneta_rx_reset(pp);
3122}
3123
3124static void mvneta_percpu_enable(void *arg)
3125{
3126        struct mvneta_port *pp = arg;
3127
3128        enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3129}
3130
3131static void mvneta_percpu_disable(void *arg)
3132{
3133        struct mvneta_port *pp = arg;
3134
3135        disable_percpu_irq(pp->dev->irq);
3136}
3137
3138/* Change the device mtu */
3139static int mvneta_change_mtu(struct net_device *dev, int mtu)
3140{
3141        struct mvneta_port *pp = netdev_priv(dev);
3142        int ret;
3143
3144        if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3145                netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3146                            mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3147                mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3148        }
3149
3150        dev->mtu = mtu;
3151
3152        if (!netif_running(dev)) {
3153                if (pp->bm_priv)
3154                        mvneta_bm_update_mtu(pp, mtu);
3155
3156                netdev_update_features(dev);
3157                return 0;
3158        }
3159
3160        /* The interface is running, so we have to force a
3161         * reallocation of the queues
3162         */
3163        mvneta_stop_dev(pp);
3164        on_each_cpu(mvneta_percpu_disable, pp, true);
3165
3166        mvneta_cleanup_txqs(pp);
3167        mvneta_cleanup_rxqs(pp);
3168
3169        if (pp->bm_priv)
3170                mvneta_bm_update_mtu(pp, mtu);
3171
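            /* pkt_size is what the port is programmed to accept;
             * frag_size is the per-buffer allocation: the aligned receive
             * area plus the skb_shared_info the stack appends when an skb
             * is built around the buffer.
             */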
3172        pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3173        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3174                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3175
3176        ret = mvneta_setup_rxqs(pp);
3177        if (ret) {
3178                netdev_err(dev, "unable to setup rxqs after MTU change\n");
3179                return ret;
3180        }
3181
3182        ret = mvneta_setup_txqs(pp);
3183        if (ret) {
3184                netdev_err(dev, "unable to setup txqs after MTU change\n");
3185                return ret;
3186        }
3187
3188        on_each_cpu(mvneta_percpu_enable, pp, true);
3189        mvneta_start_dev(pp);
3190        mvneta_port_up(pp);
3191
3192        netdev_update_features(dev);
3193
3194        return 0;
3195}
3196
3197static netdev_features_t mvneta_fix_features(struct net_device *dev,
3198                                             netdev_features_t features)
3199{
3200        struct mvneta_port *pp = netdev_priv(dev);
3201
3202        if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3203                features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3204                netdev_info(dev,
3205                            "Disable IP checksum for MTU greater than %dB\n",
3206                            pp->tx_csum_limit);
3207        }
3208
3209        return features;
3210}
3211
3212/* Get mac address */
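    /* The address is split across two registers: MAC_ADDR_HIGH holds
     * octets 0-3 and the low 16 bits of MAC_ADDR_LOW hold octets 4-5.
     */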
3213static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3214{
3215        u32 mac_addr_l, mac_addr_h;
3216
3217        mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3218        mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3219        addr[0] = (mac_addr_h >> 24) & 0xFF;
3220        addr[1] = (mac_addr_h >> 16) & 0xFF;
3221        addr[2] = (mac_addr_h >> 8) & 0xFF;
3222        addr[3] = mac_addr_h & 0xFF;
3223        addr[4] = (mac_addr_l >> 8) & 0xFF;
3224        addr[5] = mac_addr_l & 0xFF;
3225}
3226
3227/* Handle setting mac address */
3228static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3229{
3230        struct mvneta_port *pp = netdev_priv(dev);
3231        struct sockaddr *sockaddr = addr;
3232        int ret;
3233
3234        ret = eth_prepare_mac_addr_change(dev, addr);
3235        if (ret < 0)
3236                return ret;
3237        /* Remove previous address table entry */
3238        mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3239
3240        /* Set new addr in hw */
3241        mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3242
3243        eth_commit_mac_addr_change(dev, addr);
3244        return 0;
3245}
3246
3247static void mvneta_adjust_link(struct net_device *ndev)
3248{
3249        struct mvneta_port *pp = netdev_priv(ndev);
3250        struct phy_device *phydev = ndev->phydev;
3251        int status_change = 0;
3252
3253        if (phydev->link) {
3254                if ((pp->speed != phydev->speed) ||
3255                    (pp->duplex != phydev->duplex)) {
3256                        u32 val;
3257
3258                        val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3259                        val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3260                                 MVNETA_GMAC_CONFIG_GMII_SPEED |
3261                                 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3262
3263                        if (phydev->duplex)
3264                                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3265
3266                        if (phydev->speed == SPEED_1000)
3267                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3268                        else if (phydev->speed == SPEED_100)
3269                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3270
3271                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3272
3273                        pp->duplex = phydev->duplex;
3274                        pp->speed  = phydev->speed;
3275                }
3276        }
3277
3278        if (phydev->link != pp->link) {
3279                if (!phydev->link) {
3280                        pp->duplex = -1;
3281                        pp->speed = 0;
3282                }
3283
3284                pp->link = phydev->link;
3285                status_change = 1;
3286        }
3287
3288        if (status_change) {
3289                if (phydev->link) {
3290                        if (!pp->use_inband_status) {
3291                                u32 val = mvreg_read(pp,
3292                                                  MVNETA_GMAC_AUTONEG_CONFIG);
3293                                val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3294                                val |= MVNETA_GMAC_FORCE_LINK_PASS;
3295                                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3296                                            val);
3297                        }
3298                        mvneta_port_up(pp);
3299                } else {
3300                        if (!pp->use_inband_status) {
3301                                u32 val = mvreg_read(pp,
3302                                                  MVNETA_GMAC_AUTONEG_CONFIG);
3303                                val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3304                                val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3305                                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3306                                            val);
3307                        }
3308                        mvneta_port_down(pp);
3309                }
3310                phy_print_status(phydev);
3311        }
3312}
3313
3314static int mvneta_mdio_probe(struct mvneta_port *pp)
3315{
3316        struct phy_device *phy_dev;
3317        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3318
3319        phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
3320                                 pp->phy_interface);
3321        if (!phy_dev) {
3322                netdev_err(pp->dev, "could not find the PHY\n");
3323                return -ENODEV;
3324        }
3325
3326        phy_ethtool_get_wol(phy_dev, &wol);
3327        device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3328
3329        phy_dev->supported &= PHY_GBIT_FEATURES;
3330        phy_dev->advertising = phy_dev->supported;
3331
3332        pp->link    = 0;
3333        pp->duplex  = 0;
3334        pp->speed   = 0;
3335
3336        return 0;
3337}
3338
3339static void mvneta_mdio_remove(struct mvneta_port *pp)
3340{
3341        struct net_device *ndev = pp->dev;
3342
3343        phy_disconnect(ndev->phydev);
3344}
3345
3346/* Electing a CPU must be done in an atomic way: it should be done
3347 * either before or after the removal/insertion of a CPU, and this
3348 * function is not reentrant (callers serialize it under pp->lock).
3349 */
3350static void mvneta_percpu_elect(struct mvneta_port *pp)
3351{
3352        int elected_cpu = 0, max_cpu, cpu;
3353
3354        /* Use the CPU associated with the default RX queue when it is
3355         * online; in all other cases use CPU 0, which can't be offline.
3356         */
3357        if (cpu_online(pp->rxq_def))
3358                elected_cpu = pp->rxq_def;
3359
3360        max_cpu = num_present_cpus();
3361
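            /* Spread the RX queues round-robin over the present CPUs:
             * e.g. with four CPUs and eight RX queues, CPU0 gets queues
             * 0 and 4, CPU1 gets 1 and 5, and so on; the elected CPU
             * additionally owns the default RX queue.
             */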
3362        for_each_online_cpu(cpu) {
3363                int rxq_map = 0, txq_map = 0;
3364                int rxq;
3365
3366                for (rxq = 0; rxq < rxq_number; rxq++)
3367                        if ((rxq % max_cpu) == cpu)
3368                                rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3369
3370                if (cpu == elected_cpu)
3371                        /* Map the default receive queue to the
3372                         * elected CPU
3373                         */
3374                        rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3375
3376                /* We update the TX queue map only if we have one
3377                 * queue. In this case we associate the TX queue to
3378                 * the CPU bound to the default RX queue
3379                 */
3380                if (txq_number == 1)
3381                        txq_map = (cpu == elected_cpu) ?
3382                                MVNETA_CPU_TXQ_ACCESS(1) : 0;
3383                else
3384                        txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3385                                MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3386
3387                mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3388
3389                /* Update the interrupt mask on each CPU according to
3390                 * the new mapping
3391                 */
3392                smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
3393                                         pp, true);
3396        }
3397}
3398
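    /* CPU hotplug callbacks: when a CPU comes online the port is briefly
     * quiesced, queue ownership is re-elected and interrupts are
     * re-unmasked; the down-prepare and dead callbacks below undo this
     * for a departing CPU and re-run the election on the survivors.
     */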
3399static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3400{
3401        int other_cpu;
3402        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3403                                                  node_online);
3404        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3405
3406
3407        spin_lock(&pp->lock);
3408        /*
3409         * Configuring the driver for a new CPU while the driver is
3410         * stopping is racy, so just avoid it.
3411         */
3412        if (pp->is_stopped) {
3413                spin_unlock(&pp->lock);
3414                return 0;
3415        }
3416        netif_tx_stop_all_queues(pp->dev);
3417
3418        /*
3419         * We have to synchronise on the napi of each CPU except the one
3420         * just being brought up
3421         */
3422        for_each_online_cpu(other_cpu) {
3423                if (other_cpu != cpu) {
3424                        struct mvneta_pcpu_port *other_port =
3425                                per_cpu_ptr(pp->ports, other_cpu);
3426
3427                        napi_synchronize(&other_port->napi);
3428                }
3429        }
3430
3431        /* Mask all ethernet port interrupts */
3432        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3433        napi_enable(&port->napi);
3434
3435        /*
3436         * Enable per-CPU interrupts on the CPU that is
3437         * brought up.
3438         */
3439        mvneta_percpu_enable(pp);
3440
3441        /*
3442         * Elect the CPU handling the default RX queue and update the
3443         * per-CPU queue mappings accordingly.
3444         */
3445        mvneta_percpu_elect(pp);
3446
3447        /* Unmask all ethernet port interrupts */
3448        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3449        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3450                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3451                    MVNETA_CAUSE_LINK_CHANGE |
3452                    MVNETA_CAUSE_PSC_SYNC_CHANGE);
3453        netif_tx_start_all_queues(pp->dev);
3454        spin_unlock(&pp->lock);
3455        return 0;
3456}
3457
3458static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3459{
3460        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3461                                                  node_online);
3462        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3463
3464        /*
3465         * Thanks to this lock we are sure that any pending cpu election is
3466         * done.
3467         */
3468        spin_lock(&pp->lock);
3469        /* Mask all ethernet port interrupts */
3470        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3471        spin_unlock(&pp->lock);
3472
3473        napi_synchronize(&port->napi);
3474        napi_disable(&port->napi);
3475        /* Disable per-CPU interrupts on the CPU that is brought down. */
3476        mvneta_percpu_disable(pp);
3477        return 0;
3478}
3479
3480static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3481{
3482        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3483                                                  node_dead);
3484
3485        /* Check if a new CPU must be elected now that this one is down */
3486        spin_lock(&pp->lock);
3487        mvneta_percpu_elect(pp);
3488        spin_unlock(&pp->lock);
3489        /* Unmask all ethernet port interrupts */
3490        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3491        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3492                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3493                    MVNETA_CAUSE_LINK_CHANGE |
3494                    MVNETA_CAUSE_PSC_SYNC_CHANGE);
3495        netif_tx_start_all_queues(pp->dev);
3496        return 0;
3497}
3498
3499static int mvneta_open(struct net_device *dev)
3500{
3501        struct mvneta_port *pp = netdev_priv(dev);
3502        int ret;
3503
3504        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3505        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3506                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3507
3508        ret = mvneta_setup_rxqs(pp);
3509        if (ret)
3510                return ret;
3511
3512        ret = mvneta_setup_txqs(pp);
3513        if (ret)
3514                goto err_cleanup_rxqs;
3515
3516        /* Connect to port interrupt line */
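            /* Armada 3700 exposes a single shared interrupt, while the
             * Armada XP/370 variants use a per-CPU interrupt line, hence
             * the two request paths below.
             */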
3517        if (pp->neta_armada3700)
3518                ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3519                                  dev->name, pp);
3520        else
3521                ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3522                                         dev->name, pp->ports);
3523        if (ret) {
3524                netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3525                goto err_cleanup_txqs;
3526        }
3527
3528        if (!pp->neta_armada3700) {
3529                /* Enable the per-CPU interrupt on all CPUs to handle our
3530                 * RX queue interrupts
3531                 */
3532                on_each_cpu(mvneta_percpu_enable, pp, true);
3533
3534                pp->is_stopped = false;
3535                /* Register a CPU notifier to handle the case where our CPU
3536                 * might be taken offline.
3537                 */
3538                ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3539                                                       &pp->node_online);
3540                if (ret)
3541                        goto err_free_irq;
3542
3543                ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3544                                                       &pp->node_dead);
3545                if (ret)
3546                        goto err_free_online_hp;
3547        }
3548
3549        /* By default, the link is down */
3550        netif_carrier_off(pp->dev);
3551
3552        ret = mvneta_mdio_probe(pp);
3553        if (ret < 0) {
3554                netdev_err(dev, "cannot probe MDIO bus\n");
3555                goto err_free_dead_hp;
3556        }
3557
3558        mvneta_start_dev(pp);
3559
3560        return 0;
3561
3562err_free_dead_hp:
3563        if (!pp->neta_armada3700)
3564                cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3565                                                    &pp->node_dead);
3566err_free_online_hp:
3567        if (!pp->neta_armada3700)
3568                cpuhp_state_remove_instance_nocalls(online_hpstate,
3569                                                    &pp->node_online);
3570err_free_irq:
3571        if (pp->neta_armada3700) {
3572                free_irq(pp->dev->irq, pp);
3573        } else {
3574                on_each_cpu(mvneta_percpu_disable, pp, true);
3575                free_percpu_irq(pp->dev->irq, pp->ports);
3576        }
3577err_cleanup_txqs:
3578        mvneta_cleanup_txqs(pp);
3579err_cleanup_rxqs:
3580        mvneta_cleanup_rxqs(pp);
3581        return ret;
3582}
3583
3584/* Stop the port, free port interrupt line */
3585static int mvneta_stop(struct net_device *dev)
3586{
3587        struct mvneta_port *pp = netdev_priv(dev);
3588
3589        if (!pp->neta_armada3700) {
3590                /* Mark that we are stopping, so the notifiers do not set up
3591                 * the driver for new CPUs. The CPU-online notifier is
3592                 * protected by the same spinlock, so once we hold the lock
3593                 * any pending notifier work has completed.
3594                 */
3595                spin_lock(&pp->lock);
3596                pp->is_stopped = true;
3597                spin_unlock(&pp->lock);
3598
3599                mvneta_stop_dev(pp);
3600                mvneta_mdio_remove(pp);
3601
3602                cpuhp_state_remove_instance_nocalls(online_hpstate,
3603                                                    &pp->node_online);
3604                cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3605                                                    &pp->node_dead);
3606                on_each_cpu(mvneta_percpu_disable, pp, true);
3607                free_percpu_irq(dev->irq, pp->ports);
3608        } else {
3609                mvneta_stop_dev(pp);
3610                mvneta_mdio_remove(pp);
3611                free_irq(dev->irq, pp);
3612        }
3613
3614        mvneta_cleanup_rxqs(pp);
3615        mvneta_cleanup_txqs(pp);
3616
3617        return 0;
3618}
3619
3620static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3621{
3622        if (!dev->phydev)
3623                return -ENOTSUPP;
3624
3625        return phy_mii_ioctl(dev->phydev, ifr, cmd);
3626}
3627
3628/* Ethtool methods */
3629
3630/* Set link ksettings (phy address, speed) for ethtools */
3631static int
3632mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3633                                  const struct ethtool_link_ksettings *cmd)
3634{
3635        struct mvneta_port *pp = netdev_priv(ndev);
3636        struct phy_device *phydev = ndev->phydev;
3637
3638        if (!phydev)
3639                return -ENODEV;
3640
3641        if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
3642                u32 val;
3643
3644                mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
3645
3646                if (cmd->base.autoneg == AUTONEG_DISABLE) {
3647                        val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3648                        val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3649                                 MVNETA_GMAC_CONFIG_GMII_SPEED |
3650                                 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3651
3652                        if (phydev->duplex)
3653                                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3654
3655                        if (phydev->speed == SPEED_1000)
3656                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3657                        else if (phydev->speed == SPEED_100)
3658                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3659
3660                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3661                }
3662
3663                pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
3664                netdev_info(pp->dev, "autoneg status set to %i\n",
3665                            pp->use_inband_status);
3666
3667                if (netif_running(ndev)) {
3668                        mvneta_port_down(pp);
3669                        mvneta_port_up(pp);
3670                }
3671        }
3672
3673        return phy_ethtool_ksettings_set(ndev->phydev, cmd);
3674}
3675
3676/* Set interrupt coalescing for ethtools */
3677static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3678                                       struct ethtool_coalesce *c)
3679{
3680        struct mvneta_port *pp = netdev_priv(dev);
3681        int queue;
3682
3683        for (queue = 0; queue < rxq_number; queue++) {
3684                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3685                rxq->time_coal = c->rx_coalesce_usecs;
3686                rxq->pkts_coal = c->rx_max_coalesced_frames;
3687                mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3688                mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3689        }
3690
3691        for (queue = 0; queue < txq_number; queue++) {
3692                struct mvneta_tx_queue *txq = &pp->txqs[queue];
3693                txq->done_pkts_coal = c->tx_max_coalesced_frames;
3694                mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3695        }
3696
3697        return 0;
3698}
3699
3700/* get coalescing for ethtools */
3701static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3702                                       struct ethtool_coalesce *c)
3703{
3704        struct mvneta_port *pp = netdev_priv(dev);
3705
3706        c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
3707        c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
3708
3709        c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
3710        return 0;
3711}
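    /* These two hooks back the standard ethtool coalescing interface;
     * e.g. "ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16"
     * (device name illustrative) ends up in mvneta_ethtool_set_coalesce()
     * above, which updates every RX/TX queue.
     */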
3712
3713
3714static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3715                                    struct ethtool_drvinfo *drvinfo)
3716{
3717        strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3718                sizeof(drvinfo->driver));
3719        strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3720                sizeof(drvinfo->version));
3721        strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3722                sizeof(drvinfo->bus_info));
3723}
3724
3725
3726static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3727                                         struct ethtool_ringparam *ring)
3728{
3729        struct mvneta_port *pp = netdev_priv(netdev);
3730
3731        ring->rx_max_pending = MVNETA_MAX_RXD;
3732        ring->tx_max_pending = MVNETA_MAX_TXD;
3733        ring->rx_pending = pp->rx_ring_size;
3734        ring->tx_pending = pp->tx_ring_size;
3735}
3736
3737static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3738                                        struct ethtool_ringparam *ring)
3739{
3740        struct mvneta_port *pp = netdev_priv(dev);
3741
3742        if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3743                return -EINVAL;
3744        pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3745                ring->rx_pending : MVNETA_MAX_RXD;
3746
3747        pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3748                                   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3749        if (pp->tx_ring_size != ring->tx_pending)
3750                netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3751                            pp->tx_ring_size, ring->tx_pending);
3752
3753        if (netif_running(dev)) {
3754                mvneta_stop(dev);
3755                if (mvneta_open(dev)) {
3756                        netdev_err(dev,
3757                                   "error on opening device after ring param change\n");
3758                        return -ENOMEM;
3759                }
3760        }
3761
3762        return 0;
3763}
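    /* Resizing the rings goes through a full stop/open cycle while the
     * interface is running; e.g. "ethtool -G eth0 rx 256 tx 512" (device
     * name illustrative) lands here, with the TX size clamped to at
     * least twice MVNETA_MAX_SKB_DESCS.
     */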
3764
3765static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
3766                                       u8 *data)
3767{
3768        if (sset == ETH_SS_STATS) {
3769                int i;
3770
3771                for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3772                        memcpy(data + i * ETH_GSTRING_LEN,
3773                               mvneta_statistics[i].name, ETH_GSTRING_LEN);
3774        }
3775}
3776
3777static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3778{
3779        const struct mvneta_statistic *s;
3780        void __iomem *base = pp->base;
3781        u32 high, low, val;
3782        u64 val64;
3783        int i;
3784
3785        for (i = 0, s = mvneta_statistics;
3786             s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3787             s++, i++) {
3788                switch (s->type) {
3789                case T_REG_32:
3790                        val = readl_relaxed(base + s->offset);
3791                        pp->ethtool_stats[i] += val;
3792                        break;
3793                case T_REG_64:
3794                        /* Docs say to read low 32-bit then high */
3795                        low = readl_relaxed(base + s->offset);
3796                        high = readl_relaxed(base + s->offset + 4);
3797                        val64 = (u64)high << 32 | low;
3798                        pp->ethtool_stats[i] += val64;
3799                        break;
3800                }
3801        }
3802}
3803
3804static void mvneta_ethtool_get_stats(struct net_device *dev,
3805                                     struct ethtool_stats *stats, u64 *data)
3806{
3807        struct mvneta_port *pp = netdev_priv(dev);
3808        int i;
3809
3810        mvneta_ethtool_update_stats(pp);
3811
3812        for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3813                *data++ = pp->ethtool_stats[i];
3814}
3815
3816static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
3817{
3818        if (sset == ETH_SS_STATS)
3819                return ARRAY_SIZE(mvneta_statistics);
3820        return -EOPNOTSUPP;
3821}
3822
3823static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
3824{
3825        return MVNETA_RSS_LU_TABLE_SIZE;
3826}
3827
3828static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
3829                                    struct ethtool_rxnfc *info,
3830                                    u32 *rules __always_unused)
3831{
3832        switch (info->cmd) {
3833        case ETHTOOL_GRXRINGS:
3834                info->data =  rxq_number;
3835                return 0;
3836        case ETHTOOL_GRXFH:
3837                return -EOPNOTSUPP;
3838        default:
3839                return -EOPNOTSUPP;
3840        }
3841}
3842
3843static int  mvneta_config_rss(struct mvneta_port *pp)
3844{
3845        int cpu;
3846        u32 val;
3847
3848        netif_tx_stop_all_queues(pp->dev);
3849
3850        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3851
3852        /* We have to synchronise on the napi of each CPU */
3853        for_each_online_cpu(cpu) {
3854                struct mvneta_pcpu_port *pcpu_port =
3855                        per_cpu_ptr(pp->ports, cpu);
3856
3857                napi_synchronize(&pcpu_port->napi);
3858                napi_disable(&pcpu_port->napi);
3859        }
3860
3861        pp->rxq_def = pp->indir[0];
3862
3863        /* Update unicast mapping */
3864        mvneta_set_rx_mode(pp->dev);
3865
3866        /* Update val of portCfg register accordingly with all RxQueue types */
3867        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
3868        mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3869
3870        /* Update the elected CPU matching the new rxq_def */
3871        spin_lock(&pp->lock);
3872        mvneta_percpu_elect(pp);
3873        spin_unlock(&pp->lock);
3874
3875        /* We have to synchronise on the napi of each CPU */
3876        for_each_online_cpu(cpu) {
3877                struct mvneta_pcpu_port *pcpu_port =
3878                        per_cpu_ptr(pp->ports, cpu);
3879
3880                napi_enable(&pcpu_port->napi);
3881        }
3882
3883        netif_tx_start_all_queues(pp->dev);
3884
3885        return 0;
3886}
3887
3888static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3889                                   const u8 *key, const u8 hfunc)
3890{
3891        struct mvneta_port *pp = netdev_priv(dev);
3892
3893        /* Current code for Armada 3700 doesn't support RSS features yet */
3894        if (pp->neta_armada3700)
3895                return -EOPNOTSUPP;
3896
3897        /* We require at least one supported parameter to be changed
3898         * and no change in any of the unsupported parameters
3899         */
3900        if (key ||
3901            (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3902                return -EOPNOTSUPP;
3903
3904        if (!indir)
3905                return 0;
3906
3907        memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3908
3909        return mvneta_config_rss(pp);
3910}
3911
3912static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3913                                   u8 *hfunc)
3914{
3915        struct mvneta_port *pp = netdev_priv(dev);
3916
3917        /* Current code for Armada 3700 doesn't support RSS features yet */
3918        if (pp->neta_armada3700)
3919                return -EOPNOTSUPP;
3920
3921        if (hfunc)
3922                *hfunc = ETH_RSS_HASH_TOP;
3923
3924        if (!indir)
3925                return 0;
3926
3927        memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
3928
3929        return 0;
3930}
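    /* The RSS indirection table can be inspected or changed with e.g.
     * "ethtool -x eth0" / "ethtool -X eth0 equal 4" (device name
     * illustrative); only the indirection entries are configurable, the
     * hash key and hash function are fixed.
     */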
3931
3932static void mvneta_ethtool_get_wol(struct net_device *dev,
3933                                   struct ethtool_wolinfo *wol)
3934{
3935        wol->supported = 0;
3936        wol->wolopts = 0;
3937
3938        if (dev->phydev)
3939                phy_ethtool_get_wol(dev->phydev, wol);
3940}
3941
3942static int mvneta_ethtool_set_wol(struct net_device *dev,
3943                                  struct ethtool_wolinfo *wol)
3944{
3945        int ret;
3946
3947        if (!dev->phydev)
3948                return -EOPNOTSUPP;
3949
3950        ret = phy_ethtool_set_wol(dev->phydev, wol);
3951        if (!ret)
3952                device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
3953
3954        return ret;
3955}
3956
3957static const struct net_device_ops mvneta_netdev_ops = {
3958        .ndo_open            = mvneta_open,
3959        .ndo_stop            = mvneta_stop,
3960        .ndo_start_xmit      = mvneta_tx,
3961        .ndo_set_rx_mode     = mvneta_set_rx_mode,
3962        .ndo_set_mac_address = mvneta_set_mac_addr,
3963        .ndo_change_mtu      = mvneta_change_mtu,
3964        .ndo_fix_features    = mvneta_fix_features,
3965        .ndo_get_stats64     = mvneta_get_stats64,
3966        .ndo_do_ioctl        = mvneta_ioctl,
3967};
3968
3969static const struct ethtool_ops mvneta_eth_tool_ops = {
3970        .nway_reset     = phy_ethtool_nway_reset,
3971        .get_link       = ethtool_op_get_link,
3972        .set_coalesce   = mvneta_ethtool_set_coalesce,
3973        .get_coalesce   = mvneta_ethtool_get_coalesce,
3974        .get_drvinfo    = mvneta_ethtool_get_drvinfo,
3975        .get_ringparam  = mvneta_ethtool_get_ringparam,
3976        .set_ringparam  = mvneta_ethtool_set_ringparam,
3977        .get_strings    = mvneta_ethtool_get_strings,
3978        .get_ethtool_stats = mvneta_ethtool_get_stats,
3979        .get_sset_count = mvneta_ethtool_get_sset_count,
3980        .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3981        .get_rxnfc      = mvneta_ethtool_get_rxnfc,
3982        .get_rxfh       = mvneta_ethtool_get_rxfh,
3983        .set_rxfh       = mvneta_ethtool_set_rxfh,
3984        .get_link_ksettings = phy_ethtool_get_link_ksettings,
3985        .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
3986        .get_wol        = mvneta_ethtool_get_wol,
3987        .set_wol        = mvneta_ethtool_set_wol,
3988};
3989
3990/* Initialize hw */
3991static int mvneta_init(struct device *dev, struct mvneta_port *pp)
3992{
3993        int queue;
3994
3995        /* Disable port */
3996        mvneta_port_disable(pp);
3997
3998        /* Set port default values */
3999        mvneta_defaults_set(pp);
4000
4001        pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4002        if (!pp->txqs)
4003                return -ENOMEM;
4004
4005        /* Initialize TX descriptor rings */
4006        for (queue = 0; queue < txq_number; queue++) {
4007                struct mvneta_tx_queue *txq = &pp->txqs[queue];
4008                txq->id = queue;
4009                txq->size = pp->tx_ring_size;
4010                txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4011        }
4012
4013        pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4014        if (!pp->rxqs)
4015                return -ENOMEM;
4016
4017        /* Create Rx descriptor rings */
4018        for (queue = 0; queue < rxq_number; queue++) {
4019                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4020                rxq->id = queue;
4021                rxq->size = pp->rx_ring_size;
4022                rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4023                rxq->time_coal = MVNETA_RX_COAL_USEC;
4024                rxq->buf_virt_addr
4025                        = devm_kmalloc_array(pp->dev->dev.parent,
4026                                             rxq->size,
4027                                             sizeof(*rxq->buf_virt_addr),
4028                                             GFP_KERNEL);
4029                if (!rxq->buf_virt_addr)
4030                        return -ENOMEM;
4031        }
4032
4033        return 0;
4034}
4035
4036/* platform glue : initialize decoding windows */
4037static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4038                                     const struct mbus_dram_target_info *dram)
4039{
4040        u32 win_enable;
4041        u32 win_protect;
4042        int i;
4043
4044        for (i = 0; i < 6; i++) {
4045                mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4046                mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4047
4048                if (i < 4)
4049                        mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4050        }
4051
4052        win_enable = 0x3f;
4053        win_protect = 0;
4054
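            /* win_enable starts with all six window bits set and a bit
             * is cleared for every window that gets programmed below;
             * win_protect accumulates full-access bits (0x3 per window).
             */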
4055        if (dram) {
4056                for (i = 0; i < dram->num_cs; i++) {
4057                        const struct mbus_dram_window *cs = dram->cs + i;
4058
4059                        mvreg_write(pp, MVNETA_WIN_BASE(i),
4060                                    (cs->base & 0xffff0000) |
4061                                    (cs->mbus_attr << 8) |
4062                                    dram->mbus_dram_target_id);
4063
4064                        mvreg_write(pp, MVNETA_WIN_SIZE(i),
4065                                    (cs->size - 1) & 0xffff0000);
4066
4067                        win_enable &= ~(1 << i);
4068                        win_protect |= 3 << (2 * i);
4069                }
4070        } else {
4071                /* For Armada 3700, open a default 4GB MBus window, leaving
4072                 * arbitration of target/attribute to a different layer
4073                 * of configuration.
4074                 */
4075                mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4076                win_enable &= ~BIT(0);
4077                win_protect = 3;
4078        }
4079
4080        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
4081        mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
4082}
4083
4084/* Power up the port */
4085static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4086{
4087        u32 ctrl;
4088
4089        /* MAC Cause register should be cleared */
4090        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4091
4092        ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4093
4094        /* Even though it might look weird, when we're configured in
4095         * SGMII or QSGMII mode, the RGMII bit needs to be set.
4096         */
4097        switch (phy_mode) {
4098        case PHY_INTERFACE_MODE_QSGMII:
4099                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4100                ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4101                break;
4102        case PHY_INTERFACE_MODE_SGMII:
4103                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4104                ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4105                break;
4106        case PHY_INTERFACE_MODE_RGMII:
4107        case PHY_INTERFACE_MODE_RGMII_ID:
4108        case PHY_INTERFACE_MODE_RGMII_RXID:
4109        case PHY_INTERFACE_MODE_RGMII_TXID:
4110                ctrl |= MVNETA_GMAC2_PORT_RGMII;
4111                break;
4112        default:
4113                return -EINVAL;
4114        }
4115
4116        /* Cancel Port Reset */
4117        ctrl &= ~MVNETA_GMAC2_PORT_RESET;
4118        mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
4119
4120        while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4121                MVNETA_GMAC2_PORT_RESET) != 0)
4122                continue;
4123
4124        return 0;
4125}
4126
4127/* Device initialization routine */
4128static int mvneta_probe(struct platform_device *pdev)
4129{
4130        struct resource *res;
4131        struct device_node *dn = pdev->dev.of_node;
4132        struct device_node *phy_node;
4133        struct device_node *bm_node;
4134        struct mvneta_port *pp;
4135        struct net_device *dev;
4136        const char *dt_mac_addr;
4137        char hw_mac_addr[ETH_ALEN];
4138        const char *mac_from;
4139        const char *managed;
4140        int tx_csum_limit;
4141        int phy_mode;
4142        int err;
4143        int cpu;
4144
4145        dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
4146        if (!dev)
4147                return -ENOMEM;
4148
4149        dev->irq = irq_of_parse_and_map(dn, 0);
4150        if (dev->irq == 0) {
4151                err = -EINVAL;
4152                goto err_free_netdev;
4153        }
4154
4155        phy_node = of_parse_phandle(dn, "phy", 0);
4156        if (!phy_node) {
4157                if (!of_phy_is_fixed_link(dn)) {
4158                        dev_err(&pdev->dev, "no PHY specified\n");
4159                        err = -ENODEV;
4160                        goto err_free_irq;
4161                }
4162
4163                err = of_phy_register_fixed_link(dn);
4164                if (err < 0) {
4165                        dev_err(&pdev->dev, "cannot register fixed PHY\n");
4166                        goto err_free_irq;
4167                }
4168
4169                /* In the case of a fixed PHY, the DT node associated
4170                 * to the PHY is the Ethernet MAC DT node.
4171                 */
4172                phy_node = of_node_get(dn);
4173        }
4174
4175        phy_mode = of_get_phy_mode(dn);
4176        if (phy_mode < 0) {
4177                dev_err(&pdev->dev, "incorrect phy-mode\n");
4178                err = -EINVAL;
4179                goto err_put_phy_node;
4180        }
4181
4182        dev->tx_queue_len = MVNETA_MAX_TXD;
4183        dev->watchdog_timeo = 5 * HZ;
4184        dev->netdev_ops = &mvneta_netdev_ops;
4185
4186        dev->ethtool_ops = &mvneta_eth_tool_ops;
4187
4188        pp = netdev_priv(dev);
4189        spin_lock_init(&pp->lock);
4190        pp->phy_node = phy_node;
4191        pp->phy_interface = phy_mode;
4192
4193        err = of_property_read_string(dn, "managed", &managed);
4194        pp->use_inband_status = (err == 0 &&
4195                                 strcmp(managed, "in-band-status") == 0);
4196
4197        pp->rxq_def = rxq_def;
4198
4199        /* Set the RX packet offset correction for platforms whose
4200         * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
4201         * platforms and 0B for 32-bit ones.
4202         */
4203        pp->rx_offset_correction =
4204                max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
4205
4206        pp->indir[0] = rxq_def;
4207
4208        /* Get special SoC configurations */
4209        if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4210                pp->neta_armada3700 = true;
4211
4212        pp->clk = devm_clk_get(&pdev->dev, "core");
4213        if (IS_ERR(pp->clk))
4214                pp->clk = devm_clk_get(&pdev->dev, NULL);
4215        if (IS_ERR(pp->clk)) {
4216                err = PTR_ERR(pp->clk);
4217                goto err_put_phy_node;
4218        }
4219
4220        clk_prepare_enable(pp->clk);
4221
4222        pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4223        if (!IS_ERR(pp->clk_bus))
4224                clk_prepare_enable(pp->clk_bus);
4225
4226        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4227        pp->base = devm_ioremap_resource(&pdev->dev, res);
4228        if (IS_ERR(pp->base)) {
4229                err = PTR_ERR(pp->base);
4230                goto err_clk;
4231        }
4232
4233        /* Alloc per-cpu port structure */
4234        pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4235        if (!pp->ports) {
4236                err = -ENOMEM;
4237                goto err_clk;
4238        }
4239
4240        /* Alloc per-cpu stats */
4241        pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
4242        if (!pp->stats) {
4243                err = -ENOMEM;
4244                goto err_free_ports;
4245        }
4246
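            /* MAC address selection order: device tree property first,
             * then whatever the bootloader left in the hardware
             * registers, and finally a random address as a last resort.
             */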
4247        dt_mac_addr = of_get_mac_address(dn);
4248        if (dt_mac_addr) {
4249                mac_from = "device tree";
4250                memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
4251        } else {
4252                mvneta_get_mac_addr(pp, hw_mac_addr);
4253                if (is_valid_ether_addr(hw_mac_addr)) {
4254                        mac_from = "hardware";
4255                        memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4256                } else {
4257                        mac_from = "random";
4258                        eth_hw_addr_random(dev);
4259                }
4260        }
4261
4262        if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4263                if (tx_csum_limit < 0 ||
4264                    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4265                        tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4266                        dev_info(&pdev->dev,
4267                                 "Wrong TX csum limit in DT, set to %dB\n",
4268                                 MVNETA_TX_CSUM_DEF_SIZE);
4269                }
4270        } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4271                tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4272        } else {
4273                tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4274        }
4275
4276        pp->tx_csum_limit = tx_csum_limit;
4277
4278        pp->dram_target_info = mv_mbus_dram_info();
4279        /* Armada 3700 requires a default MBus window configuration,
4280         * but without using a filled mbus_dram_target_info
4281         * structure.
4282         */
4283        if (pp->dram_target_info || pp->neta_armada3700)
4284                mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4285
4286        pp->tx_ring_size = MVNETA_MAX_TXD;
4287        pp->rx_ring_size = MVNETA_MAX_RXD;
4288
4289        pp->dev = dev;
4290        SET_NETDEV_DEV(dev, &pdev->dev);
4291
4292        pp->id = global_port_id++;
4293
4294        /* Obtain access to BM resources if enabled and already initialized */
4295        bm_node = of_parse_phandle(dn, "buffer-manager", 0);
4296        if (bm_node && bm_node->data) {
4297                pp->bm_priv = bm_node->data;
4298                err = mvneta_bm_port_init(pdev, pp);
4299                if (err < 0) {
4300                        dev_info(&pdev->dev, "use SW buffer management\n");
4301                        pp->bm_priv = NULL;
4302                }
4303        }
4304        of_node_put(bm_node);
4305
4306        err = mvneta_init(&pdev->dev, pp);
4307        if (err < 0)
4308                goto err_netdev;
4309
4310        err = mvneta_port_power_up(pp, phy_mode);
4311        if (err < 0) {
4312                dev_err(&pdev->dev, "can't power up port\n");
4313                goto err_netdev;
4314        }
4315
4316        /* The Armada 3700 network controller does not support per-CPU
4317         * operation, so only a single NAPI instance is initialized.
4318         */
4319        if (pp->neta_armada3700) {
4320                netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
4321        } else {
4322                for_each_present_cpu(cpu) {
4323                        struct mvneta_pcpu_port *port =
4324                                per_cpu_ptr(pp->ports, cpu);
4325
4326                        netif_napi_add(dev, &port->napi, mvneta_poll,
4327                                       NAPI_POLL_WEIGHT);
4328                        port->pp = pp;
4329                }
4330        }
4331
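            /* Advertise scatter/gather, IPv4/IPv6 TX checksumming and TSO.
             * Mirroring the flags into hw_features makes them toggleable
             * through ethtool, IFF_LIVE_ADDR_CHANGE allows the MAC address
             * to change while the interface is up, and gso_max_segs bounds
             * the TSO segment count per packet.
             */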
4332        dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
4333        dev->hw_features |= dev->features;
4334        dev->vlan_features |= dev->features;
4335        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4336        dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
4337
4338        /* MTU range: 68 - 9676 */
4339        dev->min_mtu = ETH_MIN_MTU;
4340        /* 9676 == 9700 - 20, rounded down so MTU + 20 stays a multiple of 8 */
4341        dev->max_mtu = 9676;
4342
4343        err = register_netdev(dev);
4344        if (err < 0) {
4345                dev_err(&pdev->dev, "failed to register\n");
4346                goto err_free_stats;
4347        }
4348
4349        netdev_info(dev, "Using %s mac address %pM\n", mac_from,
4350                    dev->dev_addr);
4351
4352        platform_set_drvdata(pdev, pp->dev);
4353
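            /* With in-band (fixed-link) status, look up the phy_device
             * attached to this node and push its current link state into
             * the MAC once at probe time; put_device() drops the reference
             * taken by of_phy_find_device().
             */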
4354        if (pp->use_inband_status) {
4355                struct phy_device *phy = of_phy_find_device(dn);
4356
4357                mvneta_fixed_link_update(pp, phy);
4358
4359                put_device(&phy->mdio.dev);
4360        }
4361
4362        return 0;
4363
4364err_netdev:
4365        /* only reached before register_netdev(), so nothing to unregister */
4366        if (pp->bm_priv) {
4367                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4368                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4369                                       1 << pp->id);
4370        }
4371err_free_stats:
4372        free_percpu(pp->stats);
4373err_free_ports:
4374        free_percpu(pp->ports);
4375err_clk:
4376        clk_disable_unprepare(pp->clk_bus);
4377        clk_disable_unprepare(pp->clk);
4378err_put_phy_node:
4379        of_node_put(phy_node);
4380        if (of_phy_is_fixed_link(dn))
4381                of_phy_deregister_fixed_link(dn);
4382err_free_irq:
4383        irq_dispose_mapping(dev->irq);
4384err_free_netdev:
4385        free_netdev(dev);
4386        return err;
4387}
4388
4389/* Device removal routine */
4390static int mvneta_remove(struct platform_device *pdev)
4391{
4392        struct net_device  *dev = platform_get_drvdata(pdev);
4393        struct device_node *dn = pdev->dev.of_node;
4394        struct mvneta_port *pp = netdev_priv(dev);
4395
4396        unregister_netdev(dev);
4397        clk_disable_unprepare(pp->clk_bus);
4398        clk_disable_unprepare(pp->clk);
4399        free_percpu(pp->ports);
4400        free_percpu(pp->stats);
4401        if (of_phy_is_fixed_link(dn))
4402                of_phy_deregister_fixed_link(dn);
4403        irq_dispose_mapping(dev->irq);
4404        of_node_put(pp->phy_node);
4405        /* Destroy BM pools while pp (the netdev priv data) is still valid */
4406        if (pp->bm_priv) {
4407                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4408                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4409                                       1 << pp->id);
4410        }
4411        free_netdev(dev);
4412
4413        return 0;
4414}
4415
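    /* System sleep support: suspend stops the port if it is running,
     * detaches the net device and gates both clocks; resume re-enables the
     * clocks and reprograms the Mbus windows, BM pools and port defaults
     * (the controller is assumed to lose this state across suspend) before
     * reattaching and, if needed, reopening the interface.
     */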
4416#ifdef CONFIG_PM_SLEEP
4417static int mvneta_suspend(struct device *device)
4418{
4419        struct net_device *dev = dev_get_drvdata(device);
4420        struct mvneta_port *pp = netdev_priv(dev);
4421
4422        if (netif_running(dev))
4423                mvneta_stop(dev);
4424        netif_device_detach(dev);
4425        clk_disable_unprepare(pp->clk_bus);
4426        clk_disable_unprepare(pp->clk);
4427        return 0;
4428}
4429
4430static int mvneta_resume(struct device *device)
4431{
4432        struct platform_device *pdev = to_platform_device(device);
4433        struct net_device *dev = dev_get_drvdata(device);
4434        struct mvneta_port *pp = netdev_priv(dev);
4435        int err;
4436
4437        clk_prepare_enable(pp->clk);
4438        if (!IS_ERR(pp->clk_bus))
4439                clk_prepare_enable(pp->clk_bus);
4440        if (pp->dram_target_info || pp->neta_armada3700)
4441                mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4442        if (pp->bm_priv) {
4443                err = mvneta_bm_port_init(pdev, pp);
4444                if (err < 0) {
4445                        dev_info(&pdev->dev, "use SW buffer management\n");
4446                        pp->bm_priv = NULL;
4447                }
4448        }
4449        mvneta_defaults_set(pp);
4450        err = mvneta_port_power_up(pp, pp->phy_interface);
4451        if (err < 0) {
4452                dev_err(device, "can't power up port\n");
4453                return err;
4454        }
4455
4456        if (pp->use_inband_status)
4457                mvneta_fixed_link_update(pp, dev->phydev);
4458
4459        netif_device_attach(dev);
4460        if (netif_running(dev)) {
4461                mvneta_open(dev);
4462                mvneta_set_rx_mode(dev);
4463        }
4464
4465        return 0;
4466}
4467#endif
4468
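    /* SIMPLE_DEV_PM_OPS() only wires up the suspend/resume callbacks when
     * CONFIG_PM_SLEEP is enabled, matching the #ifdef guard above.
     */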
4469static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
4470
4471static const struct of_device_id mvneta_match[] = {
4472        { .compatible = "marvell,armada-370-neta" },
4473        { .compatible = "marvell,armada-xp-neta" },
4474        { .compatible = "marvell,armada-3700-neta" },
4475        { }
4476};
4477MODULE_DEVICE_TABLE(of, mvneta_match);
4478
4479static struct platform_driver mvneta_driver = {
4480        .probe = mvneta_probe,
4481        .remove = mvneta_remove,
4482        .driver = {
4483                .name = MVNETA_DRIVER_NAME,
4484                .of_match_table = mvneta_match,
4485                .pm = &mvneta_pm_ops,
4486        },
4487};
4488
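    /* Module init: register two multi-instance CPU hotplug states (a
     * dynamically allocated "online" state, saved in online_hpstate, and a
     * dedicated "dead" state) before the platform driver, so each port can
     * react as CPUs come and go via mvneta_cpu_online(),
     * mvneta_cpu_down_prepare() and mvneta_cpu_dead(); the states are torn
     * down in reverse order on failure and in module exit.
     */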
4489static int __init mvneta_driver_init(void)
4490{
4491        int ret;
4492
4493        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
4494                                      mvneta_cpu_online,
4495                                      mvneta_cpu_down_prepare);
4496        if (ret < 0)
4497                goto out;
4498        online_hpstate = ret;
4499        ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4500                                      NULL, mvneta_cpu_dead);
4501        if (ret)
4502                goto err_dead;
4503
4504        ret = platform_driver_register(&mvneta_driver);
4505        if (ret)
4506                goto err;
4507        return 0;
4508
4509err:
4510        cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4511err_dead:
4512        cpuhp_remove_multi_state(online_hpstate);
4513out:
4514        return ret;
4515}
4516module_init(mvneta_driver_init);
4517
4518static void __exit mvneta_driver_exit(void)
4519{
4520        platform_driver_unregister(&mvneta_driver);
4521        cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4522        cpuhp_remove_multi_state(online_hpstate);
4523}
4524module_exit(mvneta_driver_exit);
4525
4526MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4527MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
4528MODULE_LICENSE("GPL");
4529
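    /* Module parameters: rxq_number, txq_number and rxq_def are read-only
     * after the module is loaded (S_IRUGO), while rx_copybreak can also be
     * changed at runtime (S_IRUGO | S_IWUSR) through the standard sysfs
     * path, e.g. (the value is only an example):
     *
     *     echo 256 > /sys/module/mvneta/parameters/rx_copybreak
     */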
4530module_param(rxq_number, int, S_IRUGO);
4531module_param(txq_number, int, S_IRUGO);
4532
4533module_param(rxq_def, int, S_IRUGO);
4534module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
4535