linux/drivers/net/ethernet/calxeda/xgmac.c
/*
 * Copyright 2010-2011 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>

/* XGMAC Register definitions */
#define XGMAC_CONTROL           0x00000000      /* MAC Configuration */
#define XGMAC_FRAME_FILTER      0x00000004      /* MAC Frame Filter */
#define XGMAC_FLOW_CTRL         0x00000018      /* MAC Flow Control */
#define XGMAC_VLAN_TAG          0x0000001C      /* VLAN Tags */
#define XGMAC_VERSION           0x00000020      /* Version */
#define XGMAC_VLAN_INCL         0x00000024      /* VLAN tag for tx frames */
#define XGMAC_LPI_CTRL          0x00000028      /* LPI Control and Status */
#define XGMAC_LPI_TIMER         0x0000002C      /* LPI Timers Control */
#define XGMAC_TX_PACE           0x00000030      /* Transmit Pace and Stretch */
#define XGMAC_VLAN_HASH         0x00000034      /* VLAN Hash Table */
#define XGMAC_DEBUG             0x00000038      /* Debug */
#define XGMAC_INT_STAT          0x0000003C      /* Interrupt and Control */
#define XGMAC_ADDR_HIGH(reg)    (0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg)     (0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n)           (0x00000300 + (n) * 4) /* HASH table regs */
#define XGMAC_NUM_HASH          16
#define XGMAC_OMR               0x00000400
#define XGMAC_REMOTE_WAKE       0x00000700      /* Remote Wake-Up Frm Filter */
#define XGMAC_PMT               0x00000704      /* PMT Control and Status */
#define XGMAC_MMC_CTRL          0x00000800      /* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX       0x00000804      /* Receive Interrupt */
#define XGMAC_MMC_INTR_TX       0x00000808      /* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX  0x0000080c      /* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX  0x00000810      /* Transmit Interrupt Mask */

/* Hardware TX Statistics Counters */
#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
#define XGMAC_MMC_TXBCFRAME_G   0x00000824
#define XGMAC_MMC_TXMCFRAME_G   0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB  0x00000864
#define XGMAC_MMC_TXMCFRAME_GB  0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB  0x00000874
#define XGMAC_MMC_TXUNDERFLOW   0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO  0x00000884
#define XGMAC_MMC_TXOCTET_G_HI  0x00000888
#define XGMAC_MMC_TXFRAME_G_LO  0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI  0x00000890
#define XGMAC_MMC_TXPAUSEFRAME  0x00000894
#define XGMAC_MMC_TXVLANFRAME   0x0000089C

/* Hardware RX Statistics Counters */
#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO  0x00000910
#define XGMAC_MMC_RXOCTET_G_HI  0x00000914
#define XGMAC_MMC_RXBCFRAME_G   0x00000918
#define XGMAC_MMC_RXMCFRAME_G   0x00000920
#define XGMAC_MMC_RXCRCERR      0x00000928
#define XGMAC_MMC_RXRUNT        0x00000930
#define XGMAC_MMC_RXJABBER      0x00000934
#define XGMAC_MMC_RXUCFRAME_G   0x00000970
#define XGMAC_MMC_RXLENGTHERR   0x00000978
#define XGMAC_MMC_RXPAUSEFRAME  0x00000988
#define XGMAC_MMC_RXOVERFLOW    0x00000990
#define XGMAC_MMC_RXVLANFRAME   0x00000998
#define XGMAC_MMC_RXWATCHDOG    0x000009a0

/* DMA Control and Status Registers */
#define XGMAC_DMA_BUS_MODE      0x00000f00      /* Bus Mode */
#define XGMAC_DMA_TX_POLL       0x00000f04      /* Transmit Poll Demand */
#define XGMAC_DMA_RX_POLL       0x00000f08      /* Received Poll Demand */
#define XGMAC_DMA_RX_BASE_ADDR  0x00000f0c      /* Receive List Base */
#define XGMAC_DMA_TX_BASE_ADDR  0x00000f10      /* Transmit List Base */
#define XGMAC_DMA_STATUS        0x00000f14      /* Status Register */
#define XGMAC_DMA_CONTROL       0x00000f18      /* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA      0x00000f1c      /* Interrupt Enable */
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20     /* Missed Frame Counter */
#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24      /* RX Intr Watchdog Timer */
#define XGMAC_DMA_AXI_BUS       0x00000f28      /* AXI Bus Mode */
#define XGMAC_DMA_AXI_STATUS    0x00000f2C      /* AXI Status */
#define XGMAC_DMA_HW_FEATURE    0x00000f58      /* Enabled Hardware Features */

#define XGMAC_ADDR_AE           0x80000000
#define XGMAC_MAX_FILTER_ADDR   31

/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
#define XGMAC_PMT_GLBL_UNICAST  0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
#define XGMAC_PMT_MAGIC_PKT     0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN  0x00000002
#define XGMAC_PMT_POWERDOWN     0x00000001

#define XGMAC_CONTROL_SPD       0x40000000      /* Speed control */
#define XGMAC_CONTROL_SPD_MASK  0x60000000
#define XGMAC_CONTROL_SPD_1G    0x60000000
#define XGMAC_CONTROL_SPD_2_5G  0x40000000
#define XGMAC_CONTROL_SPD_10G   0x00000000
#define XGMAC_CONTROL_SARC      0x10000000      /* Source Addr Insert/Replace */
#define XGMAC_CONTROL_SARK_MASK 0x18000000
#define XGMAC_CONTROL_CAR       0x04000000      /* CRC Addition/Replacement */
#define XGMAC_CONTROL_CAR_MASK  0x06000000
#define XGMAC_CONTROL_DP        0x01000000      /* Disable Padding */
#define XGMAC_CONTROL_WD        0x00800000      /* Disable Watchdog on rx */
#define XGMAC_CONTROL_JD        0x00400000      /* Jabber disable */
#define XGMAC_CONTROL_JE        0x00100000      /* Jumbo frame */
#define XGMAC_CONTROL_LM        0x00001000      /* Loop-back mode */
#define XGMAC_CONTROL_IPC       0x00000400      /* Checksum Offload */
#define XGMAC_CONTROL_ACS       0x00000080      /* Automatic Pad/FCS Strip */
#define XGMAC_CONTROL_DDIC      0x00000010      /* Disable Deficit Idle Count */
#define XGMAC_CONTROL_TE        0x00000008      /* Transmitter Enable */
#define XGMAC_CONTROL_RE        0x00000004      /* Receiver Enable */

/* XGMAC Frame Filter defines */
#define XGMAC_FRAME_FILTER_PR   0x00000001      /* Promiscuous Mode */
#define XGMAC_FRAME_FILTER_HUC  0x00000002      /* Hash Unicast */
#define XGMAC_FRAME_FILTER_HMC  0x00000004      /* Hash Multicast */
#define XGMAC_FRAME_FILTER_DAIF 0x00000008      /* DA Inverse Filtering */
#define XGMAC_FRAME_FILTER_PM   0x00000010      /* Pass all multicast */
#define XGMAC_FRAME_FILTER_DBF  0x00000020      /* Disable Broadcast frames */
#define XGMAC_FRAME_FILTER_SAIF 0x00000100      /* Inverse Filtering */
#define XGMAC_FRAME_FILTER_SAF  0x00000200      /* Source Address Filter */
#define XGMAC_FRAME_FILTER_HPF  0x00000400      /* Hash or perfect Filter */
#define XGMAC_FRAME_FILTER_VHF  0x00000800      /* VLAN Hash Filter */
#define XGMAC_FRAME_FILTER_VPF  0x00001000      /* VLAN Perfect Filter */
#define XGMAC_FRAME_FILTER_RA   0x80000000      /* Receive all mode */

/* XGMAC FLOW CTRL defines */
#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000      /* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT        16
#define XGMAC_FLOW_CTRL_DZQP    0x00000080      /* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT     0x00000020      /* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030     /* PLT MASK */
#define XGMAC_FLOW_CTRL_UP      0x00000008      /* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE     0x00000004      /* Rx Flow Control Enable */
#define XGMAC_FLOW_CTRL_TFE     0x00000002      /* Tx Flow Control Enable */
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001      /* Flow Control Busy ... */

/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMT      0x0080          /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI      0x0040          /* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET  0x00000001      /* Software Reset */
#define DMA_BUS_MODE_DSL_MASK   0x0000007c      /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT  2               /* (in DWORDS) */
#define DMA_BUS_MODE_ATDS       0x00000080      /* Alternate Descriptor Size */

/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK   0x00003f00      /* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT  8
#define DMA_BUS_MODE_FB         0x00010000      /* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK  0x003e0000      /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP        0x00800000
#define DMA_BUS_MODE_8PBL       0x01000000
#define DMA_BUS_MODE_AAL        0x02000000

/* DMA Bus Mode register defines */
#define DMA_BUS_PR_RATIO_MASK   0x0000c000      /* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT  14
#define DMA_BUS_FB              0x00010000      /* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST          0x00002000      /* Start/Stop Transmission */
#define DMA_CONTROL_SR          0x00000002      /* Start/Stop Receive */
#define DMA_CONTROL_DFF         0x01000000      /* Disable flush of rx frames */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE        0x00010000      /* Normal Summary */
#define DMA_INTR_ENA_AIE        0x00008000      /* Abnormal Summary */
#define DMA_INTR_ENA_ERE        0x00004000      /* Early Receive */
#define DMA_INTR_ENA_FBE        0x00002000      /* Fatal Bus Error */
#define DMA_INTR_ENA_ETE        0x00000400      /* Early Transmit */
#define DMA_INTR_ENA_RWE        0x00000200      /* Receive Watchdog */
#define DMA_INTR_ENA_RSE        0x00000100      /* Receive Stopped */
#define DMA_INTR_ENA_RUE        0x00000080      /* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE        0x00000040      /* Receive Interrupt */
#define DMA_INTR_ENA_UNE        0x00000020      /* Tx Underflow */
#define DMA_INTR_ENA_OVE        0x00000010      /* Receive Overflow */
#define DMA_INTR_ENA_TJE        0x00000008      /* Transmit Jabber */
#define DMA_INTR_ENA_TUE        0x00000004      /* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE        0x00000002      /* Transmit Stopped */
#define DMA_INTR_ENA_TIE        0x00000001      /* Transmit Interrupt */

#define DMA_INTR_NORMAL         (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
                                 DMA_INTR_ENA_TUE)

#define DMA_INTR_ABNORMAL       (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
                                 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
                                 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
                                 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
                                 DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK   (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* DMA Status register defines */
#define DMA_STATUS_GMI          0x08000000      /* MMC interrupt */
#define DMA_STATUS_GLI          0x04000000      /* GMAC Line interface int */
#define DMA_STATUS_EB_MASK      0x00380000      /* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT  0x00080000      /* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT  0x00100000      /* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK      0x00700000      /* Transmit Process State */
#define DMA_STATUS_TS_SHIFT     20
#define DMA_STATUS_RS_MASK      0x000e0000      /* Receive Process State */
#define DMA_STATUS_RS_SHIFT     17
#define DMA_STATUS_NIS          0x00010000      /* Normal Interrupt Summary */
#define DMA_STATUS_AIS          0x00008000      /* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI          0x00004000      /* Early Receive Interrupt */
#define DMA_STATUS_FBI          0x00002000      /* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI          0x00000400      /* Early Transmit Interrupt */
#define DMA_STATUS_RWT          0x00000200      /* Receive Watchdog Timeout */
#define DMA_STATUS_RPS          0x00000100      /* Receive Process Stopped */
#define DMA_STATUS_RU           0x00000080      /* Receive Buffer Unavailable */
#define DMA_STATUS_RI           0x00000040      /* Receive Interrupt */
#define DMA_STATUS_UNF          0x00000020      /* Transmit Underflow */
#define DMA_STATUS_OVF          0x00000010      /* Receive Overflow */
#define DMA_STATUS_TJT          0x00000008      /* Transmit Jabber Timeout */
#define DMA_STATUS_TU           0x00000004      /* Transmit Buffer Unavail */
#define DMA_STATUS_TPS          0x00000002      /* Transmit Process Stopped */
#define DMA_STATUS_TI           0x00000001      /* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX           0x00000008      /* Transmitter Enable */
#define MAC_ENABLE_RX           0x00000004      /* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF           0x00200000      /* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF           0x00100000      /* Flush Transmit FIFO */
#define XGMAC_OMR_TTC           0x00020000      /* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK      0x00030000
#define XGMAC_OMR_RFD           0x00006000      /* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK      0x00007000      /* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA           0x00000600      /* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK      0x00000E00      /* FC Act Threshold MASK */
#define XGMAC_OMR_EFC           0x00000100      /* Enable Hardware FC */
#define XGMAC_OMR_FEF           0x00000080      /* Forward Error Frames */
#define XGMAC_OMR_DT            0x00000040      /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF           0x00000020      /* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC           0x00000010      /* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK      0x00000018      /* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL    0x00010000      /* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ  0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ         (0x2000 - 8)

#define RXDESC_EXT_STATUS       0x00000001
#define RXDESC_CRC_ERR          0x00000002
#define RXDESC_RX_ERR           0x00000008
#define RXDESC_RX_WDOG          0x00000010
#define RXDESC_FRAME_TYPE       0x00000020
#define RXDESC_GIANT_FRAME      0x00000080
#define RXDESC_LAST_SEG         0x00000100
#define RXDESC_FIRST_SEG        0x00000200
#define RXDESC_VLAN_FRAME       0x00000400
#define RXDESC_OVERFLOW_ERR     0x00000800
#define RXDESC_LENGTH_ERR       0x00001000
#define RXDESC_SA_FILTER_FAIL   0x00002000
#define RXDESC_DESCRIPTOR_ERR   0x00004000
#define RXDESC_ERROR_SUMMARY    0x00008000
#define RXDESC_FRAME_LEN_OFFSET 16
#define RXDESC_FRAME_LEN_MASK   0x3fff0000
#define RXDESC_DA_FILTER_FAIL   0x40000000

#define RXDESC1_END_RING        0x00008000

#define RXDESC_IP_PAYLOAD_MASK  0x00000003
#define RXDESC_IP_PAYLOAD_UDP   0x00000001
#define RXDESC_IP_PAYLOAD_TCP   0x00000002
#define RXDESC_IP_PAYLOAD_ICMP  0x00000003
#define RXDESC_IP_HEADER_ERR    0x00000008
#define RXDESC_IP_PAYLOAD_ERR   0x00000010
#define RXDESC_IPV4_PACKET      0x00000040
#define RXDESC_IPV6_PACKET      0x00000080
#define TXDESC_UNDERFLOW_ERR    0x00000001
#define TXDESC_JABBER_TIMEOUT   0x00000002
#define TXDESC_LOCAL_FAULT      0x00000004
#define TXDESC_REMOTE_FAULT     0x00000008
#define TXDESC_VLAN_FRAME       0x00000010
#define TXDESC_FRAME_FLUSHED    0x00000020
#define TXDESC_IP_HEADER_ERR    0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
#define TXDESC_ERROR_SUMMARY    0x00008000
#define TXDESC_SA_CTRL_INSERT   0x00040000
#define TXDESC_SA_CTRL_REPLACE  0x00080000
#define TXDESC_2ND_ADDR_CHAINED 0x00100000
#define TXDESC_END_RING         0x00200000
#define TXDESC_CSUM_IP          0x00400000
#define TXDESC_CSUM_IP_PAYLD    0x00800000
#define TXDESC_CSUM_ALL         0x00C00000
#define TXDESC_CRC_EN_REPLACE   0x01000000
#define TXDESC_CRC_EN_APPEND    0x02000000
#define TXDESC_DISABLE_PAD      0x04000000
#define TXDESC_FIRST_SEG        0x10000000
#define TXDESC_LAST_SEG         0x20000000
#define TXDESC_INTERRUPT        0x40000000

#define DESC_OWN                0x80000000
#define DESC_BUFFER1_SZ_MASK    0x00001fff
#define DESC_BUFFER2_SZ_MASK    0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET  16

struct xgmac_dma_desc {
        __le32 flags;
        __le32 buf_size;
        __le32 buf1_addr;               /* Buffer 1 Address Pointer */
        __le32 buf2_addr;               /* Buffer 2 Address Pointer */
        __le32 ext_status;
        __le32 res[3];
};
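
/* One descriptor is eight 32-bit words (32 bytes): the "alternate"
 * descriptor layout that DMA_BUS_MODE_ATDS selects in xgmac_hw_init()
 * below. buf_size packs both buffer lengths; see desc_set_buf_len(). */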

struct xgmac_extra_stats {
        /* Transmit errors */
        unsigned long tx_jabber;
        unsigned long tx_frame_flushed;
        unsigned long tx_payload_error;
        unsigned long tx_ip_header_error;
        unsigned long tx_local_fault;
        unsigned long tx_remote_fault;
        /* Receive errors */
        unsigned long rx_watchdog;
        unsigned long rx_da_filter_fail;
        unsigned long rx_sa_filter_fail;
        unsigned long rx_payload_error;
        unsigned long rx_ip_header_error;
        /* Tx/Rx IRQ errors */
        unsigned long tx_undeflow;
        unsigned long tx_process_stopped;
        unsigned long rx_buf_unav;
        unsigned long rx_process_stopped;
        unsigned long tx_early;
        unsigned long fatal_bus_error;
};

struct xgmac_priv {
        struct xgmac_dma_desc *dma_rx;
        struct sk_buff **rx_skbuff;
        unsigned int rx_tail;
        unsigned int rx_head;

        struct xgmac_dma_desc *dma_tx;
        struct sk_buff **tx_skbuff;
        unsigned int tx_head;
        unsigned int tx_tail;

        void __iomem *base;
        struct sk_buff_head rx_recycle;
        unsigned int dma_buf_sz;
        dma_addr_t dma_rx_phy;
        dma_addr_t dma_tx_phy;

        struct net_device *dev;
        struct device *device;
        struct napi_struct napi;

        struct xgmac_extra_stats xstats;

        spinlock_t stats_lock;
        int pmt_irq;
        char rx_pause;
        char tx_pause;
        int wolopts;
};

/* XGMAC Configuration Settings */
#define MAX_MTU                 9000
#define PAUSE_TIME              0x400

#define DMA_RX_RING_SZ          256
#define DMA_TX_RING_SZ          128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH               (DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s)     (((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s)   CIRC_CNT(h, t, s)

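/* The ring sizes must be powers of two for the mask in dma_ring_incr()
 * and the circ_buf macros to agree: with a ring of 8, head = 5 and
 * tail = 3, dma_ring_cnt() = (5 - 3) & 7 = 2 descriptors in flight and
 * dma_ring_space() = (3 - 5 - 1) & 7 = 5 free slots; one slot always
 * stays open so a full ring is distinguishable from an empty one. */
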
/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
        if (buf_sz > MAX_DESC_BUF_SZ)
                p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
                        (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
        else
                p->buf_size = cpu_to_le32(buf_sz);
}
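
/* A buffer larger than MAX_DESC_BUF_SZ (0x2000 - 8 = 8184 bytes) is
 * described as two contiguous chunks: buffer 1 holds the first 8184
 * bytes and buffer 2 the rest, so a 9000-byte jumbo buffer splits as
 * 8184 + 816. desc_set_buf_addr() below points buf2_addr at
 * paddr + MAX_DESC_BUF_SZ to match. */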

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
        u32 len = le32_to_cpu(p->buf_size);
        return (len & DESC_BUFFER1_SZ_MASK) +
                ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
                                     int buf_sz)
{
        struct xgmac_dma_desc *end = p + ring_size - 1;

        memset(p, 0, sizeof(*p) * ring_size);

        for (; p <= end; p++)
                desc_set_buf_len(p, buf_sz);

        end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
        memset(p, 0, sizeof(*p) * ring_size);
        p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
        /* Clear all fields and set the owner */
        p->flags = cpu_to_le32(DESC_OWN);
}

static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
        u32 tmpflags = le32_to_cpu(p->flags);
        tmpflags &= TXDESC_END_RING;
        tmpflags |= flags | DESC_OWN;
        p->flags = cpu_to_le32(tmpflags);
}

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
                                     u32 paddr, int len)
{
        p->buf1_addr = cpu_to_le32(paddr);
        if (len > MAX_DESC_BUF_SZ)
                p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
                                              u32 paddr, int len)
{
        desc_set_buf_len(p, len);
        desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
        u32 data = le32_to_cpu(p->flags);
        u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
        if (data & RXDESC_FRAME_TYPE)
                len -= ETH_FCS_LEN;

        return len;
}

static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
        int timeout = 1000;
        u32 reg = readl(ioaddr + XGMAC_OMR);
        writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

        while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
                udelay(1);
}

static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
        struct xgmac_extra_stats *x = &priv->xstats;
        u32 status = le32_to_cpu(p->flags);

        if (!(status & TXDESC_ERROR_SUMMARY))
                return 0;

        netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
        if (status & TXDESC_JABBER_TIMEOUT)
                x->tx_jabber++;
        if (status & TXDESC_FRAME_FLUSHED)
                x->tx_frame_flushed++;
        if (status & TXDESC_UNDERFLOW_ERR)
                xgmac_dma_flush_tx_fifo(priv->base);
        if (status & TXDESC_IP_HEADER_ERR)
                x->tx_ip_header_error++;
        if (status & TXDESC_LOCAL_FAULT)
                x->tx_local_fault++;
        if (status & TXDESC_REMOTE_FAULT)
                x->tx_remote_fault++;
        if (status & TXDESC_PAYLOAD_CSUM_ERR)
                x->tx_payload_error++;

        return -1;
}

static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
        struct xgmac_extra_stats *x = &priv->xstats;
        int ret = CHECKSUM_UNNECESSARY;
        u32 status = le32_to_cpu(p->flags);
        u32 ext_status = le32_to_cpu(p->ext_status);

        if (status & RXDESC_DA_FILTER_FAIL) {
                netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
                x->rx_da_filter_fail++;
                return -1;
        }

        /* Check if packet has checksum already */
        if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
                !(ext_status & RXDESC_IP_PAYLOAD_MASK))
                ret = CHECKSUM_NONE;

        netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
                   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

        if (!(status & RXDESC_ERROR_SUMMARY))
                return ret;

        /* Handle any errors */
        if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
                RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
                return -1;

        if (status & RXDESC_EXT_STATUS) {
                if (ext_status & RXDESC_IP_HEADER_ERR)
                        x->rx_ip_header_error++;
                if (ext_status & RXDESC_IP_PAYLOAD_ERR)
                        x->rx_payload_error++;
                netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
                           ext_status);
                return CHECKSUM_NONE;
        }

        return ret;
}

static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
        u32 value = readl(ioaddr + XGMAC_CONTROL);
        value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
        writel(value, ioaddr + XGMAC_CONTROL);

        value = readl(ioaddr + XGMAC_DMA_CONTROL);
        value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
        writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
        u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
        value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
        writel(value, ioaddr + XGMAC_DMA_CONTROL);

        value = readl(ioaddr + XGMAC_CONTROL);
        value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
        writel(value, ioaddr + XGMAC_CONTROL);
}

static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                               int num)
{
        u32 data;

        data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
        writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
        data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
        writel(data, ioaddr + XGMAC_ADDR_LOW(num));
}

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                               int num)
{
        u32 hi_addr, lo_addr;

        /* Read the MAC address from the hardware */
        hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
        lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

        /* Extract the MAC address from the high and low words */
        addr[0] = lo_addr & 0xff;
        addr[1] = (lo_addr >> 8) & 0xff;
        addr[2] = (lo_addr >> 16) & 0xff;
        addr[3] = (lo_addr >> 24) & 0xff;
        addr[4] = hi_addr & 0xff;
        addr[5] = (hi_addr >> 8) & 0xff;
}

static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
        u32 reg;
        unsigned int flow = 0;

        priv->rx_pause = rx;
        priv->tx_pause = tx;

        if (rx || tx) {
                if (rx)
                        flow |= XGMAC_FLOW_CTRL_RFE;
                if (tx)
                        flow |= XGMAC_FLOW_CTRL_TFE;

                flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
                flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

                writel(flow, priv->base + XGMAC_FLOW_CTRL);

                reg = readl(priv->base + XGMAC_OMR);
                reg |= XGMAC_OMR_EFC;
                writel(reg, priv->base + XGMAC_OMR);
        } else {
                writel(0, priv->base + XGMAC_FLOW_CTRL);

                reg = readl(priv->base + XGMAC_OMR);
                reg &= ~XGMAC_OMR_EFC;
                writel(reg, priv->base + XGMAC_OMR);
        }

        return 0;
}

static void xgmac_rx_refill(struct xgmac_priv *priv)
{
        struct xgmac_dma_desc *p;
        dma_addr_t paddr;

        while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
                int entry = priv->rx_head;
                struct sk_buff *skb;

                p = priv->dma_rx + entry;

                /* Only allocate when the slot is empty; a bare "continue"
                 * here would spin forever since rx_head never advances */
                if (priv->rx_skbuff[entry] == NULL) {
                        skb = __skb_dequeue(&priv->rx_recycle);
                        if (skb == NULL)
                                skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
                        if (unlikely(skb == NULL))
                                break;

                        priv->rx_skbuff[entry] = skb;
                        paddr = dma_map_single(priv->device, skb->data,
                                               priv->dma_buf_sz, DMA_FROM_DEVICE);
                        desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
                }

                netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
                        priv->rx_head, priv->rx_tail);

                priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
                /* Ensure descriptor is in memory before handing to h/w */
                wmb();
                desc_set_rx_owner(p);
        }
}

/**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description:  this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        unsigned int bfsize;

        /* Set the Buffer size according to the MTU;
         * indeed, in case of jumbo we need to bump-up the buffer sizes.
         */
        bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
                       64);
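        /* For the default 1500-byte MTU this works out to
         * ALIGN(1500 + 14 + 4 + 2 + 64, 64) = 1600 bytes per RX buffer. */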

        netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

        priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
                                  GFP_KERNEL);
        if (!priv->rx_skbuff)
                return -ENOMEM;

        priv->dma_rx = dma_alloc_coherent(priv->device,
                                          DMA_RX_RING_SZ *
                                          sizeof(struct xgmac_dma_desc),
                                          &priv->dma_rx_phy,
                                          GFP_KERNEL);
        if (!priv->dma_rx)
                goto err_dma_rx;

        priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
                                  GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto err_tx_skb;

        priv->dma_tx = dma_alloc_coherent(priv->device,
                                          DMA_TX_RING_SZ *
                                          sizeof(struct xgmac_dma_desc),
                                          &priv->dma_tx_phy,
                                          GFP_KERNEL);
        if (!priv->dma_tx)
                goto err_dma_tx;

        netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
            "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
            priv->dma_rx, priv->dma_tx,
            (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

        priv->rx_tail = 0;
        priv->rx_head = 0;
        priv->dma_buf_sz = bfsize;
        desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
        xgmac_rx_refill(priv);

        priv->tx_tail = 0;
        priv->tx_head = 0;
        desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

        writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
        writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

        return 0;

err_dma_tx:
        kfree(priv->tx_skbuff);
err_tx_skb:
        dma_free_coherent(priv->device,
                          DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
                          priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
        kfree(priv->rx_skbuff);
        return -ENOMEM;
}

static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
        int i;
        struct xgmac_dma_desc *p;

        if (!priv->rx_skbuff)
                return;

        for (i = 0; i < DMA_RX_RING_SZ; i++) {
                if (priv->rx_skbuff[i] == NULL)
                        continue;

                p = priv->dma_rx + i;
                dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_skbuff[i]);
                priv->rx_skbuff[i] = NULL;
        }
}

static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
        int i, f;
        struct xgmac_dma_desc *p;

        if (!priv->tx_skbuff)
                return;

        for (i = 0; i < DMA_TX_RING_SZ; i++) {
                int entry = i;
                struct sk_buff *skb = priv->tx_skbuff[i];

                if (skb == NULL)
                        continue;

                p = priv->dma_tx + entry;
                dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                 desc_get_buf_len(p), DMA_TO_DEVICE);

                /* Fragment buffers sit in the descriptors that follow
                 * the first segment */
                for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                        p = priv->dma_tx + ++entry;
                        dma_unmap_page(priv->device, desc_get_buf_addr(p),
                                       desc_get_buf_len(p), DMA_TO_DEVICE);
                }

                dev_kfree_skb_any(skb);
                priv->tx_skbuff[i] = NULL;
        }
}

static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
        /* Release the DMA TX/RX socket buffers */
        xgmac_free_rx_skbufs(priv);
        xgmac_free_tx_skbufs(priv);

        /* Free the consistent memory allocated for descriptor rings */
        if (priv->dma_tx) {
                dma_free_coherent(priv->device,
                                  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
                                  priv->dma_tx, priv->dma_tx_phy);
                priv->dma_tx = NULL;
        }
        if (priv->dma_rx) {
                dma_free_coherent(priv->device,
                                  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
                                  priv->dma_rx, priv->dma_rx_phy);
                priv->dma_rx = NULL;
        }
        kfree(priv->rx_skbuff);
        priv->rx_skbuff = NULL;
        kfree(priv->tx_skbuff);
        priv->tx_skbuff = NULL;
}

/**
 * xgmac_tx_complete:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
        int i;
        void __iomem *ioaddr = priv->base;

        writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);

        while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
                unsigned int entry = priv->tx_tail;
                struct sk_buff *skb = priv->tx_skbuff[entry];
                struct xgmac_dma_desc *p = priv->dma_tx + entry;

                /* Check if the descriptor is owned by the DMA. */
                if (desc_get_owner(p))
                        break;

                /* Verify tx error by looking at the last segment */
                if (desc_get_tx_ls(p))
                        desc_get_tx_status(priv, p);

                netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
                        priv->tx_head, priv->tx_tail);

                dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                 desc_get_buf_len(p), DMA_TO_DEVICE);

                priv->tx_skbuff[entry] = NULL;
                priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);

                if (!skb)
                        continue;

                /* Fragment buffers sit in the descriptors that follow
                 * the first segment */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        entry = priv->tx_tail;
                        p = priv->dma_tx + entry;
                        priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);

                        dma_unmap_page(priv->device, desc_get_buf_addr(p),
                                       desc_get_buf_len(p), DMA_TO_DEVICE);
                }

                /*
                 * If there's room in the queue (limit it to size)
                 * we add this skb back into the pool,
                 * if it's the right size.
                 */
                if ((skb_queue_len(&priv->rx_recycle) <
                        DMA_RX_RING_SZ) &&
                        skb_recycle_check(skb, priv->dma_buf_sz))
                        __skb_queue_head(&priv->rx_recycle, skb);
                else
                        dev_kfree_skb(skb);
        }

        if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
            TX_THRESH)
                netif_wake_queue(priv->dev);
}

/**
 * xgmac_tx_err:
 * @priv: pointer to the private device structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void xgmac_tx_err(struct xgmac_priv *priv)
{
        u32 reg, value, inten;

        netif_stop_queue(priv->dev);

        inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
        writel(0, priv->base + XGMAC_DMA_INTR_ENA);

        reg = readl(priv->base + XGMAC_DMA_CONTROL);
        writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
        /* Wait for the TX process state to report stopped (0) or
         * suspended (0x600000) */
        do {
                value = readl(priv->base + XGMAC_DMA_STATUS) &
                        DMA_STATUS_TS_MASK;
        } while (value && (value != 0x600000));

        xgmac_free_tx_skbufs(priv);
        desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
        priv->tx_tail = 0;
        priv->tx_head = 0;
        writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

        writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
                priv->base + XGMAC_DMA_STATUS);
        writel(inten, priv->base + XGMAC_DMA_INTR_ENA);

        netif_wake_queue(priv->dev);
}

static int xgmac_hw_init(struct net_device *dev)
{
        u32 value, ctrl;
        int limit;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;

        /* Save the ctrl register value */
        ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

        /* SW reset */
        value = DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
        limit = 15000;
        while (limit-- &&
                (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
                cpu_relax();
        if (limit < 0)
                return -EBUSY;

        value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
                (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
                DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
        writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

        /* Enable interrupts */
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

        /* XGMAC requires AXI bus init. This is a 'magic number' for now */
        writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

        ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
                XGMAC_CONTROL_CAR;
        if (dev->features & NETIF_F_RXCSUM)
                ctrl |= XGMAC_CONTROL_IPC;
        writel(ctrl, ioaddr + XGMAC_CONTROL);

        value = DMA_CONTROL_DFF;
        writel(value, ioaddr + XGMAC_DMA_CONTROL);

        /* Set the HW DMA mode and the COE */
        writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
                ioaddr + XGMAC_OMR);

        /* Reset the MMC counters */
        writel(1, ioaddr + XGMAC_MMC_CTRL);
        return 0;
}

/**
 *  xgmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
        int ret;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;

        /* Check that the MAC address is valid.  If it's not, fall back
         * to a random one. A persistent address can be set with:
         *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
        if (!is_valid_ether_addr(dev->dev_addr)) {
                random_ether_addr(dev->dev_addr);
                netdev_dbg(priv->dev, "generated random MAC address %pM\n",
                        dev->dev_addr);
        }

        skb_queue_head_init(&priv->rx_recycle);
        memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

        /* Initialize the XGMAC and descriptors */
        xgmac_hw_init(dev);
        xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
        xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

        ret = xgmac_dma_desc_rings_init(dev);
        if (ret < 0)
                return ret;

        /* Enable the MAC Rx/Tx */
        xgmac_mac_enable(ioaddr);

        napi_enable(&priv->napi);
        netif_start_queue(dev);

        return 0;
}

/**
 *  xgmac_stop - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);

        netif_stop_queue(dev);

        if (readl(priv->base + XGMAC_DMA_INTR_ENA))
                napi_disable(&priv->napi);

        writel(0, priv->base + XGMAC_DMA_INTR_ENA);
        skb_queue_purge(&priv->rx_recycle);

        /* Disable the MAC core */
        xgmac_mac_disable(priv->base);

        /* Release and free the Rx/Tx resources */
        xgmac_free_dma_desc_rings(priv);

        return 0;
}

/**
 *  xgmac_xmit:
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : Tx entry point of the driver.
 */
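/* The descriptor chain is handed over back to front: each fragment
 * descriptor is filled in and given to the hardware first, and only
 * after the wmb() below is DESC_OWN set on the first descriptor, so the
 * DMA engine never sees a partially built chain. The final write to
 * XGMAC_DMA_TX_POLL kicks the transmit DMA. */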
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        unsigned int entry;
        int i;
        int nfrags = skb_shinfo(skb)->nr_frags;
        struct xgmac_dma_desc *desc, *first;
        unsigned int desc_flags;
        unsigned int len;
        dma_addr_t paddr;

        if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
            (nfrags + 1)) {
                writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
                        priv->base + XGMAC_DMA_INTR_ENA);
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
                TXDESC_CSUM_ALL : 0;
        entry = priv->tx_head;
        desc = priv->dma_tx + entry;
        first = desc;

        len = skb_headlen(skb);
        paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(priv->device, paddr)) {
                /* ndo_start_xmit must not return an errno; the packet
                 * is simply dropped */
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        priv->tx_skbuff[entry] = skb;
        desc_set_buf_addr_and_size(desc, paddr, len);

        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                len = frag->size;

                paddr = skb_frag_dma_map(priv->device, frag, 0, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(priv->device, paddr)) {
                        /* Drop the reference stored for the head
                         * descriptor before freeing the skb */
                        priv->tx_skbuff[priv->tx_head] = NULL;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }

                entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
                desc = priv->dma_tx + entry;
                priv->tx_skbuff[entry] = NULL;

                desc_set_buf_addr_and_size(desc, paddr, len);
                if (i < (nfrags - 1))
                        desc_set_tx_owner(desc, desc_flags);
        }

        /* Interrupt on completion only for the latest segment */
        if (desc != first)
                desc_set_tx_owner(desc, desc_flags |
                        TXDESC_LAST_SEG | TXDESC_INTERRUPT);
        else
                desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;

        /* Set owner on first desc last to avoid race condition */
        wmb();
        desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

        priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

        writel(1, priv->base + XGMAC_DMA_TX_POLL);

        return NETDEV_TX_OK;
}

static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
        unsigned int entry;
        unsigned int count = 0;
        struct xgmac_dma_desc *p;

        while (count < limit) {
                int ip_checksum;
                struct sk_buff *skb;
                int frame_len;

                writel(DMA_STATUS_RI | DMA_STATUS_NIS,
                       priv->base + XGMAC_DMA_STATUS);

                entry = priv->rx_tail;
                p = priv->dma_rx + entry;
                if (desc_get_owner(p))
                        break;

                count++;
                priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

                /* read the status of the incoming frame */
                ip_checksum = desc_get_rx_status(priv, p);
                if (ip_checksum < 0)
                        continue;

                skb = priv->rx_skbuff[entry];
                if (unlikely(!skb)) {
                        netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
                        break;
                }
                priv->rx_skbuff[entry] = NULL;

                frame_len = desc_get_rx_frame_len(p);
                netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
                        frame_len, ip_checksum);

                skb_put(skb, frame_len);
                /* Unmap with the size that was mapped in xgmac_rx_refill(),
                 * not the received frame length */
                dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);

                skb->protocol = eth_type_trans(skb, priv->dev);
                skb->ip_summed = ip_checksum;
                if (ip_checksum == CHECKSUM_NONE)
                        netif_receive_skb(skb);
                else
                        napi_gro_receive(&priv->napi, skb);
        }

        xgmac_rx_refill(priv);

        writel(1, priv->base + XGMAC_DMA_RX_POLL);

        return count;
}

/**
 *  xgmac_poll - xgmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *            all interfaces.
 *  Description :
 *   This function implements the reception process.
 *   Also it runs the TX completion thread
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
        struct xgmac_priv *priv = container_of(napi,
                                       struct xgmac_priv, napi);
        int work_done = 0;

        xgmac_tx_complete(priv);
        work_done = xgmac_rx(priv, budget);

        if (work_done < budget) {
                napi_complete(napi);
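                /* All work done: re-arm the RX/TX interrupts that
                 * xgmac_interrupt() masked before scheduling NAPI */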
                writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
        }
        return work_done;
}

/**
 *  xgmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev)
{
        struct xgmac_priv *priv = netdev_priv(dev);

        /* Clear Tx resources and restart transmitting again */
        xgmac_tx_err(priv);
}

/**
 *  xgmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
        int i;
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;
        unsigned int value = 0;
        u32 hash_filter[XGMAC_NUM_HASH];
        int reg = 1;
        struct netdev_hw_addr *ha;
        bool use_hash = false;

        netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
                 netdev_mc_count(dev), netdev_uc_count(dev));

        if (dev->flags & IFF_PROMISC) {
                writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
                return;
        }

        memset(hash_filter, 0, sizeof(hash_filter));

        if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
        }
        netdev_for_each_uc_addr(ha, dev) {
                if (use_hash) {
                        u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

                        /* The most significant 4 bits determine the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register. */
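                        /* Example: ~crc = 0x80000000 yields bit_nr = 0x100,
                         * i.e. bit 0 of hash_filter[8]. */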
                        hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                } else {
                        xgmac_set_mac_addr(ioaddr, ha->addr, reg);
                        reg++;
                }
        }

        if (dev->flags & IFF_ALLMULTI) {
                value |= XGMAC_FRAME_FILTER_PM;
                goto out;
        }

        if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
        }
        netdev_for_each_mc_addr(ha, dev) {
                if (use_hash) {
                        u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

                        /* The most significant 4 bits determine the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register. */
                        hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                } else {
                        xgmac_set_mac_addr(ioaddr, ha->addr, reg);
                        reg++;
                }
        }

out:
        for (i = 0; i < XGMAC_NUM_HASH; i++)
                writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

        writel(value, ioaddr + XGMAC_FRAME_FILTER);
}

/**
 *  xgmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
        struct xgmac_priv *priv = netdev_priv(dev);
        int old_mtu;

        if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
                netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
                return -EINVAL;
        }

        old_mtu = dev->mtu;
        dev->mtu = new_mtu;

        /* return early if the buffer sizes will not change */
        if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
                return 0;
        if (old_mtu == new_mtu)
                return 0;

        /* Stop everything, get ready to change the MTU */
        if (!netif_running(dev))
                return 0;

        /* Bring the interface down and then back up */
        xgmac_stop(dev);
        return xgmac_open(dev);
}

1362static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1363{
1364        u32 intr_status;
1365        struct net_device *dev = (struct net_device *)dev_id;
1366        struct xgmac_priv *priv = netdev_priv(dev);
1367        void __iomem *ioaddr = priv->base;
1368
1369        intr_status = readl(ioaddr + XGMAC_INT_STAT);
1370        if (intr_status & XGMAC_INT_STAT_PMT) {
1371                netdev_dbg(priv->dev, "received Magic frame\n");
		/* The PMT status bits (5 and 6: magic packet / wake-up
		 * frame received) are clear-on-read, so reading XGMAC_PMT
		 * acks the wake-up event */
1373                readl(ioaddr + XGMAC_PMT);
1374        }
1375        return IRQ_HANDLED;
1376}
1377
1378static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1379{
1380        u32 intr_status;
1381        bool tx_err = false;
1382        struct net_device *dev = (struct net_device *)dev_id;
1383        struct xgmac_priv *priv = netdev_priv(dev);
1384        struct xgmac_extra_stats *x = &priv->xstats;
1385
	/* Read the pending DMA interrupt status, mask it with the
	 * currently enabled sources and ack it */
1387        intr_status = readl(priv->base + XGMAC_DMA_STATUS);
1388        intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
1389        writel(intr_status, priv->base + XGMAC_DMA_STATUS);
1390
	/* ABNORMAL interrupts */
1393        if (unlikely(intr_status & DMA_STATUS_AIS)) {
1394                if (intr_status & DMA_STATUS_TJT) {
1395                        netdev_err(priv->dev, "transmit jabber\n");
1396                        x->tx_jabber++;
1397                }
1398                if (intr_status & DMA_STATUS_RU)
1399                        x->rx_buf_unav++;
1400                if (intr_status & DMA_STATUS_RPS) {
1401                        netdev_err(priv->dev, "receive process stopped\n");
1402                        x->rx_process_stopped++;
1403                }
1404                if (intr_status & DMA_STATUS_ETI) {
1405                        netdev_err(priv->dev, "transmit early interrupt\n");
1406                        x->tx_early++;
1407                }
1408                if (intr_status & DMA_STATUS_TPS) {
1409                        netdev_err(priv->dev, "transmit process stopped\n");
1410                        x->tx_process_stopped++;
1411                        tx_err = true;
1412                }
1413                if (intr_status & DMA_STATUS_FBI) {
1414                        netdev_err(priv->dev, "fatal bus error\n");
1415                        x->fatal_bus_error++;
1416                        tx_err = true;
1417                }
1418
1419                if (tx_err)
1420                        xgmac_tx_err(priv);
1421        }
1422
1423        /* TX/RX NORMAL interrupts */
1424        if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
1425                writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
1426                napi_schedule(&priv->napi);
1427        }
1428
1429        return IRQ_HANDLED;
1430}
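
/* NAPI hand-off: on a normal RX / TX-buffer-unavailable interrupt the
 * handler above leaves only the ABNORMAL sources enabled and schedules
 * the poll routine; xgmac_poll() (earlier in this file) is then expected
 * to restore the full interrupt mask once the rings have been drained. */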
1431
1432#ifdef CONFIG_NET_POLL_CONTROLLER
1433/* Polling receive - used by NETCONSOLE and other diagnostic tools
1434 * to allow network I/O with interrupts disabled. */
1435static void xgmac_poll_controller(struct net_device *dev)
1436{
1437        disable_irq(dev->irq);
1438        xgmac_interrupt(dev->irq, dev);
1439        enable_irq(dev->irq);
1440}
1441#endif
1442
1443static struct rtnl_link_stats64 *
1444xgmac_get_stats64(struct net_device *dev,
1445                       struct rtnl_link_stats64 *storage)
1446{
1447        struct xgmac_priv *priv = netdev_priv(dev);
1448        void __iomem *base = priv->base;
1449        u32 count;
1450
1451        spin_lock_bh(&priv->stats_lock);
1452        writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
1453
1454        storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
1455        storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
1456
1457        storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
1458        storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
1459        storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
1460        storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
1461        storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
1462
1463        storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
1464        storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
1465
1466        count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
1467        storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
1468        storage->tx_packets = count;
1469        storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
1470
1471        writel(0, base + XGMAC_MMC_CTRL);
1472        spin_unlock_bh(&priv->stats_lock);
1473        return storage;
1474}
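
/* For illustration only: each 64-bit MMC octet counter above is read as
 * a lo/hi register pair while the counters are frozen. A hypothetical
 * helper could look like this:
 */
#if 0	/* hedged example sketch, intentionally not compiled */
static u64 xgmac_read_mmc_stat64(void __iomem *base, u32 lo, u32 hi)
{
	/* The caller is assumed to hold XGMAC_MMC_CTRL_CNT_FRZ so that
	 * the low and high words belong to the same snapshot. */
	return readl(base + lo) | ((u64)readl(base + hi) << 32);
}
#endif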
1475
1476static int xgmac_set_mac_address(struct net_device *dev, void *p)
1477{
1478        struct xgmac_priv *priv = netdev_priv(dev);
1479        void __iomem *ioaddr = priv->base;
1480        struct sockaddr *addr = p;
1481
1482        if (!is_valid_ether_addr(addr->sa_data))
1483                return -EADDRNOTAVAIL;
1484
1485        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1486
1487        xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1488
1489        return 0;
1490}
1491
1492static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
1493{
1494        u32 ctrl;
1495        struct xgmac_priv *priv = netdev_priv(dev);
1496        void __iomem *ioaddr = priv->base;
1497        u32 changed = dev->features ^ features;
1498
1499        if (!(changed & NETIF_F_RXCSUM))
1500                return 0;
1501
1502        ctrl = readl(ioaddr + XGMAC_CONTROL);
1503        if (features & NETIF_F_RXCSUM)
1504                ctrl |= XGMAC_CONTROL_IPC;
1505        else
1506                ctrl &= ~XGMAC_CONTROL_IPC;
1507        writel(ctrl, ioaddr + XGMAC_CONTROL);
1508
1509        return 0;
1510}
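
/* Only NETIF_F_RXCSUM needs a runtime toggle here: TX checksum offload is
 * advertised once at probe time (based on DMA_HW_FEAT_TXCOESEL), while the
 * IPC bit in XGMAC_CONTROL gates receive checksum verification. */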
1511
1512static const struct net_device_ops xgmac_netdev_ops = {
1513        .ndo_open = xgmac_open,
1514        .ndo_start_xmit = xgmac_xmit,
1515        .ndo_stop = xgmac_stop,
1516        .ndo_change_mtu = xgmac_change_mtu,
1517        .ndo_set_rx_mode = xgmac_set_rx_mode,
1518        .ndo_tx_timeout = xgmac_tx_timeout,
1519        .ndo_get_stats64 = xgmac_get_stats64,
1520#ifdef CONFIG_NET_POLL_CONTROLLER
1521        .ndo_poll_controller = xgmac_poll_controller,
1522#endif
1523        .ndo_set_mac_address = xgmac_set_mac_address,
1524        .ndo_set_features = xgmac_set_features,
1525};
1526
1527static int xgmac_ethtool_getsettings(struct net_device *dev,
1528                                          struct ethtool_cmd *cmd)
1529{
1530        cmd->autoneg = 0;
1531        cmd->duplex = DUPLEX_FULL;
1532        ethtool_cmd_speed_set(cmd, 10000);
1533        cmd->supported = 0;
1534        cmd->advertising = 0;
1535        cmd->transceiver = XCVR_INTERNAL;
1536        return 0;
1537}
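
/* The link is a fixed internal 10 Gb/s full-duplex interface, so the
 * get_settings callback reports constants and no autonegotiation. */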
1538
1539static void xgmac_get_pauseparam(struct net_device *netdev,
1540                                      struct ethtool_pauseparam *pause)
1541{
1542        struct xgmac_priv *priv = netdev_priv(netdev);
1543
1544        pause->rx_pause = priv->rx_pause;
1545        pause->tx_pause = priv->tx_pause;
1546}
1547
1548static int xgmac_set_pauseparam(struct net_device *netdev,
1549                                     struct ethtool_pauseparam *pause)
1550{
1551        struct xgmac_priv *priv = netdev_priv(netdev);
1552
1553        if (pause->autoneg)
1554                return -EINVAL;
1555
1556        return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
1557}
1558
1559struct xgmac_stats {
1560        char stat_string[ETH_GSTRING_LEN];
1561        int stat_offset;
1562        bool is_reg;
1563};
1564
1565#define XGMAC_STAT(m)   \
1566        { #m, offsetof(struct xgmac_priv, xstats.m), false }
1567#define XGMAC_HW_STAT(m, reg_offset)    \
1568        { #m, reg_offset, true }
1569
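/* Each table entry below is either a software counter kept in
 * xgmac_priv.xstats (is_reg == false, stat_offset is an offsetof() into
 * the priv struct) or a hardware MMC register (is_reg == true,
 * stat_offset is a register offset read with readl()). */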
1570static const struct xgmac_stats xgmac_gstrings_stats[] = {
1571        XGMAC_STAT(tx_frame_flushed),
1572        XGMAC_STAT(tx_payload_error),
1573        XGMAC_STAT(tx_ip_header_error),
1574        XGMAC_STAT(tx_local_fault),
1575        XGMAC_STAT(tx_remote_fault),
1576        XGMAC_STAT(tx_early),
1577        XGMAC_STAT(tx_process_stopped),
1578        XGMAC_STAT(tx_jabber),
1579        XGMAC_STAT(rx_buf_unav),
1580        XGMAC_STAT(rx_process_stopped),
1581        XGMAC_STAT(rx_payload_error),
1582        XGMAC_STAT(rx_ip_header_error),
1583        XGMAC_STAT(rx_da_filter_fail),
1584        XGMAC_STAT(rx_sa_filter_fail),
1585        XGMAC_STAT(fatal_bus_error),
1586        XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
1587        XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
1588        XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
1589        XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
1590        XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
1591};
1592#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
1593
1594static void xgmac_get_ethtool_stats(struct net_device *dev,
1595                                         struct ethtool_stats *dummy,
1596                                         u64 *data)
1597{
1598        struct xgmac_priv *priv = netdev_priv(dev);
1599        void *p = priv;
1600        int i;
1601
1602        for (i = 0; i < XGMAC_STATS_LEN; i++) {
1603                if (xgmac_gstrings_stats[i].is_reg)
1604                        *data++ = readl(priv->base +
1605                                xgmac_gstrings_stats[i].stat_offset);
1606                else
1607                        *data++ = *(u32 *)(p +
1608                                xgmac_gstrings_stats[i].stat_offset);
1609        }
1610}
1611
1612static int xgmac_get_sset_count(struct net_device *netdev, int sset)
1613{
1614        switch (sset) {
1615        case ETH_SS_STATS:
1616                return XGMAC_STATS_LEN;
1617        default:
1618                return -EINVAL;
1619        }
1620}
1621
1622static void xgmac_get_strings(struct net_device *dev, u32 stringset,
1623                                   u8 *data)
1624{
1625        int i;
1626        u8 *p = data;
1627
1628        switch (stringset) {
1629        case ETH_SS_STATS:
1630                for (i = 0; i < XGMAC_STATS_LEN; i++) {
1631                        memcpy(p, xgmac_gstrings_stats[i].stat_string,
1632                               ETH_GSTRING_LEN);
1633                        p += ETH_GSTRING_LEN;
1634                }
1635                break;
1636        default:
1637                WARN_ON(1);
1638                break;
1639        }
1640}
1641
1642static void xgmac_get_wol(struct net_device *dev,
1643                               struct ethtool_wolinfo *wol)
1644{
1645        struct xgmac_priv *priv = netdev_priv(dev);
1646
1647        if (device_can_wakeup(priv->device)) {
1648                wol->supported = WAKE_MAGIC | WAKE_UCAST;
1649                wol->wolopts = priv->wolopts;
1650        }
1651}
1652
1653static int xgmac_set_wol(struct net_device *dev,
1654                              struct ethtool_wolinfo *wol)
1655{
1656        struct xgmac_priv *priv = netdev_priv(dev);
1657        u32 support = WAKE_MAGIC | WAKE_UCAST;
1658
	/* -EOPNOTSUPP is the errno userspace expects here; -ENOTSUPP is
	 * kernel-internal */
	if (!device_can_wakeup(priv->device))
		return -EOPNOTSUPP;
1661
1662        if (wol->wolopts & ~support)
1663                return -EINVAL;
1664
1665        priv->wolopts = wol->wolopts;
1666
1667        if (wol->wolopts) {
1668                device_set_wakeup_enable(priv->device, 1);
1669                enable_irq_wake(dev->irq);
1670        } else {
1671                device_set_wakeup_enable(priv->device, 0);
1672                disable_irq_wake(dev->irq);
1673        }
1674
1675        return 0;
1676}
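
/* Typical userspace usage (standard ethtool semantics): "ethtool -s ethX
 * wol g" selects WAKE_MAGIC and lands here with wol->wolopts == WAKE_MAGIC,
 * which arms the PMT wake-up interrupt via enable_irq_wake() above. */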
1677
1678static const struct ethtool_ops xgmac_ethtool_ops = {
1679        .get_settings = xgmac_ethtool_getsettings,
1680        .get_link = ethtool_op_get_link,
1681        .get_pauseparam = xgmac_get_pauseparam,
1682        .set_pauseparam = xgmac_set_pauseparam,
1683        .get_ethtool_stats = xgmac_get_ethtool_stats,
1684        .get_strings = xgmac_get_strings,
1685        .get_wol = xgmac_get_wol,
1686        .set_wol = xgmac_set_wol,
1687        .get_sset_count = xgmac_get_sset_count,
1688};
1689
/**
 * xgmac_probe
 * @pdev: platform device pointer
 * Description: the driver is initialized through the platform_device
 * framework; this function maps the device registers, requests the IRQ
 * lines and registers the net_device.
 */
1695static int xgmac_probe(struct platform_device *pdev)
1696{
1697        int ret = 0;
1698        struct resource *res;
1699        struct net_device *ndev = NULL;
1700        struct xgmac_priv *priv = NULL;
1701        u32 uid;
1702
1703        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1704        if (!res)
1705                return -ENODEV;
1706
1707        if (!request_mem_region(res->start, resource_size(res), pdev->name))
1708                return -EBUSY;
1709
1710        ndev = alloc_etherdev(sizeof(struct xgmac_priv));
1711        if (!ndev) {
1712                ret = -ENOMEM;
1713                goto err_alloc;
1714        }
1715
1716        SET_NETDEV_DEV(ndev, &pdev->dev);
1717        priv = netdev_priv(ndev);
1718        platform_set_drvdata(pdev, ndev);
1719        ether_setup(ndev);
1720        ndev->netdev_ops = &xgmac_netdev_ops;
1721        SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
1722        spin_lock_init(&priv->stats_lock);
1723
1724        priv->device = &pdev->dev;
1725        priv->dev = ndev;
1726        priv->rx_pause = 1;
1727        priv->tx_pause = 1;
1728
1729        priv->base = ioremap(res->start, resource_size(res));
1730        if (!priv->base) {
1731                netdev_err(ndev, "ioremap failed\n");
1732                ret = -ENOMEM;
1733                goto err_io;
1734        }
1735
1736        uid = readl(priv->base + XGMAC_VERSION);
1737        netdev_info(ndev, "h/w version is 0x%x\n", uid);
1738
1739        writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		/* platform_get_irq() can fail with errors other than
		 * -ENXIO, e.g. -EPROBE_DEFER, so check for any error */
		netdev_err(ndev, "No irq resource\n");
		ret = ndev->irq;
		goto err_irq;
	}
1746
1747        ret = request_irq(ndev->irq, xgmac_interrupt, 0,
1748                          dev_name(&pdev->dev), ndev);
1749        if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
1751                        ndev->irq, ret);
1752                goto err_irq;
1753        }
1754
	priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq < 0) {
		netdev_err(ndev, "No pmt irq resource\n");
		ret = priv->pmt_irq;
		goto err_pmt_irq;
	}
1761
1762        ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
1763                          dev_name(&pdev->dev), ndev);
1764        if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
1766                        priv->pmt_irq, ret);
1767                goto err_pmt_irq;
1768        }
1769
1770        device_set_wakeup_capable(&pdev->dev, 1);
1771        if (device_can_wakeup(priv->device))
1772                priv->wolopts = WAKE_MAGIC;     /* Magic Frame as default */
1773
1774        ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
1775        if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
1776                ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1777                                     NETIF_F_RXCSUM;
1778        ndev->features |= ndev->hw_features;
1779        ndev->priv_flags |= IFF_UNICAST_FLT;
1780
1781        /* Get the MAC address */
1782        xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
1783        if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid\n",
1785                         ndev->dev_addr);
1786
1787        netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
1788        ret = register_netdev(ndev);
1789        if (ret)
1790                goto err_reg;
1791
1792        return 0;
1793
1794err_reg:
1795        netif_napi_del(&priv->napi);
1796        free_irq(priv->pmt_irq, ndev);
1797err_pmt_irq:
1798        free_irq(ndev->irq, ndev);
1799err_irq:
1800        iounmap(priv->base);
1801err_io:
1802        free_netdev(ndev);
1803err_alloc:
1804        release_mem_region(res->start, resource_size(res));
1805        platform_set_drvdata(pdev, NULL);
1806        return ret;
1807}
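
/* The error labels above unwind in the reverse order of acquisition, so
 * a failure at any step releases exactly what has been set up so far. */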
1808
/**
 * xgmac_remove
 * @pdev: platform device pointer
 * Description: this function disables the MAC, frees the IRQ lines,
 * unregisters the net_device, releases the I/O region and frees the
 * allocated memory.
 */
1816static int xgmac_remove(struct platform_device *pdev)
1817{
1818        struct net_device *ndev = platform_get_drvdata(pdev);
1819        struct xgmac_priv *priv = netdev_priv(ndev);
1820        struct resource *res;
1821
1822        xgmac_mac_disable(priv->base);
1823
1824        /* Free the IRQ lines */
1825        free_irq(ndev->irq, ndev);
1826        free_irq(priv->pmt_irq, ndev);
1827
1828        platform_set_drvdata(pdev, NULL);
1829        unregister_netdev(ndev);
1830        netif_napi_del(&priv->napi);
1831
1832        iounmap(priv->base);
1833        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1834        release_mem_region(res->start, resource_size(res));
1835
1836        free_netdev(ndev);
1837
1838        return 0;
1839}
1840
1841#ifdef CONFIG_PM_SLEEP
1842static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
1843{
1844        unsigned int pmt = 0;
1845
1846        if (mode & WAKE_MAGIC)
1847                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
1848        if (mode & WAKE_UCAST)
1849                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
1850
1851        writel(pmt, ioaddr + XGMAC_PMT);
1852}
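
/* xgmac_pmt() programs the power management register: POWERDOWN gates the
 * receivers until a wake-up event, while MAGIC_PKT/GLBL_UNICAST select
 * which frames count as wake-up events. Writing 0, as done in
 * xgmac_resume(), takes the MAC back out of power-down. */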
1853
1854static int xgmac_suspend(struct device *dev)
1855{
1856        struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1857        struct xgmac_priv *priv = netdev_priv(ndev);
1858        u32 value;
1859
1860        if (!ndev || !netif_running(ndev))
1861                return 0;
1862
1863        netif_device_detach(ndev);
1864        napi_disable(&priv->napi);
1865        writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1866
1867        if (device_may_wakeup(priv->device)) {
1868                /* Stop TX/RX DMA Only */
1869                value = readl(priv->base + XGMAC_DMA_CONTROL);
1870                value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
1871                writel(value, priv->base + XGMAC_DMA_CONTROL);
1872
1873                xgmac_pmt(priv->base, priv->wolopts);
	} else {
		xgmac_mac_disable(priv->base);
	}
1876
1877        return 0;
1878}
1879
1880static int xgmac_resume(struct device *dev)
1881{
1882        struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1883        struct xgmac_priv *priv = netdev_priv(ndev);
1884        void __iomem *ioaddr = priv->base;
1885
1886        if (!netif_running(ndev))
1887                return 0;
1888
1889        xgmac_pmt(ioaddr, 0);
1890
1891        /* Enable the MAC and DMA */
1892        xgmac_mac_enable(ioaddr);
1893        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
1894        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
1895
1896        netif_device_attach(ndev);
1897        napi_enable(&priv->napi);
1898
1899        return 0;
1900}
1901
1902static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
1903#define XGMAC_PM_OPS (&xgmac_pm_ops)
1904#else
1905#define XGMAC_PM_OPS NULL
1906#endif /* CONFIG_PM_SLEEP */
1907
1908static const struct of_device_id xgmac_of_match[] = {
1909        { .compatible = "calxeda,hb-xgmac", },
1910        {},
1911};
1912MODULE_DEVICE_TABLE(of, xgmac_of_match);
1913
static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
		.pm = XGMAC_PM_OPS,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
};
1923
1924module_platform_driver(xgmac_driver);
1925
1926MODULE_AUTHOR("Calxeda, Inc.");
1927MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
1928MODULE_LICENSE("GPL v2");
1929