linux/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 1999 - 2010 Intel Corporation.
   4 * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
   5 *
   6 * This code was derived from the Intel e1000e Linux driver.
   7 */
   8
   9#include "pch_gbe.h"
  10#include "pch_gbe_phy.h"
  11#include <linux/module.h>
  12#include <linux/net_tstamp.h>
  13#include <linux/ptp_classify.h>
  14#include <linux/ptp_pch.h>
  15#include <linux/gpio.h>
  16
  17#define DRV_VERSION     "1.01"
  18const char pch_driver_version[] = DRV_VERSION;
  19
  20#define PCH_GBE_MAR_ENTRIES             16
  21#define PCH_GBE_SHORT_PKT               64
  22#define DSC_INIT16                      0xC000
  23#define PCH_GBE_DMA_ALIGN               0
  24#define PCH_GBE_DMA_PADDING             2
  25#define PCH_GBE_WATCHDOG_PERIOD         (5 * HZ)        /* watchdog time */
  26#define PCH_GBE_PCI_BAR                 1
  27#define PCH_GBE_RESERVE_MEMORY          0x200000        /* 2MB */
  28
  29#define PCI_DEVICE_ID_INTEL_IOH1_GBE            0x8802
  30
  31#define PCI_DEVICE_ID_ROHM_ML7223_GBE           0x8013
  32#define PCI_DEVICE_ID_ROHM_ML7831_GBE           0x8802
  33
  34#define PCH_GBE_TX_WEIGHT         64
  35#define PCH_GBE_RX_WEIGHT         64
  36#define PCH_GBE_RX_BUFFER_WRITE   16
  37
  38/* Initialize the wake-on-LAN settings */
  39#define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
  40
  41#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
  42        PCH_GBE_CHIP_TYPE_INTERNAL | \
  43        PCH_GBE_RGMII_MODE_RGMII     \
  44        )
  45
   46/* Receive buffer and frame size values */
  47#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
  48#define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
  49#define PCH_GBE_FRAME_SIZE_2048         2048
  50#define PCH_GBE_FRAME_SIZE_4096         4096
  51#define PCH_GBE_FRAME_SIZE_8192         8192
  52
  53#define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
  54#define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
  55#define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
  56#define PCH_GBE_DESC_UNUSED(R) \
  57        ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
  58        (R)->next_to_clean - (R)->next_to_use - 1)
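/*
 * Worked example (added note, not from the original source): with
 * count = 8, next_to_clean = 2 and next_to_use = 5, the clean index is
 * not ahead of the use index, so PCH_GBE_DESC_UNUSED evaluates to
 * 8 + 2 - 5 - 1 = 4 free descriptors.  The "- 1" keeps one slot
 * permanently unused so that next_to_clean == next_to_use can only
 * mean "ring empty", never "ring full".
 */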
  59
  60/* Pause packet value */
  61#define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
  62#define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
  63#define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
  64#define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
  65
  66
   67/* This defines the bits that are set in the Interrupt Mask
   68 * Set/Read Register.  Each bit is documented below:
   69 *   o RX_DMA_CMPLT = Receive DMA Completion
   70 *   o RX_DSC_EMP   = Receive Descriptor Empty
   71 *   o RX_FIFO_ERR  = Receive FIFO Error (overrun)
   72 *   o WOL_DET      = Wake-on-LAN Detect
   73 *   o TX_CMPLT     = Transmit Completion
   74 */
  75#define PCH_GBE_INT_ENABLE_MASK ( \
  76        PCH_GBE_INT_RX_DMA_CMPLT |    \
  77        PCH_GBE_INT_RX_DSC_EMP   |    \
  78        PCH_GBE_INT_RX_FIFO_ERR  |    \
  79        PCH_GBE_INT_WOL_DET      |    \
  80        PCH_GBE_INT_TX_CMPLT          \
  81        )
  82
  83#define PCH_GBE_INT_DISABLE_ALL         0
  84
  85/* Macros for ieee1588 */
  86/* 0x40 Time Synchronization Channel Control Register Bits */
  87#define MASTER_MODE   (1<<0)
  88#define SLAVE_MODE    (0)
  89#define V2_MODE       (1<<31)
  90#define CAP_MODE0     (0)
  91#define CAP_MODE2     (1<<17)
  92
  93/* 0x44 Time Synchronization Channel Event Register Bits */
  94#define TX_SNAPSHOT_LOCKED (1<<0)
  95#define RX_SNAPSHOT_LOCKED (1<<1)
  96
  97#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
  98#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
  99
 100#define MINNOW_PHY_RESET_GPIO           13
 101
 102static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 103static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 104                               int data);
 105static void pch_gbe_set_multi(struct net_device *netdev);
 106
 107static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 108{
 109        u8 *data = skb->data;
 110        unsigned int offset;
 111        u16 *hi, *id;
 112        u32 lo;
 113
 114        if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
 115                return 0;
 116
 117        offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
 118
 119        if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
 120                return 0;
 121
 122        hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
 123        id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
 124
 125        memcpy(&lo, &hi[1], sizeof(lo));
 126
 127        return (uid_hi == *hi &&
 128                uid_lo == lo &&
 129                seqid  == *id);
 130}
 131
 132static void
 133pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 134{
 135        struct skb_shared_hwtstamps *shhwtstamps;
 136        struct pci_dev *pdev;
 137        u64 ns;
 138        u32 hi, lo, val;
 139        u16 uid, seq;
 140
 141        if (!adapter->hwts_rx_en)
 142                return;
 143
 144        /* Get ieee1588's dev information */
 145        pdev = adapter->ptp_pdev;
 146
 147        val = pch_ch_event_read(pdev);
 148
 149        if (!(val & RX_SNAPSHOT_LOCKED))
 150                return;
 151
 152        lo = pch_src_uuid_lo_read(pdev);
 153        hi = pch_src_uuid_hi_read(pdev);
 154
 155        uid = hi & 0xffff;
 156        seq = (hi >> 16) & 0xffff;
 157
 158        if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
 159                goto out;
 160
 161        ns = pch_rx_snap_read(pdev);
 162
 163        shhwtstamps = skb_hwtstamps(skb);
 164        memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 165        shhwtstamps->hwtstamp = ns_to_ktime(ns);
 166out:
 167        pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
 168}
 169
 170static void
 171pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 172{
 173        struct skb_shared_hwtstamps shhwtstamps;
 174        struct pci_dev *pdev;
 175        struct skb_shared_info *shtx;
 176        u64 ns;
 177        u32 cnt, val;
 178
 179        shtx = skb_shinfo(skb);
 180        if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
 181                return;
 182
 183        shtx->tx_flags |= SKBTX_IN_PROGRESS;
 184
 185        /* Get ieee1588's dev information */
 186        pdev = adapter->ptp_pdev;
 187
 188        /*
 189         * This really stinks, but we have to poll for the Tx time stamp.
 190         */
 191        for (cnt = 0; cnt < 100; cnt++) {
 192                val = pch_ch_event_read(pdev);
 193                if (val & TX_SNAPSHOT_LOCKED)
 194                        break;
 195                udelay(1);
 196        }
 197        if (!(val & TX_SNAPSHOT_LOCKED)) {
 198                shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
 199                return;
 200        }
 201
 202        ns = pch_tx_snap_read(pdev);
 203
 204        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 205        shhwtstamps.hwtstamp = ns_to_ktime(ns);
 206        skb_tstamp_tx(skb, &shhwtstamps);
 207
 208        pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
 209}
 210
 211static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 212{
 213        struct hwtstamp_config cfg;
 214        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 215        struct pci_dev *pdev;
 216        u8 station[20];
 217
 218        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
 219                return -EFAULT;
 220
 221        if (cfg.flags) /* reserved for future extensions */
 222                return -EINVAL;
 223
 224        /* Get ieee1588's dev information */
 225        pdev = adapter->ptp_pdev;
 226
 227        if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
 228                return -ERANGE;
 229
 230        switch (cfg.rx_filter) {
 231        case HWTSTAMP_FILTER_NONE:
 232                adapter->hwts_rx_en = 0;
 233                break;
 234        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 235                adapter->hwts_rx_en = 0;
 236                pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
 237                break;
 238        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 239                adapter->hwts_rx_en = 1;
 240                pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
 241                break;
 242        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 243                adapter->hwts_rx_en = 1;
 244                pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
 245                strcpy(station, PTP_L4_MULTICAST_SA);
 246                pch_set_station_address(station, pdev);
 247                break;
 248        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 249                adapter->hwts_rx_en = 1;
 250                pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
 251                strcpy(station, PTP_L2_MULTICAST_SA);
 252                pch_set_station_address(station, pdev);
 253                break;
 254        default:
 255                return -ERANGE;
 256        }
 257
 258        adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
 259
 260        /* Clear out any old time stamps. */
 261        pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
 262
 263        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 264}
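
/*
 * Usage sketch (added, illustrative only): hwtstamp_ioctl() is reached
 * through the standard SIOCSHWTSTAMP ioctl, with ifr_data pointing at a
 * struct hwtstamp_config.  A minimal userspace caller, assuming a
 * hypothetical interface named "eth0", might look like:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
 *	};
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *
 * The driver copies the configuration it accepted back through the same
 * buffer, so cfg can be inspected after the call returns.
 */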
 265
 266static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
 267{
 268        iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
 269}
 270
 271/**
 272 * pch_gbe_mac_read_mac_addr - Read MAC address
 273 * @hw:             Pointer to the HW structure
 274 * Returns:
 275 *      0:                      Successful.
 276 */
 277static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
 278{
 279        struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 280        u32  adr1a, adr1b;
 281
 282        adr1a = ioread32(&hw->reg->mac_adr[0].high);
 283        adr1b = ioread32(&hw->reg->mac_adr[0].low);
 284
 285        hw->mac.addr[0] = (u8)(adr1a & 0xFF);
 286        hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
 287        hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
 288        hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
 289        hw->mac.addr[4] = (u8)(adr1b & 0xFF);
 290        hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
 291
 292        netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
 293        return 0;
 294}
 295
 296/**
 297 * pch_gbe_wait_clr_bit - Wait to clear a bit
  298 * @reg:        Pointer to the register
 299 * @bit:        Busy bit
 300 */
 301static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 302{
 303        u32 tmp;
 304
 305        /* wait busy */
 306        tmp = 1000;
 307        while ((ioread32(reg) & bit) && --tmp)
 308                cpu_relax();
 309        if (!tmp)
 310                pr_err("Error: busy bit is not cleared\n");
 311}
 312
 313/**
 314 * pch_gbe_mac_mar_set - Set MAC address register
 315 * @hw:     Pointer to the HW structure
 316 * @addr:   Pointer to the MAC address
 317 * @index:  MAC address array register
 318 */
 319static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
 320{
 321        struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 322        u32 mar_low, mar_high, adrmask;
 323
 324        netdev_dbg(adapter->netdev, "index : 0x%x\n", index);
 325
 326        /*
 327         * HW expects these in little endian so we reverse the byte order
 328         * from network order (big endian) to little endian
 329         */
 330        mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
 331                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
 332        mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
  333        /* Disable the MAC address at this index */
 334        adrmask = ioread32(&hw->reg->ADDR_MASK);
 335        iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
 336        /* wait busy */
 337        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 338        /* Set the MAC address to the MAC address 1A/1B register */
 339        iowrite32(mar_high, &hw->reg->mac_adr[index].high);
 340        iowrite32(mar_low, &hw->reg->mac_adr[index].low);
  341        /* Re-enable the MAC address at this index */
 342        iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
 343        /* wait busy */
 344        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 345}
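
/*
 * Worked example (added note): for the MAC address 00:11:22:33:44:55,
 * pch_gbe_mac_mar_set() above builds mar_high = 0x33221100 and
 * mar_low = 0x00005544, i.e. the first four octets go into the high
 * register and the last two into the low register, least significant
 * byte first, matching the little-endian layout the hardware expects.
 */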
 346
 347/**
 348 * pch_gbe_mac_reset_hw - Reset hardware
 349 * @hw: Pointer to the HW structure
 350 */
 351static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 352{
  353        /* Read the MAC address and store it in the private data */
 354        pch_gbe_mac_read_mac_addr(hw);
 355        iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
 356        iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
 357        pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
 358        /* Setup the receive addresses */
 359        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 360        return;
 361}
 362
 363static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
 364{
 365        u32 rctl;
 366        /* Disables Receive MAC */
 367        rctl = ioread32(&hw->reg->MAC_RX_EN);
 368        iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 369}
 370
 371static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
 372{
 373        u32 rctl;
 374        /* Enables Receive MAC */
 375        rctl = ioread32(&hw->reg->MAC_RX_EN);
 376        iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 377}
 378
 379/**
  380 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 381 * @hw: Pointer to the HW structure
  382 * @mar_count: Number of receive address registers
 383 */
 384static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
 385{
 386        u32 i;
 387
 388        /* Setup the receive address */
 389        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 390
 391        /* Zero out the other receive addresses */
 392        for (i = 1; i < mar_count; i++) {
 393                iowrite32(0, &hw->reg->mac_adr[i].high);
 394                iowrite32(0, &hw->reg->mac_adr[i].low);
 395        }
 396        iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
 397        /* wait busy */
 398        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 399}
 400
 401/**
 402 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 403 * @hw:             Pointer to the HW structure
 404 * Returns:
 405 *      0:                      Successful.
 406 *      Negative value:         Failed.
 407 */
 408s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
 409{
 410        struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 411        struct pch_gbe_mac_info *mac = &hw->mac;
 412        u32 rx_fctrl;
 413
 414        netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);
 415
 416        rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
 417
 418        switch (mac->fc) {
 419        case PCH_GBE_FC_NONE:
 420                rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 421                mac->tx_fc_enable = false;
 422                break;
 423        case PCH_GBE_FC_RX_PAUSE:
 424                rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 425                mac->tx_fc_enable = false;
 426                break;
 427        case PCH_GBE_FC_TX_PAUSE:
 428                rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 429                mac->tx_fc_enable = true;
 430                break;
 431        case PCH_GBE_FC_FULL:
 432                rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 433                mac->tx_fc_enable = true;
 434                break;
 435        default:
 436                netdev_err(adapter->netdev,
 437                           "Flow control param set incorrectly\n");
 438                return -EINVAL;
 439        }
 440        if (mac->link_duplex == DUPLEX_HALF)
 441                rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 442        iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
 443        netdev_dbg(adapter->netdev,
 444                   "RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
 445                   ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
 446        return 0;
 447}
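
/*
 * Summary of the mapping above (added note): the requested flow-control
 * mode is split into two independent controls, the PCH_GBE_FL_CTRL_EN
 * bit in RX_FCTRL and the software flag mac->tx_fc_enable.  FC_NONE
 * clears both, FC_RX_PAUSE sets only the register bit, FC_TX_PAUSE sets
 * only the software flag, FC_FULL sets both, and the register bit is
 * forced off again on half-duplex links.
 */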
 448
 449/**
 450 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 451 * @hw:     Pointer to the HW structure
 452 * @wu_evt: Wake up event
 453 */
 454static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
 455{
 456        struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 457        u32 addr_mask;
 458
 459        netdev_dbg(adapter->netdev, "wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
 460                   wu_evt, ioread32(&hw->reg->ADDR_MASK));
 461
 462        if (wu_evt) {
 463                /* Set Wake-On-Lan address mask */
 464                addr_mask = ioread32(&hw->reg->ADDR_MASK);
 465                iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
 466                /* wait busy */
 467                pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
 468                iowrite32(0, &hw->reg->WOL_ST);
 469                iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
 470                iowrite32(0x02, &hw->reg->TCPIP_ACC);
 471                iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 472        } else {
 473                iowrite32(0, &hw->reg->WOL_CTRL);
 474                iowrite32(0, &hw->reg->WOL_ST);
 475        }
 476        return;
 477}
 478
 479/**
 480 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 481 * @hw:   Pointer to the HW structure
 482 * @addr: Address of PHY
  483 * @dir:  Operation (Write or Read)
 484 * @reg:  Access register of PHY
 485 * @data: Write data.
 486 *
  487 * Returns: Read data.
 488 */
 489u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
 490                        u16 data)
 491{
 492        struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 493        u32 data_out = 0;
 494        unsigned int i;
 495        unsigned long flags;
 496
 497        spin_lock_irqsave(&hw->miim_lock, flags);
 498
 499        for (i = 100; i; --i) {
 500                if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
 501                        break;
 502                udelay(20);
 503        }
 504        if (i == 0) {
 505                netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
 506                spin_unlock_irqrestore(&hw->miim_lock, flags);
 507                return 0;       /* No way to indicate timeout error */
 508        }
 509        iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
 510                  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
 511                  dir | data), &hw->reg->MIIM);
 512        for (i = 0; i < 100; i++) {
 513                udelay(20);
 514                data_out = ioread32(&hw->reg->MIIM);
 515                if ((data_out & PCH_GBE_MIIM_OPER_READY))
 516                        break;
 517        }
 518        spin_unlock_irqrestore(&hw->miim_lock, flags);
 519
 520        netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
 521                   dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
 522                   dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
 523        return (u16) data_out;
 524}
 525
 526/**
 527 * pch_gbe_mac_set_pause_packet - Set pause packet
 528 * @hw:   Pointer to the HW structure
 529 */
 530static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
 531{
 532        struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 533        unsigned long tmp2, tmp3;
 534
 535        /* Set Pause packet */
 536        tmp2 = hw->mac.addr[1];
 537        tmp2 = (tmp2 << 8) | hw->mac.addr[0];
 538        tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
 539
 540        tmp3 = hw->mac.addr[5];
 541        tmp3 = (tmp3 << 8) | hw->mac.addr[4];
 542        tmp3 = (tmp3 << 8) | hw->mac.addr[3];
 543        tmp3 = (tmp3 << 8) | hw->mac.addr[2];
 544
 545        iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
 546        iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
 547        iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
 548        iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
 549        iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
 550
 551        /* Transmit Pause Packet */
 552        iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
 553
 554        netdev_dbg(adapter->netdev,
 555                   "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
 556                   ioread32(&hw->reg->PAUSE_PKT1),
 557                   ioread32(&hw->reg->PAUSE_PKT2),
 558                   ioread32(&hw->reg->PAUSE_PKT3),
 559                   ioread32(&hw->reg->PAUSE_PKT4),
 560                   ioread32(&hw->reg->PAUSE_PKT5));
 561
 562        return;
 563}
 564
 565
 566/**
 567 * pch_gbe_alloc_queues - Allocate memory for all rings
 568 * @adapter:  Board private structure to initialize
 569 * Returns:
  570 *      0:      Successful
 571 *      Negative value: Failed
 572 */
 573static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 574{
 575        adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
 576                                        sizeof(*adapter->tx_ring), GFP_KERNEL);
 577        if (!adapter->tx_ring)
 578                return -ENOMEM;
 579
 580        adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
 581                                        sizeof(*adapter->rx_ring), GFP_KERNEL);
 582        if (!adapter->rx_ring)
 583                return -ENOMEM;
 584        return 0;
 585}
 586
 587/**
  588 * pch_gbe_init_stats - Initialize statistics
 589 * @adapter:  Board private structure to initialize
 590 */
 591static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
 592{
 593        memset(&adapter->stats, 0, sizeof(adapter->stats));
 594        return;
 595}
 596
 597/**
 598 * pch_gbe_init_phy - Initialize PHY
 599 * @adapter:  Board private structure to initialize
 600 * Returns:
  601 *      0:      Successful
 602 *      Negative value: Failed
 603 */
 604static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
 605{
 606        struct net_device *netdev = adapter->netdev;
 607        u32 addr;
 608        u16 bmcr, stat;
 609
 610        /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
 611        for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 612                adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
 613                bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
 614                stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 615                stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 616                if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
 617                        break;
 618        }
 619        adapter->hw.phy.addr = adapter->mii.phy_id;
 620        netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
 621        if (addr == PCH_GBE_PHY_REGS_LEN)
 622                return -EAGAIN;
  623        /* Select the PHY and isolate the rest */
 624        for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 625                if (addr != adapter->mii.phy_id) {
 626                        pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 627                                           BMCR_ISOLATE);
 628                } else {
 629                        bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
 630                        pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 631                                           bmcr & ~BMCR_ISOLATE);
 632                }
 633        }
 634
 635        /* MII setup */
 636        adapter->mii.phy_id_mask = 0x1F;
 637        adapter->mii.reg_num_mask = 0x1F;
 638        adapter->mii.dev = adapter->netdev;
 639        adapter->mii.mdio_read = pch_gbe_mdio_read;
 640        adapter->mii.mdio_write = pch_gbe_mdio_write;
 641        adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
 642        return 0;
 643}
 644
 645/**
 646 * pch_gbe_mdio_read - The read function for mii
 647 * @netdev: Network interface device structure
 648 * @addr:   Phy ID
 649 * @reg:    Access location
  650 * Returns:
  651 *      The value read from the PHY register, or 0 if the MIIM
  652 *      interface did not become ready
 653 */
 654static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
 655{
 656        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 657        struct pch_gbe_hw *hw = &adapter->hw;
 658
 659        return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
 660                                     (u16) 0);
 661}
 662
 663/**
 664 * pch_gbe_mdio_write - The write function for mii
 665 * @netdev: Network interface device structure
  666 * @addr:   Phy ID
 667 * @reg:    Access location
 668 * @data:   Write data
 669 */
 670static void pch_gbe_mdio_write(struct net_device *netdev,
 671                               int addr, int reg, int data)
 672{
 673        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 674        struct pch_gbe_hw *hw = &adapter->hw;
 675
 676        pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
 677}
 678
 679/**
 680 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
  681 * @work:  Work structure embedded in the board private structure
 682 */
 683static void pch_gbe_reset_task(struct work_struct *work)
 684{
 685        struct pch_gbe_adapter *adapter;
 686        adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 687
 688        rtnl_lock();
 689        pch_gbe_reinit_locked(adapter);
 690        rtnl_unlock();
 691}
 692
 693/**
  694 * pch_gbe_reinit_locked - Re-initialize the device
 695 * @adapter:  Board private structure
 696 */
 697void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 698{
 699        pch_gbe_down(adapter);
 700        pch_gbe_up(adapter);
 701}
 702
 703/**
 704 * pch_gbe_reset - Reset GbE
 705 * @adapter:  Board private structure
 706 */
 707void pch_gbe_reset(struct pch_gbe_adapter *adapter)
 708{
 709        struct net_device *netdev = adapter->netdev;
 710        struct pch_gbe_hw *hw = &adapter->hw;
 711        s32 ret_val;
 712
 713        pch_gbe_mac_reset_hw(hw);
 714        /* reprogram multicast address register after reset */
 715        pch_gbe_set_multi(netdev);
 716        /* Setup the receive address. */
 717        pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
 718
 719        ret_val = pch_gbe_phy_get_id(hw);
 720        if (ret_val) {
 721                netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
 722                return;
 723        }
 724        pch_gbe_phy_init_setting(hw);
 725        /* Setup Mac interface option RGMII */
 726        pch_gbe_phy_set_rgmii(hw);
 727}
 728
 729/**
 730 * pch_gbe_free_irq - Free an interrupt
 731 * @adapter:  Board private structure
 732 */
 733static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
 734{
 735        struct net_device *netdev = adapter->netdev;
 736
 737        free_irq(adapter->irq, netdev);
 738        pci_free_irq_vectors(adapter->pdev);
 739}
 740
 741/**
 742 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 743 * @adapter:  Board private structure
 744 */
 745static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
 746{
 747        struct pch_gbe_hw *hw = &adapter->hw;
 748
 749        atomic_inc(&adapter->irq_sem);
 750        iowrite32(0, &hw->reg->INT_EN);
 751        ioread32(&hw->reg->INT_ST);
 752        synchronize_irq(adapter->irq);
 753
 754        netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
 755                   ioread32(&hw->reg->INT_EN));
 756}
 757
 758/**
 759 * pch_gbe_irq_enable - Enable default interrupt generation settings
 760 * @adapter:  Board private structure
 761 */
 762static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
 763{
 764        struct pch_gbe_hw *hw = &adapter->hw;
 765
 766        if (likely(atomic_dec_and_test(&adapter->irq_sem)))
 767                iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 768        ioread32(&hw->reg->INT_ST);
 769        netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
 770                   ioread32(&hw->reg->INT_EN));
 771}
 772
 773
 774
 775/**
 776 * pch_gbe_setup_tctl - configure the Transmit control registers
 777 * @adapter:  Board private structure
 778 */
 779static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
 780{
 781        struct pch_gbe_hw *hw = &adapter->hw;
 782        u32 tx_mode, tcpip;
 783
 784        tx_mode = PCH_GBE_TM_LONG_PKT |
 785                PCH_GBE_TM_ST_AND_FD |
 786                PCH_GBE_TM_SHORT_PKT |
 787                PCH_GBE_TM_TH_TX_STRT_8 |
 788                PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
 789
 790        iowrite32(tx_mode, &hw->reg->TX_MODE);
 791
 792        tcpip = ioread32(&hw->reg->TCPIP_ACC);
 793        tcpip |= PCH_GBE_TX_TCPIPACC_EN;
 794        iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 795        return;
 796}
 797
 798/**
 799 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 800 * @adapter:  Board private structure
 801 */
 802static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
 803{
 804        struct pch_gbe_hw *hw = &adapter->hw;
 805        u32 tdba, tdlen, dctrl;
 806
 807        netdev_dbg(adapter->netdev, "dma addr = 0x%08llx  size = 0x%08x\n",
 808                   (unsigned long long)adapter->tx_ring->dma,
 809                   adapter->tx_ring->size);
 810
 811        /* Setup the HW Tx Head and Tail descriptor pointers */
 812        tdba = adapter->tx_ring->dma;
 813        tdlen = adapter->tx_ring->size - 0x10;
 814        iowrite32(tdba, &hw->reg->TX_DSC_BASE);
 815        iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
 816        iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
 817
 818        /* Enables Transmission DMA */
 819        dctrl = ioread32(&hw->reg->DMA_CTRL);
 820        dctrl |= PCH_GBE_TX_DMA_EN;
 821        iowrite32(dctrl, &hw->reg->DMA_CTRL);
 822}
 823
 824/**
 825 * pch_gbe_setup_rctl - Configure the receive control registers
 826 * @adapter:  Board private structure
 827 */
 828static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 829{
 830        struct pch_gbe_hw *hw = &adapter->hw;
 831        u32 rx_mode, tcpip;
 832
 833        rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
 834        PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
 835
 836        iowrite32(rx_mode, &hw->reg->RX_MODE);
 837
 838        tcpip = ioread32(&hw->reg->TCPIP_ACC);
 839
 840        tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
 841        tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
 842        iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 843        return;
 844}
 845
 846/**
 847 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 848 * @adapter:  Board private structure
 849 */
 850static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 851{
 852        struct pch_gbe_hw *hw = &adapter->hw;
 853        u32 rdba, rdlen, rxdma;
 854
 855        netdev_dbg(adapter->netdev, "dma adr = 0x%08llx  size = 0x%08x\n",
 856                   (unsigned long long)adapter->rx_ring->dma,
 857                   adapter->rx_ring->size);
 858
 859        pch_gbe_mac_force_mac_fc(hw);
 860
 861        pch_gbe_disable_mac_rx(hw);
 862
 863        /* Disables Receive DMA */
 864        rxdma = ioread32(&hw->reg->DMA_CTRL);
 865        rxdma &= ~PCH_GBE_RX_DMA_EN;
 866        iowrite32(rxdma, &hw->reg->DMA_CTRL);
 867
 868        netdev_dbg(adapter->netdev,
 869                   "MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
 870                   ioread32(&hw->reg->MAC_RX_EN),
 871                   ioread32(&hw->reg->DMA_CTRL));
 872
 873        /* Setup the HW Rx Head and Tail Descriptor Pointers and
 874         * the Base and Length of the Rx Descriptor Ring */
 875        rdba = adapter->rx_ring->dma;
 876        rdlen = adapter->rx_ring->size - 0x10;
 877        iowrite32(rdba, &hw->reg->RX_DSC_BASE);
 878        iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
 879        iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
 880}
 881
 882/**
 883 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 884 * @adapter:     Board private structure
 885 * @buffer_info: Buffer information structure
 886 */
 887static void pch_gbe_unmap_and_free_tx_resource(
 888        struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
 889{
 890        if (buffer_info->mapped) {
 891                dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 892                                 buffer_info->length, DMA_TO_DEVICE);
 893                buffer_info->mapped = false;
 894        }
 895        if (buffer_info->skb) {
 896                dev_kfree_skb_any(buffer_info->skb);
 897                buffer_info->skb = NULL;
 898        }
 899}
 900
 901/**
 902 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 903 * @adapter:      Board private structure
 904 * @buffer_info:  Buffer information structure
 905 */
 906static void pch_gbe_unmap_and_free_rx_resource(
 907                                        struct pch_gbe_adapter *adapter,
 908                                        struct pch_gbe_buffer *buffer_info)
 909{
 910        if (buffer_info->mapped) {
 911                dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 912                                 buffer_info->length, DMA_FROM_DEVICE);
 913                buffer_info->mapped = false;
 914        }
 915        if (buffer_info->skb) {
 916                dev_kfree_skb_any(buffer_info->skb);
 917                buffer_info->skb = NULL;
 918        }
 919}
 920
 921/**
 922 * pch_gbe_clean_tx_ring - Free Tx Buffers
 923 * @adapter:  Board private structure
 924 * @tx_ring:  Ring to be cleaned
 925 */
 926static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
 927                                   struct pch_gbe_tx_ring *tx_ring)
 928{
 929        struct pch_gbe_hw *hw = &adapter->hw;
 930        struct pch_gbe_buffer *buffer_info;
 931        unsigned long size;
 932        unsigned int i;
 933
 934        /* Free all the Tx ring sk_buffs */
 935        for (i = 0; i < tx_ring->count; i++) {
 936                buffer_info = &tx_ring->buffer_info[i];
 937                pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
 938        }
 939        netdev_dbg(adapter->netdev,
 940                   "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
 941
 942        size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 943        memset(tx_ring->buffer_info, 0, size);
 944
 945        /* Zero out the descriptor ring */
 946        memset(tx_ring->desc, 0, tx_ring->size);
 947        tx_ring->next_to_use = 0;
 948        tx_ring->next_to_clean = 0;
 949        iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
 950        iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
 951}
 952
 953/**
 954 * pch_gbe_clean_rx_ring - Free Rx Buffers
 955 * @adapter:  Board private structure
 956 * @rx_ring:  Ring to free buffers from
 957 */
 958static void
 959pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
 960                      struct pch_gbe_rx_ring *rx_ring)
 961{
 962        struct pch_gbe_hw *hw = &adapter->hw;
 963        struct pch_gbe_buffer *buffer_info;
 964        unsigned long size;
 965        unsigned int i;
 966
 967        /* Free all the Rx ring sk_buffs */
 968        for (i = 0; i < rx_ring->count; i++) {
 969                buffer_info = &rx_ring->buffer_info[i];
 970                pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
 971        }
 972        netdev_dbg(adapter->netdev,
 973                   "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
 974        size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 975        memset(rx_ring->buffer_info, 0, size);
 976
 977        /* Zero out the descriptor ring */
 978        memset(rx_ring->desc, 0, rx_ring->size);
 979        rx_ring->next_to_clean = 0;
 980        rx_ring->next_to_use = 0;
 981        iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
 982        iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
 983}
 984
 985static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
 986                                    u16 duplex)
 987{
 988        struct pch_gbe_hw *hw = &adapter->hw;
 989        unsigned long rgmii = 0;
 990
 991        /* Set the RGMII control. */
 992        switch (speed) {
 993        case SPEED_10:
 994                rgmii = (PCH_GBE_RGMII_RATE_2_5M |
 995                         PCH_GBE_MAC_RGMII_CTRL_SETTING);
 996                break;
 997        case SPEED_100:
 998                rgmii = (PCH_GBE_RGMII_RATE_25M |
 999                         PCH_GBE_MAC_RGMII_CTRL_SETTING);
1000                break;
1001        case SPEED_1000:
1002                rgmii = (PCH_GBE_RGMII_RATE_125M |
1003                         PCH_GBE_MAC_RGMII_CTRL_SETTING);
1004                break;
1005        }
1006        iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1007}
1008static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1009                              u16 duplex)
1010{
1011        struct net_device *netdev = adapter->netdev;
1012        struct pch_gbe_hw *hw = &adapter->hw;
1013        unsigned long mode = 0;
1014
1015        /* Set the communication mode */
1016        switch (speed) {
1017        case SPEED_10:
1018                mode = PCH_GBE_MODE_MII_ETHER;
1019                netdev->tx_queue_len = 10;
1020                break;
1021        case SPEED_100:
1022                mode = PCH_GBE_MODE_MII_ETHER;
1023                netdev->tx_queue_len = 100;
1024                break;
1025        case SPEED_1000:
1026                mode = PCH_GBE_MODE_GMII_ETHER;
1027                break;
1028        }
1029        if (duplex == DUPLEX_FULL)
1030                mode |= PCH_GBE_MODE_FULL_DUPLEX;
1031        else
1032                mode |= PCH_GBE_MODE_HALF_DUPLEX;
1033        iowrite32(mode, &hw->reg->MODE);
1034}
1035
1036/**
1037 * pch_gbe_watchdog - Watchdog process
 1038 * @t:  watchdog timer embedded in the board private structure
1039 */
1040static void pch_gbe_watchdog(struct timer_list *t)
1041{
1042        struct pch_gbe_adapter *adapter = from_timer(adapter, t,
1043                                                     watchdog_timer);
1044        struct net_device *netdev = adapter->netdev;
1045        struct pch_gbe_hw *hw = &adapter->hw;
1046
1047        netdev_dbg(netdev, "right now = %ld\n", jiffies);
1048
1049        pch_gbe_update_stats(adapter);
1050        if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
1051                struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1052                netdev->tx_queue_len = adapter->tx_queue_len;
1053                /* mii library handles link maintenance tasks */
1054                if (mii_ethtool_gset(&adapter->mii, &cmd)) {
1055                        netdev_err(netdev, "ethtool get setting Error\n");
1056                        mod_timer(&adapter->watchdog_timer,
1057                                  round_jiffies(jiffies +
1058                                                PCH_GBE_WATCHDOG_PERIOD));
1059                        return;
1060                }
1061                hw->mac.link_speed = ethtool_cmd_speed(&cmd);
1062                hw->mac.link_duplex = cmd.duplex;
1063                /* Set the RGMII control. */
1064                pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1065                                                hw->mac.link_duplex);
1066                /* Set the communication mode */
1067                pch_gbe_set_mode(adapter, hw->mac.link_speed,
1068                                 hw->mac.link_duplex);
1069                netdev_dbg(netdev,
1070                           "Link is Up %d Mbps %s-Duplex\n",
1071                           hw->mac.link_speed,
1072                           cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1073                netif_carrier_on(netdev);
1074                netif_wake_queue(netdev);
1075        } else if ((!mii_link_ok(&adapter->mii)) &&
1076                   (netif_carrier_ok(netdev))) {
1077                netdev_dbg(netdev, "NIC Link is Down\n");
1078                hw->mac.link_speed = SPEED_10;
1079                hw->mac.link_duplex = DUPLEX_HALF;
1080                netif_carrier_off(netdev);
1081                netif_stop_queue(netdev);
1082        }
1083        mod_timer(&adapter->watchdog_timer,
1084                  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1085}
1086
1087/**
 1088 * pch_gbe_tx_queue - Queue a packet for transmission
1089 * @adapter:  Board private structure
1090 * @tx_ring:  Tx descriptor ring structure
 1091 * @skb:      Socket buffer structure
1092 */
1093static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1094                              struct pch_gbe_tx_ring *tx_ring,
1095                              struct sk_buff *skb)
1096{
1097        struct pch_gbe_hw *hw = &adapter->hw;
1098        struct pch_gbe_tx_desc *tx_desc;
1099        struct pch_gbe_buffer *buffer_info;
1100        struct sk_buff *tmp_skb;
1101        unsigned int frame_ctrl;
1102        unsigned int ring_num;
1103
1104        /*-- Set frame control --*/
1105        frame_ctrl = 0;
1106        if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1107                frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
1108        if (skb->ip_summed == CHECKSUM_NONE)
1109                frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1110
 1111        /*
 1112         * Perform checksum processing in software: the hardware
 1113         * accelerator cannot compute the checksum for frames shorter
 1114         * than 64 bytes.
 1115         */
1116        if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
1117                frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1118                              PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1119                if (skb->protocol == htons(ETH_P_IP)) {
1120                        struct iphdr *iph = ip_hdr(skb);
1121                        unsigned int offset;
1122                        offset = skb_transport_offset(skb);
1123                        if (iph->protocol == IPPROTO_TCP) {
1124                                skb->csum = 0;
1125                                tcp_hdr(skb)->check = 0;
1126                                skb->csum = skb_checksum(skb, offset,
1127                                                         skb->len - offset, 0);
1128                                tcp_hdr(skb)->check =
1129                                        csum_tcpudp_magic(iph->saddr,
1130                                                          iph->daddr,
1131                                                          skb->len - offset,
1132                                                          IPPROTO_TCP,
1133                                                          skb->csum);
1134                        } else if (iph->protocol == IPPROTO_UDP) {
1135                                skb->csum = 0;
1136                                udp_hdr(skb)->check = 0;
1137                                skb->csum =
1138                                        skb_checksum(skb, offset,
1139                                                     skb->len - offset, 0);
1140                                udp_hdr(skb)->check =
1141                                        csum_tcpudp_magic(iph->saddr,
1142                                                          iph->daddr,
1143                                                          skb->len - offset,
1144                                                          IPPROTO_UDP,
1145                                                          skb->csum);
1146                        }
1147                }
1148        }
1149
1150        ring_num = tx_ring->next_to_use;
1151        if (unlikely((ring_num + 1) == tx_ring->count))
1152                tx_ring->next_to_use = 0;
1153        else
1154                tx_ring->next_to_use = ring_num + 1;
1155
1156
1157        buffer_info = &tx_ring->buffer_info[ring_num];
1158        tmp_skb = buffer_info->skb;
1159
 1160        /* [Header:14][payload] ---> [Header:14][padding:2][payload]    */
1161        memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1162        tmp_skb->data[ETH_HLEN] = 0x00;
1163        tmp_skb->data[ETH_HLEN + 1] = 0x00;
1164        tmp_skb->len = skb->len;
1165        memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1166               (skb->len - ETH_HLEN));
1167        /*-- Set Buffer information --*/
1168        buffer_info->length = tmp_skb->len;
1169        buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1170                                          buffer_info->length,
1171                                          DMA_TO_DEVICE);
1172        if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1173                netdev_err(adapter->netdev, "TX DMA map failed\n");
1174                buffer_info->dma = 0;
1175                buffer_info->time_stamp = 0;
1176                tx_ring->next_to_use = ring_num;
1177                return;
1178        }
1179        buffer_info->mapped = true;
1180        buffer_info->time_stamp = jiffies;
1181
1182        /*-- Set Tx descriptor --*/
1183        tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1184        tx_desc->buffer_addr = (buffer_info->dma);
1185        tx_desc->length = (tmp_skb->len);
1186        tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1187        tx_desc->tx_frame_ctrl = (frame_ctrl);
1188        tx_desc->gbec_status = (DSC_INIT16);
1189
1190        if (unlikely(++ring_num == tx_ring->count))
1191                ring_num = 0;
1192
1193        /* Update software pointer of TX descriptor */
1194        iowrite32(tx_ring->dma +
1195                  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1196                  &hw->reg->TX_DSC_SW_P);
1197
1198        pch_tx_timestamp(adapter, skb);
1199
1200        dev_kfree_skb_any(skb);
1201}
1202
1203/**
1204 * pch_gbe_update_stats - Update the board statistics counters
1205 * @adapter:  Board private structure
1206 */
1207void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1208{
1209        struct net_device *netdev = adapter->netdev;
1210        struct pci_dev *pdev = adapter->pdev;
1211        struct pch_gbe_hw_stats *stats = &adapter->stats;
1212        unsigned long flags;
1213
1214        /*
1215         * Prevent stats update while adapter is being reset, or if the pci
1216         * connection is down.
1217         */
1218        if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1219                return;
1220
1221        spin_lock_irqsave(&adapter->stats_lock, flags);
1222
1223        /* Update device status "adapter->stats" */
1224        stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1225        stats->tx_errors = stats->tx_length_errors +
1226            stats->tx_aborted_errors +
1227            stats->tx_carrier_errors + stats->tx_timeout_count;
1228
1229        /* Update network device status "adapter->net_stats" */
1230        netdev->stats.rx_packets = stats->rx_packets;
1231        netdev->stats.rx_bytes = stats->rx_bytes;
1232        netdev->stats.rx_dropped = stats->rx_dropped;
1233        netdev->stats.tx_packets = stats->tx_packets;
1234        netdev->stats.tx_bytes = stats->tx_bytes;
1235        netdev->stats.tx_dropped = stats->tx_dropped;
1236        /* Fill out the OS statistics structure */
1237        netdev->stats.multicast = stats->multicast;
1238        netdev->stats.collisions = stats->collisions;
1239        /* Rx Errors */
1240        netdev->stats.rx_errors = stats->rx_errors;
1241        netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1242        netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1243        /* Tx Errors */
1244        netdev->stats.tx_errors = stats->tx_errors;
1245        netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1246        netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1247
1248        spin_unlock_irqrestore(&adapter->stats_lock, flags);
1249}
1250
1251static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
1252{
1253        u32 rxdma;
1254
1255        /* Disable Receive DMA */
1256        rxdma = ioread32(&hw->reg->DMA_CTRL);
1257        rxdma &= ~PCH_GBE_RX_DMA_EN;
1258        iowrite32(rxdma, &hw->reg->DMA_CTRL);
1259}
1260
1261static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
1262{
1263        u32 rxdma;
1264
1265        /* Enables Receive DMA */
1266        rxdma = ioread32(&hw->reg->DMA_CTRL);
1267        rxdma |= PCH_GBE_RX_DMA_EN;
1268        iowrite32(rxdma, &hw->reg->DMA_CTRL);
1269}
1270
1271/**
1272 * pch_gbe_intr - Interrupt Handler
1273 * @irq:   Interrupt number
1274 * @data:  Pointer to a network interface device structure
1275 * Returns:
1276 *      - IRQ_HANDLED:  Our interrupt
1277 *      - IRQ_NONE:     Not our interrupt
1278 */
1279static irqreturn_t pch_gbe_intr(int irq, void *data)
1280{
1281        struct net_device *netdev = data;
1282        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1283        struct pch_gbe_hw *hw = &adapter->hw;
1284        u32 int_st;
1285        u32 int_en;
1286
1287        /* Check request status */
1288        int_st = ioread32(&hw->reg->INT_ST);
1289        int_st = int_st & ioread32(&hw->reg->INT_EN);
 1290        /* No enabled interrupt source is pending */
1291        if (unlikely(!int_st))
1292                return IRQ_NONE;        /* Not our interrupt. End processing. */
1293        netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
1294        if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1295                adapter->stats.intr_rx_frame_err_count++;
1296        if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1297                if (!adapter->rx_stop_flag) {
1298                        adapter->stats.intr_rx_fifo_err_count++;
1299                        netdev_dbg(netdev, "Rx fifo over run\n");
1300                        adapter->rx_stop_flag = true;
1301                        int_en = ioread32(&hw->reg->INT_EN);
1302                        iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1303                                  &hw->reg->INT_EN);
1304                        pch_gbe_disable_dma_rx(&adapter->hw);
1305                        int_st |= ioread32(&hw->reg->INT_ST);
1306                        int_st = int_st & ioread32(&hw->reg->INT_EN);
1307                }
1308        if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1309                adapter->stats.intr_rx_dma_err_count++;
1310        if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1311                adapter->stats.intr_tx_fifo_err_count++;
1312        if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1313                adapter->stats.intr_tx_dma_err_count++;
1314        if (int_st & PCH_GBE_INT_TCPIP_ERR)
1315                adapter->stats.intr_tcpip_err_count++;
1316        /* When Rx descriptor is empty  */
1317        if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1318                adapter->stats.intr_rx_dsc_empty_count++;
1319                netdev_dbg(netdev, "Rx descriptor is empty\n");
1320                int_en = ioread32(&hw->reg->INT_EN);
1321                iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1322                if (hw->mac.tx_fc_enable) {
1323                        /* Set Pause packet */
1324                        pch_gbe_mac_set_pause_packet(hw);
1325                }
1326        }
1327
 1328        /* Rx/Tx DMA completion (or Rx overrun recovery): schedule NAPI */
1329        if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1330            (adapter->rx_stop_flag)) {
1331                if (likely(napi_schedule_prep(&adapter->napi))) {
1332                        /* Enable only Rx Descriptor empty */
1333                        atomic_inc(&adapter->irq_sem);
1334                        int_en = ioread32(&hw->reg->INT_EN);
1335                        int_en &=
1336                            ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1337                        iowrite32(int_en, &hw->reg->INT_EN);
1338                        /* Start polling for NAPI */
1339                        __napi_schedule(&adapter->napi);
1340                }
1341        }
1342        netdev_dbg(netdev, "return = 0x%08x  INT_EN reg = 0x%08x\n",
1343                   IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1344        return IRQ_HANDLED;
1345}
1346
1347/**
1348 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1349 * @adapter:       Board private structure
1350 * @rx_ring:       Rx descriptor ring
1351 * @cleaned_count: Cleaned count
1352 */
1353static void
1354pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1355                         struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1356{
1357        struct net_device *netdev = adapter->netdev;
1358        struct pci_dev *pdev = adapter->pdev;
1359        struct pch_gbe_hw *hw = &adapter->hw;
1360        struct pch_gbe_rx_desc *rx_desc;
1361        struct pch_gbe_buffer *buffer_info;
1362        struct sk_buff *skb;
1363        unsigned int i;
1364        unsigned int bufsz;
1365
1366        bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1367        i = rx_ring->next_to_use;
1368
1369        while ((cleaned_count--)) {
1370                buffer_info = &rx_ring->buffer_info[i];
1371                skb = netdev_alloc_skb(netdev, bufsz);
1372                if (unlikely(!skb)) {
1373                        /* Better luck next round */
1374                        adapter->stats.rx_alloc_buff_failed++;
1375                        break;
1376                }
1377                /* align */
1378                skb_reserve(skb, NET_IP_ALIGN);
1379                buffer_info->skb = skb;
1380
1381                buffer_info->dma = dma_map_single(&pdev->dev,
1382                                                  buffer_info->rx_buffer,
1383                                                  buffer_info->length,
1384                                                  DMA_FROM_DEVICE);
1385                if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1386                        dev_kfree_skb(skb);
1387                        buffer_info->skb = NULL;
1388                        buffer_info->dma = 0;
1389                        adapter->stats.rx_alloc_buff_failed++;
1390                        break; /* while !buffer_info->skb */
1391                }
1392                buffer_info->mapped = true;
1393                rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1394                rx_desc->buffer_addr = (buffer_info->dma);
1395                rx_desc->gbec_status = DSC_INIT16;
1396
1397                netdev_dbg(netdev,
1398                           "i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
1399                           i, (unsigned long long)buffer_info->dma,
1400                           buffer_info->length);
1401
1402                if (unlikely(++i == rx_ring->count))
1403                        i = 0;
1404        }
1405        if (likely(rx_ring->next_to_use != i)) {
1406                rx_ring->next_to_use = i;
1407                if (unlikely(i-- == 0))
1408                        i = (rx_ring->count - 1);
1409                iowrite32(rx_ring->dma +
1410                          (int)sizeof(struct pch_gbe_rx_desc) * i,
1411                          &hw->reg->RX_DSC_SW_P);
1412        }
1413        return;
1414}
1415
1416static int
1417pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1418                         struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1419{
1420        struct pci_dev *pdev = adapter->pdev;
1421        struct pch_gbe_buffer *buffer_info;
1422        unsigned int i;
1423        unsigned int bufsz;
1424        unsigned int size;
1425
1426        bufsz = adapter->rx_buffer_len;
1427
1428        size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1429        rx_ring->rx_buff_pool =
1430                dma_alloc_coherent(&pdev->dev, size,
1431                                   &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1432        if (!rx_ring->rx_buff_pool)
1433                return -ENOMEM;
1434
1435        rx_ring->rx_buff_pool_size = size;
1436        for (i = 0; i < rx_ring->count; i++) {
1437                buffer_info = &rx_ring->buffer_info[i];
1438                buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1439                buffer_info->length = bufsz;
1440        }
1441        return 0;
1442}
1443
1444/**
1445 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1446 * @adapter:   Board private structure
1447 * @tx_ring:   Tx descriptor ring
1448 */
1449static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1450                                        struct pch_gbe_tx_ring *tx_ring)
1451{
1452        struct pch_gbe_buffer *buffer_info;
1453        struct sk_buff *skb;
1454        unsigned int i;
1455        unsigned int bufsz;
1456        struct pch_gbe_tx_desc *tx_desc;
1457
1458        bufsz =
1459            adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1460
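            /* Pre-allocate one skb per Tx descriptor and mark each descriptor
             * clean (DSC_INIT16).  Allocation failures are not checked here,
             * so this is assumed to run only where the allocation can succeed.
             */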
1461        for (i = 0; i < tx_ring->count; i++) {
1462                buffer_info = &tx_ring->buffer_info[i];
1463                skb = netdev_alloc_skb(adapter->netdev, bufsz);
1464                skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1465                buffer_info->skb = skb;
1466                tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1467                tx_desc->gbec_status = (DSC_INIT16);
1468        }
1469        return;
1470}
1471
1472/**
1473 * pch_gbe_clean_tx - Reclaim resources after transmit completes
1474 * @adapter:   Board private structure
1475 * @tx_ring:   Tx descriptor ring
1476 * Returns:
1477 *      true:  Descriptors were cleaned
1478 *      false: No descriptor was cleaned, or the cleanup budget ran out
1479 */
1480static bool
1481pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1482                 struct pch_gbe_tx_ring *tx_ring)
1483{
1484        struct pch_gbe_tx_desc *tx_desc;
1485        struct pch_gbe_buffer *buffer_info;
1486        struct sk_buff *skb;
1487        unsigned int i;
1488        unsigned int cleaned_count = 0;
1489        bool cleaned = false;
1490        int unused, thresh;
1491
1492        netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1493                   tx_ring->next_to_clean);
1494
1495        i = tx_ring->next_to_clean;
1496        tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1497        netdev_dbg(adapter->netdev, "gbec_status:0x%04x  dma_status:0x%04x\n",
1498                   tx_desc->gbec_status, tx_desc->dma_status);
1499
1500        unused = PCH_GBE_DESC_UNUSED(tx_ring);
1501        thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
1502        if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
1503        {  /* current marked clean, tx queue filling up, do extra clean */
1504                int j, k;
1505                if (unused < 8) {  /* tx queue nearly full */
1506                        netdev_dbg(adapter->netdev,
1507                                   "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1508                                   tx_ring->next_to_clean, tx_ring->next_to_use,
1509                                   unused);
1510                }
1511
1512                /* current marked clean, scan for more that need cleaning. */
1513                k = i;
1514                for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
1515                {
1516                        tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
1517                        if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/
1518                        if (++k >= tx_ring->count) k = 0;  /*increment, wrap*/
1519                }
1520                if (j < PCH_GBE_TX_WEIGHT) {
1521                        netdev_dbg(adapter->netdev,
1522                                   "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1523                                   unused, j, i, k, tx_ring->next_to_use,
1524                                   tx_desc->gbec_status);
1525                        i = k;  /* found one to clean, usually gbec_status == 2000 */
1526                }
1527        }
1528
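            /* Walk the ring and reclaim descriptors the hardware has
             * completed, i.e. those whose gbec_status no longer carries the
             * DSC_INIT16 ownership marker.
             */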
1529        while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1530                netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
1531                           tx_desc->gbec_status);
1532                buffer_info = &tx_ring->buffer_info[i];
1533                skb = buffer_info->skb;
1534                cleaned = true;
1535
1536                if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1537                        adapter->stats.tx_aborted_errors++;
1538                        netdev_err(adapter->netdev, "Transfer Abort Error\n");
1539                } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1540                          ) {
1541                        adapter->stats.tx_carrier_errors++;
1542                        netdev_err(adapter->netdev,
1543                                   "Transfer Carrier Sense Error\n");
1544                } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1545                          ) {
1546                        adapter->stats.tx_aborted_errors++;
1547                        netdev_err(adapter->netdev,
1548                                   "Transfer Collision Abort Error\n");
1549                } else if ((tx_desc->gbec_status &
1550                            (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1551                             PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1552                        adapter->stats.collisions++;
1553                        adapter->stats.tx_packets++;
1554                        adapter->stats.tx_bytes += skb->len;
1555                        netdev_dbg(adapter->netdev, "Transfer Collision\n");
1556                } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1557                          ) {
1558                        adapter->stats.tx_packets++;
1559                        adapter->stats.tx_bytes += skb->len;
1560                }
1561                if (buffer_info->mapped) {
1562                        netdev_dbg(adapter->netdev,
1563                                   "unmap buffer_info->dma : %d\n", i);
1564                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1565                                         buffer_info->length, DMA_TO_DEVICE);
1566                        buffer_info->mapped = false;
1567                }
1568                if (buffer_info->skb) {
1569                        netdev_dbg(adapter->netdev,
1570                                   "trim buffer_info->skb : %d\n", i);
1571                        skb_trim(buffer_info->skb, 0);
1572                }
1573                tx_desc->gbec_status = DSC_INIT16;
1574                if (unlikely(++i == tx_ring->count))
1575                        i = 0;
1576                tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1577
1578                /* a weight of sorts for tx, to avoid endless transmit cleanup */
1579                if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1580                        cleaned = false;
1581                        break;
1582                }
1583        }
1584        netdev_dbg(adapter->netdev,
1585                   "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1586                   cleaned_count);
1587        if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
1588                /* Recover from running out of Tx resources in xmit_frame */
1589                netif_tx_lock(adapter->netdev);
1590                if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
1591                {
1592                        netif_wake_queue(adapter->netdev);
1593                        adapter->stats.tx_restart_count++;
1594                        netdev_dbg(adapter->netdev, "Tx wake queue\n");
1595                }
1596
1597                tx_ring->next_to_clean = i;
1598
1599                netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1600                           tx_ring->next_to_clean);
1601                netif_tx_unlock(adapter->netdev);
1602        }
1603        return cleaned;
1604}
1605
1606/**
1607 * pch_gbe_clean_rx - Send received data up the network stack; legacy
1608 * @adapter:     Board private structure
1609 * @rx_ring:     Rx descriptor ring
1610 * @work_done:   Completed count
1611 * @work_to_do:  Request count
1612 * Returns:
1613 *      true:  Descriptors were cleaned
1614 *      false: No descriptor was cleaned
1615 */
1616static bool
1617pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1618                 struct pch_gbe_rx_ring *rx_ring,
1619                 int *work_done, int work_to_do)
1620{
1621        struct net_device *netdev = adapter->netdev;
1622        struct pci_dev *pdev = adapter->pdev;
1623        struct pch_gbe_buffer *buffer_info;
1624        struct pch_gbe_rx_desc *rx_desc;
1625        u32 length;
1626        unsigned int i;
1627        unsigned int cleaned_count = 0;
1628        bool cleaned = false;
1629        struct sk_buff *skb;
1630        u8 dma_status;
1631        u16 gbec_status;
1632        u32 tcp_ip_status;
1633
1634        i = rx_ring->next_to_clean;
1635
1636        while (*work_done < work_to_do) {
1637                /* Check Rx descriptor status */
1638                rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1639                if (rx_desc->gbec_status == DSC_INIT16)
1640                        break;
1641                cleaned = true;
1642                cleaned_count++;
1643
1644                dma_status = rx_desc->dma_status;
1645                gbec_status = rx_desc->gbec_status;
1646                tcp_ip_status = rx_desc->tcp_ip_status;
1647                rx_desc->gbec_status = DSC_INIT16;
1648                buffer_info = &rx_ring->buffer_info[i];
1649                skb = buffer_info->skb;
1650                buffer_info->skb = NULL;
1651
1652                /* unmap dma */
1653                dma_unmap_single(&pdev->dev, buffer_info->dma,
1654                                   buffer_info->length, DMA_FROM_DEVICE);
1655                buffer_info->mapped = false;
1656
1657                netdev_dbg(netdev,
1658                           "RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x]  BufInf = 0x%p\n",
1659                           i, dma_status, gbec_status, tcp_ip_status,
1660                           buffer_info);
1661                /* Error check */
1662                if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1663                        adapter->stats.rx_frame_errors++;
1664                        netdev_err(netdev, "Receive Not Octal Error\n");
1665                } else if (unlikely(gbec_status &
1666                                PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1667                        adapter->stats.rx_frame_errors++;
1668                        netdev_err(netdev, "Receive Nibble Error\n");
1669                } else if (unlikely(gbec_status &
1670                                PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1671                        adapter->stats.rx_crc_errors++;
1672                        netdev_err(netdev, "Receive CRC Error\n");
1673                } else {
1674                        /* get receive length */
1675                        /* adjust hardware length (-3) and strip the FCS */
1676                        length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1677                        if (rx_desc->rx_words_eob & 0x02)
1678                                length = length - 4;
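                            /* The hardware DMAs into the per-ring pool
                             * buffer; copy the frame into the preallocated
                             * skb so the pool buffer can be reused on the
                             * next refill.
                             */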
1679                        /*
1680                         * buffer_info->rx_buffer: [Header:14][payload]
1681                         * skb->data: [Reserve:2][Header:14][payload]
1682                         */
1683                        memcpy(skb->data, buffer_info->rx_buffer, length);
1684
1685                        /* update status of driver */
1686                        adapter->stats.rx_bytes += length;
1687                        adapter->stats.rx_packets++;
1688                        if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1689                                adapter->stats.multicast++;
1690                        /* Write metadata of the skb */
1691                        skb_put(skb, length);
1692
1693                        pch_rx_timestamp(adapter, skb);
1694
1695                        skb->protocol = eth_type_trans(skb, netdev);
1696                        if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1697                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1698                        else
1699                                skb->ip_summed = CHECKSUM_NONE;
1700
1701                        napi_gro_receive(&adapter->napi, skb);
1702                        (*work_done)++;
1703                        netdev_dbg(netdev,
1704                                   "Receive skb->ip_summed: %d length: %d\n",
1705                                   skb->ip_summed, length);
1706                }
1707                /* return some buffers to hardware, one at a time is too slow */
1708                if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1709                        pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1710                                                 cleaned_count);
1711                        cleaned_count = 0;
1712                }
1713                if (++i == rx_ring->count)
1714                        i = 0;
1715        }
1716        rx_ring->next_to_clean = i;
1717        if (cleaned_count)
1718                pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1719        return cleaned;
1720}
1721
1722/**
1723 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1724 * @adapter:  Board private structure
1725 * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1726 * Returns:
1727 *      0:              Successfully
1728 *      Negative value: Failed
1729 */
1730int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1731                                struct pch_gbe_tx_ring *tx_ring)
1732{
1733        struct pci_dev *pdev = adapter->pdev;
1734        struct pch_gbe_tx_desc *tx_desc;
1735        int size;
1736        int desNo;
1737
1738        size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1739        tx_ring->buffer_info = vzalloc(size);
1740        if (!tx_ring->buffer_info)
1741                return -ENOMEM;
1742
1743        tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1744
1745        tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1746                                           &tx_ring->dma, GFP_KERNEL);
1747        if (!tx_ring->desc) {
1748                vfree(tx_ring->buffer_info);
1749                return -ENOMEM;
1750        }
1751
1752        tx_ring->next_to_use = 0;
1753        tx_ring->next_to_clean = 0;
1754
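            /* Mark every descriptor with DSC_INIT16 so the whole ring starts
             * out "clean"/unused from the driver's point of view.
             */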
1755        for (desNo = 0; desNo < tx_ring->count; desNo++) {
1756                tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1757                tx_desc->gbec_status = DSC_INIT16;
1758        }
1759        netdev_dbg(adapter->netdev,
1760                   "tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1761                   tx_ring->desc, (unsigned long long)tx_ring->dma,
1762                   tx_ring->next_to_clean, tx_ring->next_to_use);
1763        return 0;
1764}
1765
1766/**
1767 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1768 * @adapter:  Board private structure
1769 * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1770 * Returns:
1771 *      0:              Successfully
1772 *      Negative value: Failed
1773 */
1774int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1775                                struct pch_gbe_rx_ring *rx_ring)
1776{
1777        struct pci_dev *pdev = adapter->pdev;
1778        struct pch_gbe_rx_desc *rx_desc;
1779        int size;
1780        int desNo;
1781
1782        size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1783        rx_ring->buffer_info = vzalloc(size);
1784        if (!rx_ring->buffer_info)
1785                return -ENOMEM;
1786
1787        rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1788        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1789                                                  &rx_ring->dma, GFP_KERNEL);
1790        if (!rx_ring->desc) {
1791                vfree(rx_ring->buffer_info);
1792                return -ENOMEM;
1793        }
1794        rx_ring->next_to_clean = 0;
1795        rx_ring->next_to_use = 0;
1796        for (desNo = 0; desNo < rx_ring->count; desNo++) {
1797                rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1798                rx_desc->gbec_status = DSC_INIT16;
1799        }
1800        netdev_dbg(adapter->netdev,
1801                   "rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1802                   rx_ring->desc, (unsigned long long)rx_ring->dma,
1803                   rx_ring->next_to_clean, rx_ring->next_to_use);
1804        return 0;
1805}
1806
1807/**
1808 * pch_gbe_free_tx_resources - Free Tx Resources
1809 * @adapter:  Board private structure
1810 * @tx_ring:  Tx descriptor ring for a specific queue
1811 */
1812void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1813                                struct pch_gbe_tx_ring *tx_ring)
1814{
1815        struct pci_dev *pdev = adapter->pdev;
1816
1817        pch_gbe_clean_tx_ring(adapter, tx_ring);
1818        vfree(tx_ring->buffer_info);
1819        tx_ring->buffer_info = NULL;
1820        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1821                          tx_ring->dma);
1822        tx_ring->desc = NULL;
1823}
1824
1825/**
1826 * pch_gbe_free_rx_resources - Free Rx Resources
1827 * @adapter:  Board private structure
1828 * @rx_ring:  Ring to clean the resources from
1829 */
1830void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1831                                struct pch_gbe_rx_ring *rx_ring)
1832{
1833        struct pci_dev *pdev = adapter->pdev;
1834
1835        pch_gbe_clean_rx_ring(adapter, rx_ring);
1836        vfree(rx_ring->buffer_info);
1837        rx_ring->buffer_info = NULL;
1838        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1839                          rx_ring->dma);
1840        rx_ring->desc = NULL;
1841}
1842
1843/**
1844 * pch_gbe_request_irq - Allocate an interrupt line
1845 * @adapter:  Board private structure
1846 * Returns:
1847 *      0:              Successfully
1848 *      Negative value: Failed
1849 */
1850static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1851{
1852        struct net_device *netdev = adapter->netdev;
1853        int err;
1854
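            /* A single vector is enough; PCI_IRQ_ALL_TYPES lets the PCI core
             * fall back from MSI-X/MSI to legacy INTx as needed.
             */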
1855        err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1856        if (err < 0)
1857                return err;
1858
1859        adapter->irq = pci_irq_vector(adapter->pdev, 0);
1860
1861        err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
1862                          netdev->name, netdev);
1863        if (err)
1864                netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
1865                           err);
1866        netdev_dbg(netdev, "have_msi : %d  return : 0x%04x\n",
1867                   pci_dev_msi_enabled(adapter->pdev), err);
1868        return err;
1869}
1870
1871/**
1872 * pch_gbe_up - Up GbE network device
1873 * @adapter:  Board private structure
1874 * Returns:
1875 *      0:              Successfully
1876 *      Negative value: Failed
1877 */
1878int pch_gbe_up(struct pch_gbe_adapter *adapter)
1879{
1880        struct net_device *netdev = adapter->netdev;
1881        struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1882        struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1883        int err = -EINVAL;
1884
1885        /* Ensure we have a valid MAC */
1886        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1887                netdev_err(netdev, "Error: Invalid MAC address\n");
1888                goto out;
1889        }
1890
1891        /* hardware has been reset, we need to reload some things */
1892        pch_gbe_set_multi(netdev);
1893
1894        pch_gbe_setup_tctl(adapter);
1895        pch_gbe_configure_tx(adapter);
1896        pch_gbe_setup_rctl(adapter);
1897        pch_gbe_configure_rx(adapter);
1898
1899        err = pch_gbe_request_irq(adapter);
1900        if (err) {
1901                netdev_err(netdev,
1902                           "Error: can't bring device up - irq request failed\n");
1903                goto out;
1904        }
1905        err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1906        if (err) {
1907                netdev_err(netdev,
1908                           "Error: can't bring device up - alloc rx buffers pool failed\n");
1909                goto freeirq;
1910        }
1911        pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1912        pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1913        adapter->tx_queue_len = netdev->tx_queue_len;
1914        pch_gbe_enable_dma_rx(&adapter->hw);
1915        pch_gbe_enable_mac_rx(&adapter->hw);
1916
1917        mod_timer(&adapter->watchdog_timer, jiffies);
1918
1919        napi_enable(&adapter->napi);
1920        pch_gbe_irq_enable(adapter);
1921        netif_start_queue(adapter->netdev);
1922
1923        return 0;
1924
1925freeirq:
1926        pch_gbe_free_irq(adapter);
1927out:
1928        return err;
1929}
1930
1931/**
1932 * pch_gbe_down - Down GbE network device
1933 * @adapter:  Board private structure
1934 */
1935void pch_gbe_down(struct pch_gbe_adapter *adapter)
1936{
1937        struct net_device *netdev = adapter->netdev;
1938        struct pci_dev *pdev = adapter->pdev;
1939        struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1940
1941        /* signal that we're down so the interrupt handler does not
1942         * reschedule our watchdog timer */
1943        napi_disable(&adapter->napi);
1944        atomic_set(&adapter->irq_sem, 0);
1945
1946        pch_gbe_irq_disable(adapter);
1947        pch_gbe_free_irq(adapter);
1948
1949        del_timer_sync(&adapter->watchdog_timer);
1950
1951        netdev->tx_queue_len = adapter->tx_queue_len;
1952        netif_carrier_off(netdev);
1953        netif_stop_queue(netdev);
1954
1955        if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1956                pch_gbe_reset(adapter);
1957        pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1958        pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1959
1960        dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size,
1961                          rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
1962        rx_ring->rx_buff_pool_logic = 0;
1963        rx_ring->rx_buff_pool_size = 0;
1964        rx_ring->rx_buff_pool = NULL;
1965}
1966
1967/**
1968 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
1969 * @adapter:  Board private structure to initialize
1970 * Returns:
1971 *      0:              Successfully
1972 *      Negative value: Failed
1973 */
1974static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
1975{
1976        struct pch_gbe_hw *hw = &adapter->hw;
1977        struct net_device *netdev = adapter->netdev;
1978
1979        adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
1980        hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1981        hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1982        hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
1983
1984        if (pch_gbe_alloc_queues(adapter)) {
1985                netdev_err(netdev, "Unable to allocate memory for queues\n");
1986                return -ENOMEM;
1987        }
1988        spin_lock_init(&adapter->hw.miim_lock);
1989        spin_lock_init(&adapter->stats_lock);
1990        spin_lock_init(&adapter->ethtool_lock);
1991        atomic_set(&adapter->irq_sem, 0);
1992        pch_gbe_irq_disable(adapter);
1993
1994        pch_gbe_init_stats(adapter);
1995
1996        netdev_dbg(netdev,
1997                   "rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
1998                   (u32) adapter->rx_buffer_len,
1999                   hw->mac.min_frame_size, hw->mac.max_frame_size);
2000        return 0;
2001}
2002
2003/**
2004 * pch_gbe_open - Called when a network interface is made active
2005 * @netdev:     Network interface device structure
2006 * Returns:
2007 *      0:              Successfully
2008 *      Negative value: Failed
2009 */
2010static int pch_gbe_open(struct net_device *netdev)
2011{
2012        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2013        struct pch_gbe_hw *hw = &adapter->hw;
2014        int err;
2015
2016        /* allocate transmit descriptors */
2017        err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
2018        if (err)
2019                goto err_setup_tx;
2020        /* allocate receive descriptors */
2021        err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
2022        if (err)
2023                goto err_setup_rx;
2024        pch_gbe_phy_power_up(hw);
2025        err = pch_gbe_up(adapter);
2026        if (err)
2027                goto err_up;
2028        netdev_dbg(netdev, "Success End\n");
2029        return 0;
2030
2031err_up:
2032        if (!adapter->wake_up_evt)
2033                pch_gbe_phy_power_down(hw);
2034        pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2035err_setup_rx:
2036        pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2037err_setup_tx:
2038        pch_gbe_reset(adapter);
2039        netdev_err(netdev, "Error End\n");
2040        return err;
2041}
2042
2043/**
2044 * pch_gbe_stop - Disables a network interface
2045 * @netdev:  Network interface device structure
2046 * Returns:
2047 *      0: Successfully
2048 */
2049static int pch_gbe_stop(struct net_device *netdev)
2050{
2051        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2052        struct pch_gbe_hw *hw = &adapter->hw;
2053
2054        pch_gbe_down(adapter);
2055        if (!adapter->wake_up_evt)
2056                pch_gbe_phy_power_down(hw);
2057        pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2058        pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2059        return 0;
2060}
2061
2062/**
2063 * pch_gbe_xmit_frame - Packet transmitting start
2064 * @skb:     Socket buffer structure
2065 * @netdev:  Network interface device structure
2066 * Returns:
2067 *      - NETDEV_TX_OK:   Normal end
2068 *      - NETDEV_TX_BUSY: Error end
2069 */
2070static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2071{
2072        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2073        struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2074
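            /* Stop the queue while the ring has no free descriptors;
             * pch_gbe_clean_tx() wakes it again once entries are reclaimed.
             */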
2075        if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2076                netif_stop_queue(netdev);
2077                netdev_dbg(netdev,
2078                           "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
2079                           tx_ring->next_to_use, tx_ring->next_to_clean);
2080                return NETDEV_TX_BUSY;
2081        }
2082
2083        /* CRC and ITAG are not supported */
2084        pch_gbe_tx_queue(adapter, tx_ring, skb);
2085        return NETDEV_TX_OK;
2086}
2087
2088/**
2089 * pch_gbe_set_multi - Multicast and Promiscuous mode set
2090 * @netdev:   Network interface device structure
2091 */
2092static void pch_gbe_set_multi(struct net_device *netdev)
2093{
2094        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2095        struct pch_gbe_hw *hw = &adapter->hw;
2096        struct netdev_hw_addr *ha;
2097        u32 rctl, adrmask;
2098        int mc_count, i;
2099
2100        netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
2101
2102        /* By default enable address & multicast filtering */
2103        rctl = ioread32(&hw->reg->RX_MODE);
2104        rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
2105
2106        /* Promiscuous mode disables all hardware address filtering */
2107        if (netdev->flags & IFF_PROMISC)
2108                rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
2109
2110        /* If we want to monitor more multicast addresses than the hardware can
2111         * support then disable hardware multicast filtering.
2112         */
2113        mc_count = netdev_mc_count(netdev);
2114        if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
2115                rctl &= ~PCH_GBE_MLT_FIL_EN;
2116
2117        iowrite32(rctl, &hw->reg->RX_MODE);
2118
2119        /* If we're not using multicast filtering then there's no point
2120         * configuring the unused MAC address registers.
2121         */
2122        if (!(rctl & PCH_GBE_MLT_FIL_EN))
2123                return;
2124
2125        /* Load the first set of multicast addresses into MAC address registers
2126         * for use by hardware filtering.
2127         */
2128        i = 1;
2129        netdev_for_each_mc_addr(ha, netdev)
2130                pch_gbe_mac_mar_set(hw, ha->addr, i++);
2131
2132        /* If there are spare MAC registers, mask & clear them */
2133        for (; i < PCH_GBE_MAR_ENTRIES; i++) {
2134                /* Clear MAC address mask */
2135                adrmask = ioread32(&hw->reg->ADDR_MASK);
2136                iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
2137                /* wait busy */
2138                pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
2139                /* Clear MAC address */
2140                iowrite32(0, &hw->reg->mac_adr[i].high);
2141                iowrite32(0, &hw->reg->mac_adr[i].low);
2142        }
2143
2144        netdev_dbg(netdev,
2145                 "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
2146                 ioread32(&hw->reg->RX_MODE), mc_count);
2147}
2148
2149/**
2150 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2151 * @netdev: Network interface device structure
2152 * @addr:   Pointer to an address structure
2153 * Returns:
2154 *      0:              Successfully
2155 *      -EADDRNOTAVAIL: Failed
2156 */
2157static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2158{
2159        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2160        struct sockaddr *skaddr = addr;
2161        int ret_val;
2162
2163        if (!is_valid_ether_addr(skaddr->sa_data)) {
2164                ret_val = -EADDRNOTAVAIL;
2165        } else {
2166                memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2167                memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2168                pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2169                ret_val = 0;
2170        }
2171        netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
2172        netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
2173        netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
2174        netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2175                   ioread32(&adapter->hw.reg->mac_adr[0].high),
2176                   ioread32(&adapter->hw.reg->mac_adr[0].low));
2177        return ret_val;
2178}
2179
2180/**
2181 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2182 * @netdev:   Network interface device structure
2183 * @new_mtu:  New value for maximum frame size
2184 * Returns:
2185 *      0:              Successfully
2186 *      -EINVAL:        Failed
2187 */
2188static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2189{
2190        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2191        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2192        unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2193        int err;
2194
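            /* Pick the smallest supported receive buffer size that still
             * holds the new maximum frame.
             */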
2195        if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2196                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2197        else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2198                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2199        else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2200                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2201        else
2202                adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2203
2204        if (netif_running(netdev)) {
2205                pch_gbe_down(adapter);
2206                err = pch_gbe_up(adapter);
2207                if (err) {
2208                        adapter->rx_buffer_len = old_rx_buffer_len;
2209                        pch_gbe_up(adapter);
2210                        return err;
2211                } else {
2212                        netdev->mtu = new_mtu;
2213                        adapter->hw.mac.max_frame_size = max_frame;
2214                }
2215        } else {
2216                pch_gbe_reset(adapter);
2217                netdev->mtu = new_mtu;
2218                adapter->hw.mac.max_frame_size = max_frame;
2219        }
2220
2221        netdev_dbg(netdev,
2222                   "max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
2223                   max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2224                   adapter->hw.mac.max_frame_size);
2225        return 0;
2226}
2227
2228/**
2229 * pch_gbe_set_features - Reset device after features changed
2230 * @netdev:   Network interface device structure
2231 * @features:  New features
2232 * Returns:
2233 *      0:              HW state updated successfully
2234 */
2235static int pch_gbe_set_features(struct net_device *netdev,
2236        netdev_features_t features)
2237{
2238        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2239        netdev_features_t changed = features ^ netdev->features;
2240
2241        if (!(changed & NETIF_F_RXCSUM))
2242                return 0;
2243
2244        if (netif_running(netdev))
2245                pch_gbe_reinit_locked(adapter);
2246        else
2247                pch_gbe_reset(adapter);
2248
2249        return 0;
2250}
2251
2252/**
2253 * pch_gbe_ioctl - Controls register through a MII interface
2254 * @netdev:   Network interface device structure
2255 * @ifr:      Pointer to ifr structure
2256 * @cmd:      Control command
2257 * Returns:
2258 *      0:      Successfully
2259 *      Negative value: Failed
2260 */
2261static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2262{
2263        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2264
2265        netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);
2266
2267        if (cmd == SIOCSHWTSTAMP)
2268                return hwtstamp_ioctl(netdev, ifr, cmd);
2269
2270        return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2271}
2272
2273/**
2274 * pch_gbe_tx_timeout - Respond to a Tx Hang
2275 * @netdev:   Network interface device structure
2276 * @txqueue: index of hanging queue
2277 */
2278static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2279{
2280        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2281
2282        /* Do the reset outside of interrupt context */
2283        adapter->stats.tx_timeout_count++;
2284        schedule_work(&adapter->reset_task);
2285}
2286
2287/**
2288 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2289 * @napi:    Pointer to the NAPI polling structure
2290 * @budget:  The maximum number of packets to process
2291 * Returns:
2292 *      The number of packets processed; if it is less than @budget the
2293 *      NAPI context is completed and interrupts are re-enabled
2294 */
2295static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2296{
2297        struct pch_gbe_adapter *adapter =
2298            container_of(napi, struct pch_gbe_adapter, napi);
2299        int work_done = 0;
2300        bool poll_end_flag = false;
2301        bool cleaned = false;
2302
2303        netdev_dbg(adapter->netdev, "budget : %d\n", budget);
2304
2305        pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2306        cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2307
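            /* Any Tx completion work is treated as consuming the full budget
             * so that NAPI keeps polling instead of re-enabling interrupts
             * right away.
             */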
2308        if (cleaned)
2309                work_done = budget;
2310        /* If no Tx and not enough Rx work done,
2311         * exit the polling mode
2312         */
2313        if (work_done < budget)
2314                poll_end_flag = true;
2315
2316        if (poll_end_flag) {
2317                napi_complete_done(napi, work_done);
2318                pch_gbe_irq_enable(adapter);
2319        }
2320
2321        if (adapter->rx_stop_flag) {
2322                adapter->rx_stop_flag = false;
2323                pch_gbe_enable_dma_rx(&adapter->hw);
2324        }
2325
2326        netdev_dbg(adapter->netdev,
2327                   "poll_end_flag : %d  work_done : %d  budget : %d\n",
2328                   poll_end_flag, work_done, budget);
2329
2330        return work_done;
2331}
2332
2333#ifdef CONFIG_NET_POLL_CONTROLLER
2334/**
2335 * pch_gbe_netpoll - Used by things like netconsole to send skbs
2336 * @netdev:  Network interface device structure
2337 */
2338static void pch_gbe_netpoll(struct net_device *netdev)
2339{
2340        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2341
2342        disable_irq(adapter->irq);
2343        pch_gbe_intr(adapter->irq, netdev);
2344        enable_irq(adapter->irq);
2345}
2346#endif
2347
2348static const struct net_device_ops pch_gbe_netdev_ops = {
2349        .ndo_open = pch_gbe_open,
2350        .ndo_stop = pch_gbe_stop,
2351        .ndo_start_xmit = pch_gbe_xmit_frame,
2352        .ndo_set_mac_address = pch_gbe_set_mac,
2353        .ndo_tx_timeout = pch_gbe_tx_timeout,
2354        .ndo_change_mtu = pch_gbe_change_mtu,
2355        .ndo_set_features = pch_gbe_set_features,
2356        .ndo_do_ioctl = pch_gbe_ioctl,
2357        .ndo_set_rx_mode = pch_gbe_set_multi,
2358#ifdef CONFIG_NET_POLL_CONTROLLER
2359        .ndo_poll_controller = pch_gbe_netpoll,
2360#endif
2361};
2362
2363static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2364                                                pci_channel_state_t state)
2365{
2366        struct net_device *netdev = pci_get_drvdata(pdev);
2367        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2368
2369        netif_device_detach(netdev);
2370        if (netif_running(netdev))
2371                pch_gbe_down(adapter);
2372        pci_disable_device(pdev);
2373        /* Request a slot reset. */
2374        return PCI_ERS_RESULT_NEED_RESET;
2375}
2376
2377static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2378{
2379        struct net_device *netdev = pci_get_drvdata(pdev);
2380        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2381        struct pch_gbe_hw *hw = &adapter->hw;
2382
2383        if (pci_enable_device(pdev)) {
2384                netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
2385                return PCI_ERS_RESULT_DISCONNECT;
2386        }
2387        pci_set_master(pdev);
2388        pci_enable_wake(pdev, PCI_D0, 0);
2389        pch_gbe_phy_power_up(hw);
2390        pch_gbe_reset(adapter);
2391        /* Clear wake up status */
2392        pch_gbe_mac_set_wol_event(hw, 0);
2393
2394        return PCI_ERS_RESULT_RECOVERED;
2395}
2396
2397static void pch_gbe_io_resume(struct pci_dev *pdev)
2398{
2399        struct net_device *netdev = pci_get_drvdata(pdev);
2400        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2401
2402        if (netif_running(netdev)) {
2403                if (pch_gbe_up(adapter)) {
2404                        netdev_dbg(netdev,
2405                                   "can't bring device back up after reset\n");
2406                        return;
2407                }
2408        }
2409        netif_device_attach(netdev);
2410}
2411
2412static int __pch_gbe_suspend(struct pci_dev *pdev)
2413{
2414        struct net_device *netdev = pci_get_drvdata(pdev);
2415        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2416        struct pch_gbe_hw *hw = &adapter->hw;
2417        u32 wufc = adapter->wake_up_evt;
2418
2419        netif_device_detach(netdev);
2420        if (netif_running(netdev))
2421                pch_gbe_down(adapter);
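            /* With Wake-on-LAN armed, keep the receiver configured and
             * program the wake-up event before powering off; otherwise the
             * PHY can simply be powered down.
             */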
2422        if (wufc) {
2423                pch_gbe_set_multi(netdev);
2424                pch_gbe_setup_rctl(adapter);
2425                pch_gbe_configure_rx(adapter);
2426                pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2427                                        hw->mac.link_duplex);
2428                pch_gbe_set_mode(adapter, hw->mac.link_speed,
2429                                        hw->mac.link_duplex);
2430                pch_gbe_mac_set_wol_event(hw, wufc);
2431                pci_disable_device(pdev);
2432        } else {
2433                pch_gbe_phy_power_down(hw);
2434                pch_gbe_mac_set_wol_event(hw, wufc);
2435                pci_disable_device(pdev);
2436        }
2437        return 0;
2438}
2439
2440#ifdef CONFIG_PM
2441static int pch_gbe_suspend(struct device *device)
2442{
2443        struct pci_dev *pdev = to_pci_dev(device);
2444
2445        return __pch_gbe_suspend(pdev);
2446}
2447
2448static int pch_gbe_resume(struct device *device)
2449{
2450        struct pci_dev *pdev = to_pci_dev(device);
2451        struct net_device *netdev = pci_get_drvdata(pdev);
2452        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2453        struct pch_gbe_hw *hw = &adapter->hw;
2454        u32 err;
2455
2456        err = pci_enable_device(pdev);
2457        if (err) {
2458                netdev_err(netdev, "Cannot enable PCI device from suspend\n");
2459                return err;
2460        }
2461        pci_set_master(pdev);
2462        pch_gbe_phy_power_up(hw);
2463        pch_gbe_reset(adapter);
2464        /* Clear wake on lan control and status */
2465        pch_gbe_mac_set_wol_event(hw, 0);
2466
2467        if (netif_running(netdev))
2468                pch_gbe_up(adapter);
2469        netif_device_attach(netdev);
2470
2471        return 0;
2472}
2473#endif /* CONFIG_PM */
2474
2475static void pch_gbe_shutdown(struct pci_dev *pdev)
2476{
2477        __pch_gbe_suspend(pdev);
2478        if (system_state == SYSTEM_POWER_OFF) {
2479                pci_wake_from_d3(pdev, true);
2480                pci_set_power_state(pdev, PCI_D3hot);
2481        }
2482}
2483
2484static void pch_gbe_remove(struct pci_dev *pdev)
2485{
2486        struct net_device *netdev = pci_get_drvdata(pdev);
2487        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2488
2489        cancel_work_sync(&adapter->reset_task);
2490        unregister_netdev(netdev);
2491
2492        pch_gbe_phy_hw_reset(&adapter->hw);
2493
2494        free_netdev(netdev);
2495}
2496
2497static int pch_gbe_probe(struct pci_dev *pdev,
2498                          const struct pci_device_id *pci_id)
2499{
2500        struct net_device *netdev;
2501        struct pch_gbe_adapter *adapter;
2502        int ret;
2503
2504        ret = pcim_enable_device(pdev);
2505        if (ret)
2506                return ret;
2507
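            /* Prefer 64-bit DMA, falling back to a 32-bit mask if the
             * platform cannot provide it.
             */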
2508        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
2509                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2510                if (ret) {
2511                        dev_err(&pdev->dev, "ERR: No usable DMA configuration, aborting\n");
2512                        return ret;
2513                }
2514        }
2515
2516        ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
2517        if (ret) {
2518                dev_err(&pdev->dev,
2519                        "ERR: Can't reserve PCI I/O and memory resources\n");
2520                return ret;
2521        }
2522        pci_set_master(pdev);
2523
2524        netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2525        if (!netdev)
2526                return -ENOMEM;
2527        SET_NETDEV_DEV(netdev, &pdev->dev);
2528
2529        pci_set_drvdata(pdev, netdev);
2530        adapter = netdev_priv(netdev);
2531        adapter->netdev = netdev;
2532        adapter->pdev = pdev;
2533        adapter->hw.back = adapter;
2534        adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
2535        adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
2536        if (adapter->pdata && adapter->pdata->platform_init)
2537                adapter->pdata->platform_init(pdev);
2538
2539        adapter->ptp_pdev =
2540                pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
2541                                            adapter->pdev->bus->number,
2542                                            PCI_DEVFN(12, 4));
2543
2544        netdev->netdev_ops = &pch_gbe_netdev_ops;
2545        netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2546        netif_napi_add(netdev, &adapter->napi,
2547                       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2548        netdev->hw_features = NETIF_F_RXCSUM |
2549                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2550        netdev->features = netdev->hw_features;
2551        pch_gbe_set_ethtool_ops(netdev);
2552
2553        /* MTU range: 46 - 10300 */
2554        netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
2555        netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
2556                          (ETH_HLEN + ETH_FCS_LEN);
2557
2558        pch_gbe_mac_load_mac_addr(&adapter->hw);
2559        pch_gbe_mac_reset_hw(&adapter->hw);
2560
2561        /* setup the private structure */
2562        ret = pch_gbe_sw_init(adapter);
2563        if (ret)
2564                goto err_free_netdev;
2565
2566        /* Initialize PHY */
2567        ret = pch_gbe_init_phy(adapter);
2568        if (ret) {
2569                dev_err(&pdev->dev, "PHY initialize error\n");
2570                goto err_free_adapter;
2571        }
2572
2573        /* Read the MAC address and store it in the private data */
2574        ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
2575        if (ret) {
2576                dev_err(&pdev->dev, "MAC address Read Error\n");
2577                goto err_free_adapter;
2578        }
2579
2580        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2581        if (!is_valid_ether_addr(netdev->dev_addr)) {
2582                /*
2583                 * If the MAC is invalid (or just missing), display a warning
2584                 * but do not abort setting up the device. pch_gbe_up will
2585                 * prevent the interface from being brought up until a valid MAC
2586                 * is set.
2587                 */
2588                dev_err(&pdev->dev, "Invalid MAC address, "
2589                                    "interface disabled.\n");
2590        }
2591        timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
2592
2593        INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2594
2595        pch_gbe_check_options(adapter);
2596
2597        /* initialize the wol settings based on the eeprom settings */
2598        adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2599        dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2600
2601        /* reset the hardware with the new settings */
2602        pch_gbe_reset(adapter);
2603
2604        ret = register_netdev(netdev);
2605        if (ret)
2606                goto err_free_adapter;
2607        /* tell the stack to leave us alone until pch_gbe_open() is called */
2608        netif_carrier_off(netdev);
2609        netif_stop_queue(netdev);
2610
2611        dev_dbg(&pdev->dev, "PCH Network Connection\n");
2612
2613        /* Disable hibernation on certain platforms */
2614        if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
2615                pch_gbe_phy_disable_hibernate(&adapter->hw);
2616
2617        device_set_wakeup_enable(&pdev->dev, 1);
2618        return 0;
2619
2620err_free_adapter:
2621        pch_gbe_phy_hw_reset(&adapter->hw);
2622err_free_netdev:
2623        free_netdev(netdev);
2624        return ret;
2625}
2626
2627/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
2628 * ensure it is awake for probe and init. Request the line and reset the PHY.
2629 */
2630static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
2631{
2632        unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
2633        unsigned gpio = MINNOW_PHY_RESET_GPIO;
2634        int ret;
2635
2636        ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
2637                                    "minnow_phy_reset");
2638        if (ret) {
2639                dev_err(&pdev->dev,
2640                        "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
2641                return ret;
2642        }
2643
2644        gpio_set_value(gpio, 0);
2645        usleep_range(1250, 1500);
2646        gpio_set_value(gpio, 1);
2647        usleep_range(1250, 1500);
2648
2649        return ret;
2650}
2651
2652static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
2653        .phy_tx_clk_delay = true,
2654        .phy_disable_hibernate = true,
2655        .platform_init = pch_gbe_minnow_platform_init,
2656};
2657
2658static const struct pci_device_id pch_gbe_pcidev_id[] = {
2659        {.vendor = PCI_VENDOR_ID_INTEL,
2660         .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2661         .subvendor = PCI_VENDOR_ID_CIRCUITCO,
2662         .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
2663         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2664         .class_mask = (0xFFFF00),
2665         .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
2666         },
2667        {.vendor = PCI_VENDOR_ID_INTEL,
2668         .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2669         .subvendor = PCI_ANY_ID,
2670         .subdevice = PCI_ANY_ID,
2671         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2672         .class_mask = (0xFFFF00)
2673         },
2674        {.vendor = PCI_VENDOR_ID_ROHM,
2675         .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
2676         .subvendor = PCI_ANY_ID,
2677         .subdevice = PCI_ANY_ID,
2678         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2679         .class_mask = (0xFFFF00)
2680         },
2681        {.vendor = PCI_VENDOR_ID_ROHM,
2682         .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2683         .subvendor = PCI_ANY_ID,
2684         .subdevice = PCI_ANY_ID,
2685         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2686         .class_mask = (0xFFFF00)
2687         },
2688        /* required last entry */
2689        {0}
2690};
2691
2692#ifdef CONFIG_PM
2693static const struct dev_pm_ops pch_gbe_pm_ops = {
2694        .suspend = pch_gbe_suspend,
2695        .resume = pch_gbe_resume,
2696        .freeze = pch_gbe_suspend,
2697        .thaw = pch_gbe_resume,
2698        .poweroff = pch_gbe_suspend,
2699        .restore = pch_gbe_resume,
2700};
2701#endif
2702
2703static const struct pci_error_handlers pch_gbe_err_handler = {
2704        .error_detected = pch_gbe_io_error_detected,
2705        .slot_reset = pch_gbe_io_slot_reset,
2706        .resume = pch_gbe_io_resume
2707};
2708
2709static struct pci_driver pch_gbe_driver = {
2710        .name = KBUILD_MODNAME,
2711        .id_table = pch_gbe_pcidev_id,
2712        .probe = pch_gbe_probe,
2713        .remove = pch_gbe_remove,
2714#ifdef CONFIG_PM
2715        .driver.pm = &pch_gbe_pm_ops,
2716#endif
2717        .shutdown = pch_gbe_shutdown,
2718        .err_handler = &pch_gbe_err_handler
2719};
2720module_pci_driver(pch_gbe_driver);
2721
2722MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
2723MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
2724MODULE_LICENSE("GPL");
2725MODULE_VERSION(DRV_VERSION);
2726MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2727
2728/* pch_gbe_main.c */
2729