/* linux/drivers/net/usb/lan78xx.c */
   1/*
   2 * Copyright (C) 2015 Microchip Technology
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public License
   6 * as published by the Free Software Foundation; either version 2
   7 * of the License, or (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  16 */
  17#include <linux/version.h>
  18#include <linux/module.h>
  19#include <linux/netdevice.h>
  20#include <linux/etherdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/usb.h>
  23#include <linux/crc32.h>
  24#include <linux/signal.h>
  25#include <linux/slab.h>
  26#include <linux/if_vlan.h>
  27#include <linux/uaccess.h>
  28#include <linux/list.h>
  29#include <linux/ip.h>
  30#include <linux/ipv6.h>
  31#include <linux/mdio.h>
  32#include <linux/phy.h>
  33#include <net/ip6_checksum.h>
  34#include <linux/interrupt.h>
  35#include <linux/irqdomain.h>
  36#include <linux/irq.h>
  37#include <linux/irqchip/chained_irq.h>
  38#include <linux/microchipphy.h>
  39#include <linux/phy.h>
  40#include "lan78xx.h"
  41
  42#define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
  43#define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
  44#define DRIVER_NAME     "lan78xx"
  45#define DRIVER_VERSION  "1.0.6"
  46
  47#define TX_TIMEOUT_JIFFIES              (5 * HZ)
  48#define THROTTLE_JIFFIES                (HZ / 8)
  49#define UNLINK_TIMEOUT_MS               3
  50
  51#define RX_MAX_QUEUE_MEMORY             (60 * 1518)
  52
  53#define SS_USB_PKT_SIZE                 (1024)
  54#define HS_USB_PKT_SIZE                 (512)
  55#define FS_USB_PKT_SIZE                 (64)
  56
  57#define MAX_RX_FIFO_SIZE                (12 * 1024)
  58#define MAX_TX_FIFO_SIZE                (12 * 1024)
  59#define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
  60#define DEFAULT_BULK_IN_DELAY           (0x0800)
  61#define MAX_SINGLE_PACKET_SIZE          (9000)
  62#define DEFAULT_TX_CSUM_ENABLE          (true)
  63#define DEFAULT_RX_CSUM_ENABLE          (true)
  64#define DEFAULT_TSO_CSUM_ENABLE         (true)
  65#define DEFAULT_VLAN_FILTER_ENABLE      (true)
  66#define TX_OVERHEAD                     (8)
  67#define RXW_PADDING                     2
  68
  69#define LAN78XX_USB_VENDOR_ID           (0x0424)
  70#define LAN7800_USB_PRODUCT_ID          (0x7800)
  71#define LAN7850_USB_PRODUCT_ID          (0x7850)
  72#define LAN7801_USB_PRODUCT_ID          (0x7801)
  73#define LAN78XX_EEPROM_MAGIC            (0x78A5)
  74#define LAN78XX_OTP_MAGIC               (0x78F3)
  75
  76#define MII_READ                        1
  77#define MII_WRITE                       0
  78
  79#define EEPROM_INDICATOR                (0xA5)
  80#define EEPROM_MAC_OFFSET               (0x01)
  81#define MAX_EEPROM_SIZE                 512
  82#define OTP_INDICATOR_1                 (0xF3)
  83#define OTP_INDICATOR_2                 (0xF7)
  84
  85#define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
  86                                         WAKE_MCAST | WAKE_BCAST | \
  87                                         WAKE_ARP | WAKE_MAGIC)
  88
  89/* USB related defines */
  90#define BULK_IN_PIPE                    1
  91#define BULK_OUT_PIPE                   2
  92
  93/* default autosuspend delay (mSec)*/
  94#define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
  95
  96/* statistic update interval (mSec) */
  97#define STAT_UPDATE_TIMER               (1 * 1000)
  98
  99/* defines interrupts from interrupt EP */
 100#define MAX_INT_EP                      (32)
 101#define INT_EP_INTEP                    (31)
 102#define INT_EP_OTP_WR_DONE              (28)
 103#define INT_EP_EEE_TX_LPI_START         (26)
 104#define INT_EP_EEE_TX_LPI_STOP          (25)
 105#define INT_EP_EEE_RX_LPI               (24)
 106#define INT_EP_MAC_RESET_TIMEOUT        (23)
 107#define INT_EP_RDFO                     (22)
 108#define INT_EP_TXE                      (21)
 109#define INT_EP_USB_STATUS               (20)
 110#define INT_EP_TX_DIS                   (19)
 111#define INT_EP_RX_DIS                   (18)
 112#define INT_EP_PHY                      (17)
 113#define INT_EP_DP                       (16)
 114#define INT_EP_MAC_ERR                  (15)
 115#define INT_EP_TDFU                     (14)
 116#define INT_EP_TDFO                     (13)
 117#define INT_EP_UTX                      (12)
 118#define INT_EP_GPIO_11                  (11)
 119#define INT_EP_GPIO_10                  (10)
 120#define INT_EP_GPIO_9                   (9)
 121#define INT_EP_GPIO_8                   (8)
 122#define INT_EP_GPIO_7                   (7)
 123#define INT_EP_GPIO_6                   (6)
 124#define INT_EP_GPIO_5                   (5)
 125#define INT_EP_GPIO_4                   (4)
 126#define INT_EP_GPIO_3                   (3)
 127#define INT_EP_GPIO_2                   (2)
 128#define INT_EP_GPIO_1                   (1)
 129#define INT_EP_GPIO_0                   (0)
 130
/* ethtool statistics names. The order must match the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 below — statistics are
 * copied out positionally as a flat array of counters.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
        "RX FCS Errors",
        "RX Alignment Errors",
        "Rx Fragment Errors",
        "RX Jabber Errors",
        "RX Undersize Frame Errors",
        "RX Oversize Frame Errors",
        "RX Dropped Frames",
        "RX Unicast Byte Count",
        "RX Broadcast Byte Count",
        "RX Multicast Byte Count",
        "RX Unicast Frames",
        "RX Broadcast Frames",
        "RX Multicast Frames",
        "RX Pause Frames",
        "RX 64 Byte Frames",
        "RX 65 - 127 Byte Frames",
        "RX 128 - 255 Byte Frames",
        "RX 256 - 511 Bytes Frames",
        "RX 512 - 1023 Byte Frames",
        "RX 1024 - 1518 Byte Frames",
        "RX Greater 1518 Byte Frames",
        "EEE RX LPI Transitions",
        "EEE RX LPI Time",
        "TX FCS Errors",
        "TX Excess Deferral Errors",
        "TX Carrier Errors",
        "TX Bad Byte Count",
        "TX Single Collisions",
        "TX Multiple Collisions",
        "TX Excessive Collision",
        "TX Late Collisions",
        "TX Unicast Byte Count",
        "TX Broadcast Byte Count",
        "TX Multicast Byte Count",
        "TX Unicast Frames",
        "TX Broadcast Frames",
        "TX Multicast Frames",
        "TX Pause Frames",
        "TX 64 Byte Frames",
        "TX 65 - 127 Byte Frames",
        "TX 128 - 255 Byte Frames",
        "TX 256 - 511 Bytes Frames",
        "TX 512 - 1023 Byte Frames",
        "TX 1024 - 1518 Byte Frames",
        "TX Greater 1518 Byte Frames",
        "EEE TX LPI Transitions",
        "EEE TX LPI Time",
};
 180
/* Raw hardware statistics block as returned by the device's GET_STATS
 * vendor request (see lan78xx_read_stats). Counters are 32-bit and wrap;
 * rollovers are detected in lan78xx_check_stat_rollover and folded into
 * the 64-bit struct lan78xx_statstage64. Field order must match
 * lan78xx_gstrings.
 */
struct lan78xx_statstage {
        u32 rx_fcs_errors;
        u32 rx_alignment_errors;
        u32 rx_fragment_errors;
        u32 rx_jabber_errors;
        u32 rx_undersize_frame_errors;
        u32 rx_oversize_frame_errors;
        u32 rx_dropped_frames;
        u32 rx_unicast_byte_count;
        u32 rx_broadcast_byte_count;
        u32 rx_multicast_byte_count;
        u32 rx_unicast_frames;
        u32 rx_broadcast_frames;
        u32 rx_multicast_frames;
        u32 rx_pause_frames;
        u32 rx_64_byte_frames;
        u32 rx_65_127_byte_frames;
        u32 rx_128_255_byte_frames;
        u32 rx_256_511_bytes_frames;
        u32 rx_512_1023_byte_frames;
        u32 rx_1024_1518_byte_frames;
        u32 rx_greater_1518_byte_frames;
        u32 eee_rx_lpi_transitions;
        u32 eee_rx_lpi_time;
        u32 tx_fcs_errors;
        u32 tx_excess_deferral_errors;
        u32 tx_carrier_errors;
        u32 tx_bad_byte_count;
        u32 tx_single_collisions;
        u32 tx_multiple_collisions;
        u32 tx_excessive_collision;
        u32 tx_late_collisions;
        u32 tx_unicast_byte_count;
        u32 tx_broadcast_byte_count;
        u32 tx_multicast_byte_count;
        u32 tx_unicast_frames;
        u32 tx_broadcast_frames;
        u32 tx_multicast_frames;
        u32 tx_pause_frames;
        u32 tx_64_byte_frames;
        u32 tx_65_127_byte_frames;
        u32 tx_128_255_byte_frames;
        u32 tx_256_511_bytes_frames;
        u32 tx_512_1023_byte_frames;
        u32 tx_1024_1518_byte_frames;
        u32 tx_greater_1518_byte_frames;
        u32 eee_tx_lpi_transitions;
        u32 eee_tx_lpi_time;
};
 230
/* Accumulated statistics, widened to 64 bits so hardware counter
 * rollovers can be folded in without loss (see lan78xx_update_stats).
 * Layout mirrors struct lan78xx_statstage field-for-field.
 */
struct lan78xx_statstage64 {
        u64 rx_fcs_errors;
        u64 rx_alignment_errors;
        u64 rx_fragment_errors;
        u64 rx_jabber_errors;
        u64 rx_undersize_frame_errors;
        u64 rx_oversize_frame_errors;
        u64 rx_dropped_frames;
        u64 rx_unicast_byte_count;
        u64 rx_broadcast_byte_count;
        u64 rx_multicast_byte_count;
        u64 rx_unicast_frames;
        u64 rx_broadcast_frames;
        u64 rx_multicast_frames;
        u64 rx_pause_frames;
        u64 rx_64_byte_frames;
        u64 rx_65_127_byte_frames;
        u64 rx_128_255_byte_frames;
        u64 rx_256_511_bytes_frames;
        u64 rx_512_1023_byte_frames;
        u64 rx_1024_1518_byte_frames;
        u64 rx_greater_1518_byte_frames;
        u64 eee_rx_lpi_transitions;
        u64 eee_rx_lpi_time;
        u64 tx_fcs_errors;
        u64 tx_excess_deferral_errors;
        u64 tx_carrier_errors;
        u64 tx_bad_byte_count;
        u64 tx_single_collisions;
        u64 tx_multiple_collisions;
        u64 tx_excessive_collision;
        u64 tx_late_collisions;
        u64 tx_unicast_byte_count;
        u64 tx_broadcast_byte_count;
        u64 tx_multicast_byte_count;
        u64 tx_unicast_frames;
        u64 tx_broadcast_frames;
        u64 tx_multicast_frames;
        u64 tx_pause_frames;
        u64 tx_64_byte_frames;
        u64 tx_65_127_byte_frames;
        u64 tx_128_255_byte_frames;
        u64 tx_256_511_bytes_frames;
        u64 tx_512_1023_byte_frames;
        u64 tx_1024_1518_byte_frames;
        u64 tx_greater_1518_byte_frames;
        u64 eee_tx_lpi_transitions;
        u64 eee_tx_lpi_time;
};
 280
 281struct lan78xx_net;
 282
/* Driver-private state hung off struct lan78xx_net::driver_priv:
 * receive-filter configuration and the deferred-work machinery used to
 * program it.
 */
struct lan78xx_priv {
        struct lan78xx_net *dev;
        u32 rfe_ctl;            /* shadow of the RFE control register */
        u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
        u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
        u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
        struct mutex dataport_mutex; /* for dataport access */
        spinlock_t rfe_ctl_lock; /* for rfe register access */
        struct work_struct set_multicast; /* deferred multicast-filter update */
        struct work_struct set_vlan;      /* deferred VLAN-filter update */
        u32 wol;                /* Wake-on-LAN mode flags (WAKE_*) */
};
 295
/* Lifecycle state of a queued skb, stored in skb->cb via struct skb_data. */
enum skb_state {
        illegal = 0,
        tx_start,
        tx_done,
        rx_start,
        rx_done,
        rx_cleanup,
        unlink_start
};
 305
struct skb_data {               /* skb->cb is one of these */
        struct urb *urb;        /* URB carrying this skb's transfer */
        struct lan78xx_net *dev;
        enum skb_state state;   /* where the skb is in its rx/tx lifecycle */
        size_t length;          /* payload length for accounting */
        int num_of_packet;      /* packets aggregated in this transfer */
};
 313
/* Pairs a USB control request with its owning device — presumably used
 * as the context of asynchronously submitted control URBs; usage is not
 * visible in this chunk (NOTE(review): confirm against the rest of the
 * file).
 */
struct usb_context {
        struct usb_ctrlrequest req;
        struct lan78xx_net *dev;
};
 318
 319#define EVENT_TX_HALT                   0
 320#define EVENT_RX_HALT                   1
 321#define EVENT_RX_MEMORY                 2
 322#define EVENT_STS_SPLIT                 3
 323#define EVENT_LINK_RESET                4
 324#define EVENT_RX_PAUSED                 5
 325#define EVENT_DEV_WAKING                6
 326#define EVENT_DEV_ASLEEP                7
 327#define EVENT_DEV_OPEN                  8
 328#define EVENT_STAT_UPDATE               9
 329
/* Statistics bookkeeping: the last raw snapshot, per-counter rollover
 * counts and maxima, and the accumulated 64-bit totals. All fields are
 * protected by access_lock (see lan78xx_update_stats).
 */
struct statstage {
        struct mutex                    access_lock;    /* for stats access */
        struct lan78xx_statstage        saved;          /* last raw HW snapshot */
        struct lan78xx_statstage        rollover_count; /* wraps seen per counter */
        struct lan78xx_statstage        rollover_max;   /* max value per counter */
        struct lan78xx_statstage64      curr_stat;      /* accumulated totals */
};
 337
/* State for demultiplexing device interrupts to a virtual IRQ domain
 * (the PHY interrupt line in particular — see phyirq).
 */
struct irq_domain_data {
        struct irq_domain       *irqdomain;
        unsigned int            phyirq;         /* virq mapped for the PHY */
        struct irq_chip         *irqchip;
        irq_flow_handler_t      irq_handler;
        u32                     irqenable;      /* shadow of enabled int sources */
        struct mutex            irq_lock;               /* for irq bus access */
};
 346
/* Per-adapter driver state, shared between the netdev, USB and PHY
 * layers of the driver.
 */
struct lan78xx_net {
        struct net_device       *net;
        struct usb_device       *udev;
        struct usb_interface    *intf;
        void                    *driver_priv;   /* struct lan78xx_priv */

        int                     rx_qlen;        /* rx URBs kept in flight */
        int                     tx_qlen;        /* tx URBs kept in flight */
        struct sk_buff_head     rxq;            /* rx transfers in flight */
        struct sk_buff_head     txq;            /* tx transfers in flight */
        struct sk_buff_head     done;           /* completed, awaiting bh */
        struct sk_buff_head     rxq_pause;      /* rx held while paused */
        struct sk_buff_head     txq_pend;       /* tx waiting for a free URB */

        struct tasklet_struct   bh;             /* completion bottom half */
        struct delayed_work     wq;             /* deferred event handling */

        struct usb_host_endpoint *ep_blkin;
        struct usb_host_endpoint *ep_blkout;
        struct usb_host_endpoint *ep_intr;

        int                     msg_enable;     /* netif_msg_* bitmap */

        struct urb              *urb_intr;      /* interrupt endpoint URB */
        struct usb_anchor       deferred;       /* URBs deferred during suspend */

        struct mutex            phy_mutex; /* for phy access */
        unsigned                pipe_in, pipe_out, pipe_intr;

        u32                     hard_mtu;       /* count any extra framing */
        size_t                  rx_urb_size;    /* size for rx urbs */

        unsigned long           flags;          /* EVENT_* bits */

        wait_queue_head_t       *wait;
        unsigned char           suspend_count;

        unsigned                maxpacket;      /* bulk-out max packet size */
        struct timer_list       delay;          /* throttled-rx resume timer */
        struct timer_list       stat_monitor;   /* periodic stats refresh */

        unsigned long           data[5];

        int                     link_on;
        u8                      mdix_ctrl;

        u32                     chipid;         /* ID_REV chip ID (7800/7850/7801) */
        u32                     chiprev;
        struct mii_bus          *mdiobus;
        phy_interface_t         interface;

        int                     fc_autoneg;     /* flow control autonegotiated? */
        u8                      fc_request_control;

        int                     delta;          /* stat_monitor interval factor */
        struct statstage        stats;

        struct irq_domain_data  domain_data;
};
 406
 407/* define external phy id */
 408#define PHY_LAN8835                     (0x0007C130)
 409#define PHY_KSZ9031RNX                  (0x00221620)
 410
 411/* use ethtool to change the level for any given device */
 412static int msg_level = -1;
 413module_param(msg_level, int, 0);
 414MODULE_PARM_DESC(msg_level, "Override default message level");
 415
 416static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
 417{
 418        u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
 419        int ret;
 420
 421        if (!buf)
 422                return -ENOMEM;
 423
 424        ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
 425                              USB_VENDOR_REQUEST_READ_REGISTER,
 426                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 427                              0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
 428        if (likely(ret >= 0)) {
 429                le32_to_cpus(buf);
 430                *data = *buf;
 431        } else {
 432                netdev_warn(dev->net,
 433                            "Failed to read register index 0x%08x. ret = %d",
 434                            index, ret);
 435        }
 436
 437        kfree(buf);
 438
 439        return ret;
 440}
 441
 442static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
 443{
 444        u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
 445        int ret;
 446
 447        if (!buf)
 448                return -ENOMEM;
 449
 450        *buf = data;
 451        cpu_to_le32s(buf);
 452
 453        ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
 454                              USB_VENDOR_REQUEST_WRITE_REGISTER,
 455                              USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 456                              0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
 457        if (unlikely(ret < 0)) {
 458                netdev_warn(dev->net,
 459                            "Failed to write register index 0x%08x. ret = %d",
 460                            index, ret);
 461        }
 462
 463        kfree(buf);
 464
 465        return ret;
 466}
 467
 468static int lan78xx_read_stats(struct lan78xx_net *dev,
 469                              struct lan78xx_statstage *data)
 470{
 471        int ret = 0;
 472        int i;
 473        struct lan78xx_statstage *stats;
 474        u32 *src;
 475        u32 *dst;
 476
 477        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
 478        if (!stats)
 479                return -ENOMEM;
 480
 481        ret = usb_control_msg(dev->udev,
 482                              usb_rcvctrlpipe(dev->udev, 0),
 483                              USB_VENDOR_REQUEST_GET_STATS,
 484                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 485                              0,
 486                              0,
 487                              (void *)stats,
 488                              sizeof(*stats),
 489                              USB_CTRL_SET_TIMEOUT);
 490        if (likely(ret >= 0)) {
 491                src = (u32 *)stats;
 492                dst = (u32 *)data;
 493                for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
 494                        le32_to_cpus(&src[i]);
 495                        dst[i] = src[i];
 496                }
 497        } else {
 498                netdev_warn(dev->net,
 499                            "Failed to read stat ret = 0x%x", ret);
 500        }
 501
 502        kfree(stats);
 503
 504        return ret;
 505}
 506
 507#define check_counter_rollover(struct1, dev_stats, member) {    \
 508        if (struct1->member < dev_stats.saved.member)           \
 509                dev_stats.rollover_count.member++;              \
 510        }
 511
/* Compare every freshly read 32-bit hardware counter against the last
 * saved snapshot, bump the matching rollover count for each counter that
 * wrapped, then save the new snapshot for the next comparison.
 * Called under dev->stats.access_lock (see lan78xx_update_stats).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
                                        struct lan78xx_statstage *stats)
{
        check_counter_rollover(stats, dev->stats, rx_fcs_errors);
        check_counter_rollover(stats, dev->stats, rx_alignment_errors);
        check_counter_rollover(stats, dev->stats, rx_fragment_errors);
        check_counter_rollover(stats, dev->stats, rx_jabber_errors);
        check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
        check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
        check_counter_rollover(stats, dev->stats, rx_dropped_frames);
        check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
        check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
        check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
        check_counter_rollover(stats, dev->stats, rx_unicast_frames);
        check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
        check_counter_rollover(stats, dev->stats, rx_multicast_frames);
        check_counter_rollover(stats, dev->stats, rx_pause_frames);
        check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
        check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
        check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
        check_counter_rollover(stats, dev->stats, tx_fcs_errors);
        check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
        check_counter_rollover(stats, dev->stats, tx_carrier_errors);
        check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
        check_counter_rollover(stats, dev->stats, tx_single_collisions);
        check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
        check_counter_rollover(stats, dev->stats, tx_excessive_collision);
        check_counter_rollover(stats, dev->stats, tx_late_collisions);
        check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
        check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
        check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
        check_counter_rollover(stats, dev->stats, tx_unicast_frames);
        check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
        check_counter_rollover(stats, dev->stats, tx_multicast_frames);
        check_counter_rollover(stats, dev->stats, tx_pause_frames);
        check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
        check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
        check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

        /* new snapshot becomes the baseline for the next rollover check */
        memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
 565
 566static void lan78xx_update_stats(struct lan78xx_net *dev)
 567{
 568        u32 *p, *count, *max;
 569        u64 *data;
 570        int i;
 571        struct lan78xx_statstage lan78xx_stats;
 572
 573        if (usb_autopm_get_interface(dev->intf) < 0)
 574                return;
 575
 576        p = (u32 *)&lan78xx_stats;
 577        count = (u32 *)&dev->stats.rollover_count;
 578        max = (u32 *)&dev->stats.rollover_max;
 579        data = (u64 *)&dev->stats.curr_stat;
 580
 581        mutex_lock(&dev->stats.access_lock);
 582
 583        if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
 584                lan78xx_check_stat_rollover(dev, &lan78xx_stats);
 585
 586        for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
 587                data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
 588
 589        mutex_unlock(&dev->stats.access_lock);
 590
 591        usb_autopm_put_interface(dev->intf);
 592}
 593
/* Loop until the read is completed with timeout called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
        unsigned long start_time = jiffies;
        u32 val;
        int ret;

        /* poll MII_ACC until the BUSY bit clears, for at most ~1 second */
        do {
                ret = lan78xx_read_reg(dev, MII_ACC, &val);
                if (unlikely(ret < 0))
                        return -EIO;

                if (!(val & MII_ACC_MII_BUSY_))
                        return 0;
        } while (!time_after(jiffies, start_time + HZ));

        /* still busy after the deadline */
        return -EIO;
}
 612
 613static inline u32 mii_access(int id, int index, int read)
 614{
 615        u32 ret;
 616
 617        ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
 618        ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
 619        if (read)
 620                ret |= MII_ACC_MII_READ_;
 621        else
 622                ret |= MII_ACC_MII_WRITE_;
 623        ret |= MII_ACC_MII_BUSY_;
 624
 625        return ret;
 626}
 627
 628static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
 629{
 630        unsigned long start_time = jiffies;
 631        u32 val;
 632        int ret;
 633
 634        do {
 635                ret = lan78xx_read_reg(dev, E2P_CMD, &val);
 636                if (unlikely(ret < 0))
 637                        return -EIO;
 638
 639                if (!(val & E2P_CMD_EPC_BUSY_) ||
 640                    (val & E2P_CMD_EPC_TIMEOUT_))
 641                        break;
 642                usleep_range(40, 100);
 643        } while (!time_after(jiffies, start_time + HZ));
 644
 645        if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
 646                netdev_warn(dev->net, "EEPROM read operation timeout");
 647                return -EIO;
 648        }
 649
 650        return 0;
 651}
 652
 653static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
 654{
 655        unsigned long start_time = jiffies;
 656        u32 val;
 657        int ret;
 658
 659        do {
 660                ret = lan78xx_read_reg(dev, E2P_CMD, &val);
 661                if (unlikely(ret < 0))
 662                        return -EIO;
 663
 664                if (!(val & E2P_CMD_EPC_BUSY_))
 665                        return 0;
 666
 667                usleep_range(40, 100);
 668        } while (!time_after(jiffies, start_time + HZ));
 669
 670        netdev_warn(dev->net, "EEPROM is busy");
 671        return -EIO;
 672}
 673
 674static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
 675                                   u32 length, u8 *data)
 676{
 677        u32 val;
 678        u32 saved;
 679        int i, ret;
 680        int retval;
 681
 682        /* depends on chip, some EEPROM pins are muxed with LED function.
 683         * disable & restore LED function to access EEPROM.
 684         */
 685        ret = lan78xx_read_reg(dev, HW_CFG, &val);
 686        saved = val;
 687        if (dev->chipid == ID_REV_CHIP_ID_7800_) {
 688                val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
 689                ret = lan78xx_write_reg(dev, HW_CFG, val);
 690        }
 691
 692        retval = lan78xx_eeprom_confirm_not_busy(dev);
 693        if (retval)
 694                return retval;
 695
 696        for (i = 0; i < length; i++) {
 697                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
 698                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
 699                ret = lan78xx_write_reg(dev, E2P_CMD, val);
 700                if (unlikely(ret < 0)) {
 701                        retval = -EIO;
 702                        goto exit;
 703                }
 704
 705                retval = lan78xx_wait_eeprom(dev);
 706                if (retval < 0)
 707                        goto exit;
 708
 709                ret = lan78xx_read_reg(dev, E2P_DATA, &val);
 710                if (unlikely(ret < 0)) {
 711                        retval = -EIO;
 712                        goto exit;
 713                }
 714
 715                data[i] = val & 0xFF;
 716                offset++;
 717        }
 718
 719        retval = 0;
 720exit:
 721        if (dev->chipid == ID_REV_CHIP_ID_7800_)
 722                ret = lan78xx_write_reg(dev, HW_CFG, saved);
 723
 724        return retval;
 725}
 726
 727static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
 728                               u32 length, u8 *data)
 729{
 730        u8 sig;
 731        int ret;
 732
 733        ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
 734        if ((ret == 0) && (sig == EEPROM_INDICATOR))
 735                ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
 736        else
 737                ret = -EINVAL;
 738
 739        return ret;
 740}
 741
/* Write @length bytes to the external EEPROM starting at @offset.
 *
 * Sequence: confirm the controller is idle, issue a write/erase-enable
 * (EWEN) command once, then for each byte load E2P_DATA and issue a
 * WRITE command, polling for completion after every command. The exact
 * command ordering matters; do not reorder.
 *
 * As in lan78xx_read_raw_eeprom(), some EEPROM pins are muxed with the
 * LED function on LAN7800, so the LED enables in HW_CFG are cleared for
 * the duration and the saved value restored on every exit path.
 *
 * Returns 0 on success, -EIO on a command/poll failure, or the
 * busy-check error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                    u32 length, u8 *data)
{
        u32 val;
        u32 saved;
        int i, ret;
        int retval;

        /* depends on chip, some EEPROM pins are muxed with LED function.
         * disable & restore LED function to access EEPROM.
         */
        ret = lan78xx_read_reg(dev, HW_CFG, &val);
        saved = val;
        if (dev->chipid == ID_REV_CHIP_ID_7800_) {
                val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
                ret = lan78xx_write_reg(dev, HW_CFG, val);
        }

        retval = lan78xx_eeprom_confirm_not_busy(dev);
        if (retval)
                goto exit;

        /* Issue write/erase enable command */
        val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
        ret = lan78xx_write_reg(dev, E2P_CMD, val);
        if (unlikely(ret < 0)) {
                retval = -EIO;
                goto exit;
        }

        retval = lan78xx_wait_eeprom(dev);
        if (retval < 0)
                goto exit;

        for (i = 0; i < length; i++) {
                /* Fill data register */
                val = data[i];
                ret = lan78xx_write_reg(dev, E2P_DATA, val);
                if (ret < 0) {
                        retval = -EIO;
                        goto exit;
                }

                /* Send "write" command */
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
                if (ret < 0) {
                        retval = -EIO;
                        goto exit;
                }

                retval = lan78xx_wait_eeprom(dev);
                if (retval < 0)
                        goto exit;

                offset++;
        }

        retval = 0;
exit:
        /* restore the LED configuration saved at entry */
        if (dev->chipid == ID_REV_CHIP_ID_7800_)
                ret = lan78xx_write_reg(dev, HW_CFG, saved);

        return retval;
}
 808
/* Read @length bytes from the on-chip OTP memory starting at @offset.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set), power it
 * up first and wait (up to ~1 second) for the bit to clear. Each byte
 * is then read by splitting the address across OTP_ADDR1 (bits 15:11,
 * via the >> 8 below) and OTP_ADDR2 (bits 10:3), issuing a READ command
 * plus GO, and polling OTP_STATUS until the controller is idle.
 *
 * Returns 0 on success, -EIO on a polling timeout. Individual register
 * access errors are not propagated here — NOTE(review): intermediate
 * "ret" values from lan78xx_read_reg/write_reg are ignored by the
 * original code.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
                                u32 length, u8 *data)
{
        int i;
        int ret;
        u32 buf;
        unsigned long timeout;

        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

        if (buf & OTP_PWR_DN_PWRDN_N_) {
                /* clear it and wait to be cleared */
                ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

                timeout = jiffies + HZ;
                do {
                        usleep_range(1, 10);
                        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "timeout on OTP_PWR_DN");
                                return -EIO;
                        }
                } while (buf & OTP_PWR_DN_PWRDN_N_);
        }

        for (i = 0; i < length; i++) {
                /* program the byte address across the two address registers */
                ret = lan78xx_write_reg(dev, OTP_ADDR1,
                                        ((offset + i) >> 8) & OTP_ADDR1_15_11);
                ret = lan78xx_write_reg(dev, OTP_ADDR2,
                                        ((offset + i) & OTP_ADDR2_10_3));

                /* start the read and wait for the controller to go idle */
                ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
                ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

                timeout = jiffies + HZ;
                do {
                        udelay(1);
                        ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "timeout on OTP_STATUS");
                                return -EIO;
                        }
                } while (buf & OTP_STATUS_BUSY_);

                ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

                data[i] = (u8)(buf & 0xFF);
        }

        return 0;
}
 862
 863static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
 864                                 u32 length, u8 *data)
 865{
 866        int i;
 867        int ret;
 868        u32 buf;
 869        unsigned long timeout;
 870
 871        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
 872
 873        if (buf & OTP_PWR_DN_PWRDN_N_) {
 874                /* clear it and wait to be cleared */
 875                ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
 876
 877                timeout = jiffies + HZ;
 878                do {
 879                        udelay(1);
 880                        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
 881                        if (time_after(jiffies, timeout)) {
 882                                netdev_warn(dev->net,
 883                                            "timeout on OTP_PWR_DN completion");
 884                                return -EIO;
 885                        }
 886                } while (buf & OTP_PWR_DN_PWRDN_N_);
 887        }
 888
 889        /* set to BYTE program mode */
 890        ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
 891
 892        for (i = 0; i < length; i++) {
 893                ret = lan78xx_write_reg(dev, OTP_ADDR1,
 894                                        ((offset + i) >> 8) & OTP_ADDR1_15_11);
 895                ret = lan78xx_write_reg(dev, OTP_ADDR2,
 896                                        ((offset + i) & OTP_ADDR2_10_3));
 897                ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
 898                ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
 899                ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
 900
 901                timeout = jiffies + HZ;
 902                do {
 903                        udelay(1);
 904                        ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
 905                        if (time_after(jiffies, timeout)) {
 906                                netdev_warn(dev->net,
 907                                            "Timeout on OTP_STATUS completion");
 908                                return -EIO;
 909                        }
 910                } while (buf & OTP_STATUS_BUSY_);
 911        }
 912
 913        return 0;
 914}
 915
 916static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
 917                            u32 length, u8 *data)
 918{
 919        u8 sig;
 920        int ret;
 921
 922        ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
 923
 924        if (ret == 0) {
 925                if (sig == OTP_INDICATOR_1)
 926                        offset = offset;
 927                else if (sig == OTP_INDICATOR_2)
 928                        offset += 0x100;
 929                else
 930                        ret = -EINVAL;
 931                ret = lan78xx_read_raw_otp(dev, offset, length, data);
 932        }
 933
 934        return ret;
 935}
 936
 937static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
 938{
 939        int i, ret;
 940
 941        for (i = 0; i < 100; i++) {
 942                u32 dp_sel;
 943
 944                ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
 945                if (unlikely(ret < 0))
 946                        return -EIO;
 947
 948                if (dp_sel & DP_SEL_DPRDY_)
 949                        return 0;
 950
 951                usleep_range(40, 100);
 952        }
 953
 954        netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
 955
 956        return -EIO;
 957}
 958
/* Write @length 32-bit words from @buf into the internal RAM selected
 * by @ram_select, starting at word address @addr, via the DP_* data
 * port registers.  Serialized with other data port users through
 * pdata->dataport_mutex.
 *
 * NOTE(review): if the autopm wake-up fails this returns 0 (success)
 * without writing anything — callers cannot distinguish that case.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
                                  u32 addr, u32 length, u32 *buf)
{
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        u32 dp_sel;
        int i, ret;

        if (usb_autopm_get_interface(dev->intf) < 0)
                        return 0;

        mutex_lock(&pdata->dataport_mutex);

        ret = lan78xx_dataport_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

        /* select the target RAM while preserving the other DP_SEL bits */
        dp_sel &= ~DP_SEL_RSEL_MASK_;
        dp_sel |= ram_select;
        ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

        for (i = 0; i < length; i++) {
                ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

                ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

                ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

                /* each word write must complete before issuing the next */
                ret = lan78xx_dataport_wait_not_busy(dev);
                if (ret < 0)
                        goto done;
        }

done:
        mutex_unlock(&pdata->dataport_mutex);
        usb_autopm_put_interface(dev->intf);

        return ret;
}
 999
1000static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1001                                    int index, u8 addr[ETH_ALEN])
1002{
1003        u32     temp;
1004
1005        if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1006                temp = addr[3];
1007                temp = addr[2] | (temp << 8);
1008                temp = addr[1] | (temp << 8);
1009                temp = addr[0] | (temp << 8);
1010                pdata->pfilter_table[index][1] = temp;
1011                temp = addr[5];
1012                temp = addr[4] | (temp << 8);
1013                temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1014                pdata->pfilter_table[index][0] = temp;
1015        }
1016}
1017
1018/* returns hash bit number for given MAC address */
1019static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1020{
1021        return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1022}
1023
1024static void lan78xx_deferred_multicast_write(struct work_struct *param)
1025{
1026        struct lan78xx_priv *pdata =
1027                        container_of(param, struct lan78xx_priv, set_multicast);
1028        struct lan78xx_net *dev = pdata->dev;
1029        int i;
1030        int ret;
1031
1032        netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1033                  pdata->rfe_ctl);
1034
1035        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1036                               DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1037
1038        for (i = 1; i < NUM_OF_MAF; i++) {
1039                ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1040                ret = lan78xx_write_reg(dev, MAF_LO(i),
1041                                        pdata->pfilter_table[i][1]);
1042                ret = lan78xx_write_reg(dev, MAF_HI(i),
1043                                        pdata->pfilter_table[i][0]);
1044        }
1045
1046        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1047}
1048
/* ndo_set_rx_mode callback.  Rebuilds the shadow receive-filter state
 * (rfe_ctl, perfect-filter table, multicast hash) under the rfe_ctl
 * spinlock, then defers the actual (sleeping) register writes to
 * lan78xx_deferred_multicast_write() via the workqueue, since this
 * callback may run in atomic context.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
        struct lan78xx_net *dev = netdev_priv(netdev);
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

        /* start from a clean slate: drop unicast/multicast filtering bits */
        pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
                            RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

        for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
                        pdata->mchash_table[i] = 0;
        /* pfilter_table[0] has own HW address */
        for (i = 1; i < NUM_OF_MAF; i++) {
                        pdata->pfilter_table[i][0] =
                        pdata->pfilter_table[i][1] = 0;
        }

        /* broadcast reception is always enabled */
        pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

        if (dev->net->flags & IFF_PROMISC) {
                netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
                pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
        } else {
                if (dev->net->flags & IFF_ALLMULTI) {
                        netif_dbg(dev, drv, dev->net,
                                  "receive all multicast enabled");
                        pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
                }
        }

        if (netdev_mc_count(dev->net)) {
                struct netdev_hw_addr *ha;
                int i;

                netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

                pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

                /* first 32 multicast addresses go into the perfect
                 * filter slots; the overflow falls back to the hash
                 */
                i = 1;
                netdev_for_each_mc_addr(ha, netdev) {
                        /* set first 32 into Perfect Filter */
                        if (i < 33) {
                                lan78xx_set_addr_filter(pdata, i, ha->addr);
                        } else {
                                u32 bitnum = lan78xx_hash(ha->addr);

                                pdata->mchash_table[bitnum / 32] |=
                                                        (1 << (bitnum % 32));
                                pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
                        }
                        i++;
                }
        }

        spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

        /* defer register writes to a sleepable context */
        schedule_work(&pdata->set_multicast);
}
1111
1112static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1113                                      u16 lcladv, u16 rmtadv)
1114{
1115        u32 flow = 0, fct_flow = 0;
1116        int ret;
1117        u8 cap;
1118
1119        if (dev->fc_autoneg)
1120                cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1121        else
1122                cap = dev->fc_request_control;
1123
1124        if (cap & FLOW_CTRL_TX)
1125                flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1126
1127        if (cap & FLOW_CTRL_RX)
1128                flow |= FLOW_CR_RX_FCEN_;
1129
1130        if (dev->udev->speed == USB_SPEED_SUPER)
1131                fct_flow = 0x817;
1132        else if (dev->udev->speed == USB_SPEED_HIGH)
1133                fct_flow = 0x211;
1134
1135        netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1136                  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1137                  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1138
1139        ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1140
1141        /* threshold value should be set before enabling flow */
1142        ret = lan78xx_write_reg(dev, FLOW, flow);
1143
1144        return 0;
1145}
1146
/* Handle a PHY link-state change (scheduled from the interrupt URB via
 * EVENT_LINK_RESET).  On link loss the MAC is reset and the statistics
 * timer stopped; on link up the USB LPM (U1/U2) states are tuned for
 * the negotiated speed, flow control is reprogrammed from the
 * advertisement registers, and the statistics timer is restarted.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
        struct phy_device *phydev = dev->net->phydev;
        struct ethtool_link_ksettings ecmd;
        int ladv, radv, ret;
        u32 buf;

        /* clear LAN78xx interrupt status */
        ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
        if (unlikely(ret < 0))
                return -EIO;

        phy_read_status(phydev);

        if (!phydev->link && dev->link_on) {
                /* link went down */
                dev->link_on = false;

                /* reset MAC */
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                if (unlikely(ret < 0))
                        return -EIO;
                buf |= MAC_CR_RST_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
                if (unlikely(ret < 0))
                        return -EIO;

                /* no point polling statistics while the link is down */
                del_timer(&dev->stat_monitor);
        } else if (phydev->link && !dev->link_on) {
                /* link came up */
                dev->link_on = true;

                phy_ethtool_ksettings_get(phydev, &ecmd);

                if (dev->udev->speed == USB_SPEED_SUPER) {
                        if (ecmd.base.speed == 1000) {
                                /* disable U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                                /* enable U1 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        } else {
                                /* enable U1 & U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U2_INIT_EN_;
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        }
                }

                /* local and link-partner advertisements, for pause resolution */
                ladv = phy_read(phydev, MII_ADVERTISE);
                if (ladv < 0)
                        return ladv;

                radv = phy_read(phydev, MII_LPA);
                if (radv < 0)
                        return radv;

                netif_dbg(dev, link, dev->net,
                          "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
                          ecmd.base.speed, ecmd.base.duplex, ladv, radv);

                ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
                                                 radv);

                if (!timer_pending(&dev->stat_monitor)) {
                        dev->delta = 1;
                        mod_timer(&dev->stat_monitor,
                                  jiffies + STAT_UPDATE_TIMER);
                }
        }

        return ret;
}
1222
1223/* some work can't be done in tasklets, so we use keventd
1224 *
1225 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1226 * but tasklet_schedule() doesn't.      hope the failure is rare.
1227 */
1228static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1229{
1230        set_bit(work, &dev->flags);
1231        if (!schedule_delayed_work(&dev->wq, 0))
1232                netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1233}
1234
1235static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1236{
1237        u32 intdata;
1238
1239        if (urb->actual_length != 4) {
1240                netdev_warn(dev->net,
1241                            "unexpected urb length %d", urb->actual_length);
1242                return;
1243        }
1244
1245        memcpy(&intdata, urb->transfer_buffer, 4);
1246        le32_to_cpus(&intdata);
1247
1248        if (intdata & INT_ENP_PHY_INT) {
1249                netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1250                lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1251
1252                if (dev->domain_data.phyirq > 0)
1253                        generic_handle_irq(dev->domain_data.phyirq);
1254        } else
1255                netdev_warn(dev->net,
1256                            "unexpected interrupt: 0x%08x\n", intdata);
1257}
1258
/* ethtool .get_eeprom_len: the full EEPROM address space is exposed */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
        return MAX_EEPROM_SIZE;
}
1263
1264static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1265                                      struct ethtool_eeprom *ee, u8 *data)
1266{
1267        struct lan78xx_net *dev = netdev_priv(netdev);
1268        int ret;
1269
1270        ret = usb_autopm_get_interface(dev->intf);
1271        if (ret)
1272                return ret;
1273
1274        ee->magic = LAN78XX_EEPROM_MAGIC;
1275
1276        ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1277
1278        usb_autopm_put_interface(dev->intf);
1279
1280        return ret;
1281}
1282
1283static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1284                                      struct ethtool_eeprom *ee, u8 *data)
1285{
1286        struct lan78xx_net *dev = netdev_priv(netdev);
1287        int ret;
1288
1289        ret = usb_autopm_get_interface(dev->intf);
1290        if (ret)
1291                return ret;
1292
1293        /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1294         * to load data from EEPROM
1295         */
1296        if (ee->magic == LAN78XX_EEPROM_MAGIC)
1297                ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1298        else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1299                 (ee->offset == 0) &&
1300                 (ee->len == 512) &&
1301                 (data[0] == OTP_INDICATOR_1))
1302                ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1303
1304        usb_autopm_put_interface(dev->intf);
1305
1306        return ret;
1307}
1308
1309static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1310                                u8 *data)
1311{
1312        if (stringset == ETH_SS_STATS)
1313                memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1314}
1315
1316static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1317{
1318        if (sset == ETH_SS_STATS)
1319                return ARRAY_SIZE(lan78xx_gstrings);
1320        else
1321                return -EOPNOTSUPP;
1322}
1323
1324static void lan78xx_get_stats(struct net_device *netdev,
1325                              struct ethtool_stats *stats, u64 *data)
1326{
1327        struct lan78xx_net *dev = netdev_priv(netdev);
1328
1329        lan78xx_update_stats(dev);
1330
1331        mutex_lock(&dev->stats.access_lock);
1332        memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1333        mutex_unlock(&dev->stats.access_lock);
1334}
1335
1336static void lan78xx_get_wol(struct net_device *netdev,
1337                            struct ethtool_wolinfo *wol)
1338{
1339        struct lan78xx_net *dev = netdev_priv(netdev);
1340        int ret;
1341        u32 buf;
1342        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1343
1344        if (usb_autopm_get_interface(dev->intf) < 0)
1345                        return;
1346
1347        ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1348        if (unlikely(ret < 0)) {
1349                wol->supported = 0;
1350                wol->wolopts = 0;
1351        } else {
1352                if (buf & USB_CFG_RMT_WKP_) {
1353                        wol->supported = WAKE_ALL;
1354                        wol->wolopts = pdata->wol;
1355                } else {
1356                        wol->supported = 0;
1357                        wol->wolopts = 0;
1358                }
1359        }
1360
1361        usb_autopm_put_interface(dev->intf);
1362}
1363
1364static int lan78xx_set_wol(struct net_device *netdev,
1365                           struct ethtool_wolinfo *wol)
1366{
1367        struct lan78xx_net *dev = netdev_priv(netdev);
1368        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1369        int ret;
1370
1371        ret = usb_autopm_get_interface(dev->intf);
1372        if (ret < 0)
1373                return ret;
1374
1375        pdata->wol = 0;
1376        if (wol->wolopts & WAKE_UCAST)
1377                pdata->wol |= WAKE_UCAST;
1378        if (wol->wolopts & WAKE_MCAST)
1379                pdata->wol |= WAKE_MCAST;
1380        if (wol->wolopts & WAKE_BCAST)
1381                pdata->wol |= WAKE_BCAST;
1382        if (wol->wolopts & WAKE_MAGIC)
1383                pdata->wol |= WAKE_MAGIC;
1384        if (wol->wolopts & WAKE_PHY)
1385                pdata->wol |= WAKE_PHY;
1386        if (wol->wolopts & WAKE_ARP)
1387                pdata->wol |= WAKE_ARP;
1388
1389        device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1390
1391        phy_ethtool_set_wol(netdev->phydev, wol);
1392
1393        usb_autopm_put_interface(dev->intf);
1394
1395        return ret;
1396}
1397
/* ethtool .get_eee: query EEE state from the PHY, then overlay the
 * MAC-side enable bit and LPI timer from MAC_CR / EEE_TX_LPI_REQ_DLY.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        /* fills advertised/lp_advertised from the PHY */
        ret = phy_ethtool_get_eee(phydev, edata);
        if (ret < 0)
                goto exit;

        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
        if (buf & MAC_CR_EEE_EN_) {
                edata->eee_enabled = true;
                /* EEE is active only if both ends advertise a common mode */
                edata->eee_active = !!(edata->advertised &
                                       edata->lp_advertised);
                edata->tx_lpi_enabled = true;
                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
                ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
                edata->tx_lpi_timer = buf;
        } else {
                edata->eee_enabled = false;
                edata->eee_active = false;
                edata->tx_lpi_enabled = false;
                edata->tx_lpi_timer = 0;
        }

        ret = 0;
exit:
        usb_autopm_put_interface(dev->intf);

        return ret;
}
1435
1436static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1437{
1438        struct lan78xx_net *dev = netdev_priv(net);
1439        int ret;
1440        u32 buf;
1441
1442        ret = usb_autopm_get_interface(dev->intf);
1443        if (ret < 0)
1444                return ret;
1445
1446        if (edata->eee_enabled) {
1447                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1448                buf |= MAC_CR_EEE_EN_;
1449                ret = lan78xx_write_reg(dev, MAC_CR, buf);
1450
1451                phy_ethtool_set_eee(net->phydev, edata);
1452
1453                buf = (u32)edata->tx_lpi_timer;
1454                ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1455        } else {
1456                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1457                buf &= ~MAC_CR_EEE_EN_;
1458                ret = lan78xx_write_reg(dev, MAC_CR, buf);
1459        }
1460
1461        usb_autopm_put_interface(dev->intf);
1462
1463        return 0;
1464}
1465
1466static u32 lan78xx_get_link(struct net_device *net)
1467{
1468        phy_read_status(net->phydev);
1469
1470        return net->phydev->link;
1471}
1472
/* ethtool .get_drvinfo: report driver name/version and USB bus path.
 * strncpy is safe here because both sources are string literals
 * shorter than the fixed-size destination fields, so the result is
 * always NUL-terminated.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
                                struct ethtool_drvinfo *info)
{
        struct lan78xx_net *dev = netdev_priv(net);

        strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
        usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1482
/* ethtool .get_msglevel */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
        struct lan78xx_net *dev = netdev_priv(net);

        return dev->msg_enable;
}
1489
/* ethtool .set_msglevel */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
        struct lan78xx_net *dev = netdev_priv(net);

        dev->msg_enable = level;
}
1496
1497static int lan78xx_get_link_ksettings(struct net_device *net,
1498                                      struct ethtool_link_ksettings *cmd)
1499{
1500        struct lan78xx_net *dev = netdev_priv(net);
1501        struct phy_device *phydev = net->phydev;
1502        int ret;
1503
1504        ret = usb_autopm_get_interface(dev->intf);
1505        if (ret < 0)
1506                return ret;
1507
1508        phy_ethtool_ksettings_get(phydev, cmd);
1509
1510        usb_autopm_put_interface(dev->intf);
1511
1512        return ret;
1513}
1514
/* ethtool .set_link_ksettings: apply the new speed/duplex via phylib.
 * When autonegotiation is off, bounce the link by pulsing BMCR's
 * loopback bit so the peer renegotiates against the forced settings.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
                                      const struct ethtool_link_ksettings *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret = 0;
        int temp;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        /* change speed & duplex */
        ret = phy_ethtool_ksettings_set(phydev, cmd);

        if (!cmd->base.autoneg) {
                /* force link down */
                temp = phy_read(phydev, MII_BMCR);
                phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
                mdelay(1);
                phy_write(phydev, MII_BMCR, temp);
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1542
/* ethtool .get_pauseparam: report the driver's requested pause
 * configuration and the flow-control autoneg flag.
 */
static void lan78xx_get_pause(struct net_device *net,
                              struct ethtool_pauseparam *pause)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        struct ethtool_link_ksettings ecmd;

        /* NOTE(review): ecmd is fetched but never used here —
         * possibly vestigial; confirm before removing the call.
         */
        phy_ethtool_ksettings_get(phydev, &ecmd);

        pause->autoneg = dev->fc_autoneg;

        if (dev->fc_request_control & FLOW_CTRL_TX)
                pause->tx_pause = 1;

        if (dev->fc_request_control & FLOW_CTRL_RX)
                pause->rx_pause = 1;
}
1560
/* ethtool .set_pauseparam: record the requested rx/tx pause and, if
 * link autonegotiation is on, fold the corresponding pause bits into
 * the PHY advertisement and re-trigger negotiation.  Requesting pause
 * autoneg while link autoneg is disabled is rejected with -EINVAL.
 */
static int lan78xx_set_pause(struct net_device *net,
                             struct ethtool_pauseparam *pause)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        struct ethtool_link_ksettings ecmd;
        int ret;

        phy_ethtool_ksettings_get(phydev, &ecmd);

        if (pause->autoneg && !ecmd.base.autoneg) {
                ret = -EINVAL;
                goto exit;
        }

        dev->fc_request_control = 0;
        if (pause->rx_pause)
                dev->fc_request_control |= FLOW_CTRL_RX;

        if (pause->tx_pause)
                dev->fc_request_control |= FLOW_CTRL_TX;

        if (ecmd.base.autoneg) {
                u32 mii_adv;
                u32 advertising;

                /* replace the Pause bits in the current advertisement */
                ethtool_convert_link_mode_to_legacy_u32(
                        &advertising, ecmd.link_modes.advertising);

                advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
                mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
                advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

                ethtool_convert_legacy_u32_to_link_mode(
                        ecmd.link_modes.advertising, advertising);

                phy_ethtool_ksettings_set(phydev, &ecmd);
        }

        dev->fc_autoneg = pause->autoneg;

        ret = 0;
exit:
        return ret;
}
1606
/* ethtool operations table for the lan78xx netdev */
static const struct ethtool_ops lan78xx_ethtool_ops = {
        .get_link       = lan78xx_get_link,
        .nway_reset     = phy_ethtool_nway_reset,
        .get_drvinfo    = lan78xx_get_drvinfo,
        .get_msglevel   = lan78xx_get_msglevel,
        .set_msglevel   = lan78xx_set_msglevel,
        .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
        .get_eeprom     = lan78xx_ethtool_get_eeprom,
        .set_eeprom     = lan78xx_ethtool_set_eeprom,
        .get_ethtool_stats = lan78xx_get_stats,
        .get_sset_count = lan78xx_get_sset_count,
        .get_strings    = lan78xx_get_strings,
        .get_wol        = lan78xx_get_wol,
        .set_wol        = lan78xx_set_wol,
        .get_eee        = lan78xx_get_eee,
        .set_eee        = lan78xx_set_eee,
        .get_pauseparam = lan78xx_get_pause,
        .set_pauseparam = lan78xx_set_pause,
        .get_link_ksettings = lan78xx_get_link_ksettings,
        .set_link_ksettings = lan78xx_set_link_ksettings,
};
1628
1629static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1630{
1631        if (!netif_running(netdev))
1632                return -EINVAL;
1633
1634        return phy_mii_ioctl(netdev->phydev, rq, cmd);
1635}
1636
1637static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1638{
1639        u32 addr_lo, addr_hi;
1640        int ret;
1641        u8 addr[6];
1642
1643        ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1644        ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1645
1646        addr[0] = addr_lo & 0xFF;
1647        addr[1] = (addr_lo >> 8) & 0xFF;
1648        addr[2] = (addr_lo >> 16) & 0xFF;
1649        addr[3] = (addr_lo >> 24) & 0xFF;
1650        addr[4] = addr_hi & 0xFF;
1651        addr[5] = (addr_hi >> 8) & 0xFF;
1652
1653        if (!is_valid_ether_addr(addr)) {
1654                /* reading mac address from EEPROM or OTP */
1655                if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1656                                         addr) == 0) ||
1657                    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1658                                      addr) == 0)) {
1659                        if (is_valid_ether_addr(addr)) {
1660                                /* eeprom values are valid so use them */
1661                                netif_dbg(dev, ifup, dev->net,
1662                                          "MAC address read from EEPROM");
1663                        } else {
1664                                /* generate random MAC */
1665                                random_ether_addr(addr);
1666                                netif_dbg(dev, ifup, dev->net,
1667                                          "MAC address set to random addr");
1668                        }
1669
1670                        addr_lo = addr[0] | (addr[1] << 8) |
1671                                  (addr[2] << 16) | (addr[3] << 24);
1672                        addr_hi = addr[4] | (addr[5] << 8);
1673
1674                        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1675                        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1676                } else {
1677                        /* generate random MAC */
1678                        random_ether_addr(addr);
1679                        netif_dbg(dev, ifup, dev->net,
1680                                  "MAC address set to random addr");
1681                }
1682        }
1683
1684        ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1685        ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1686
1687        ether_addr_copy(dev->net->dev_addr, addr);
1688}
1689
1690/* MDIO read and write wrappers for phylib */
1691static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1692{
1693        struct lan78xx_net *dev = bus->priv;
1694        u32 val, addr;
1695        int ret;
1696
1697        ret = usb_autopm_get_interface(dev->intf);
1698        if (ret < 0)
1699                return ret;
1700
1701        mutex_lock(&dev->phy_mutex);
1702
1703        /* confirm MII not busy */
1704        ret = lan78xx_phy_wait_not_busy(dev);
1705        if (ret < 0)
1706                goto done;
1707
1708        /* set the address, index & direction (read from PHY) */
1709        addr = mii_access(phy_id, idx, MII_READ);
1710        ret = lan78xx_write_reg(dev, MII_ACC, addr);
1711
1712        ret = lan78xx_phy_wait_not_busy(dev);
1713        if (ret < 0)
1714                goto done;
1715
1716        ret = lan78xx_read_reg(dev, MII_DATA, &val);
1717
1718        ret = (int)(val & 0xFFFF);
1719
1720done:
1721        mutex_unlock(&dev->phy_mutex);
1722        usb_autopm_put_interface(dev->intf);
1723
1724        return ret;
1725}
1726
1727static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1728                                 u16 regval)
1729{
1730        struct lan78xx_net *dev = bus->priv;
1731        u32 val, addr;
1732        int ret;
1733
1734        ret = usb_autopm_get_interface(dev->intf);
1735        if (ret < 0)
1736                return ret;
1737
1738        mutex_lock(&dev->phy_mutex);
1739
1740        /* confirm MII not busy */
1741        ret = lan78xx_phy_wait_not_busy(dev);
1742        if (ret < 0)
1743                goto done;
1744
1745        val = (u32)regval;
1746        ret = lan78xx_write_reg(dev, MII_DATA, val);
1747
1748        /* set the address, index & direction (write to PHY) */
1749        addr = mii_access(phy_id, idx, MII_WRITE);
1750        ret = lan78xx_write_reg(dev, MII_ACC, addr);
1751
1752        ret = lan78xx_phy_wait_not_busy(dev);
1753        if (ret < 0)
1754                goto done;
1755
1756done:
1757        mutex_unlock(&dev->phy_mutex);
1758        usb_autopm_put_interface(dev->intf);
1759        return 0;
1760}
1761
1762static int lan78xx_mdio_init(struct lan78xx_net *dev)
1763{
1764        int ret;
1765
1766        dev->mdiobus = mdiobus_alloc();
1767        if (!dev->mdiobus) {
1768                netdev_err(dev->net, "can't allocate MDIO bus\n");
1769                return -ENOMEM;
1770        }
1771
1772        dev->mdiobus->priv = (void *)dev;
1773        dev->mdiobus->read = lan78xx_mdiobus_read;
1774        dev->mdiobus->write = lan78xx_mdiobus_write;
1775        dev->mdiobus->name = "lan78xx-mdiobus";
1776
1777        snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1778                 dev->udev->bus->busnum, dev->udev->devnum);
1779
1780        switch (dev->chipid) {
1781        case ID_REV_CHIP_ID_7800_:
1782        case ID_REV_CHIP_ID_7850_:
1783                /* set to internal PHY id */
1784                dev->mdiobus->phy_mask = ~(1 << 1);
1785                break;
1786        case ID_REV_CHIP_ID_7801_:
1787                /* scan thru PHYAD[2..0] */
1788                dev->mdiobus->phy_mask = ~(0xFF);
1789                break;
1790        }
1791
1792        ret = mdiobus_register(dev->mdiobus);
1793        if (ret) {
1794                netdev_err(dev->net, "can't register MDIO bus\n");
1795                goto exit1;
1796        }
1797
1798        netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1799        return 0;
1800exit1:
1801        mdiobus_free(dev->mdiobus);
1802        return ret;
1803}
1804
1805static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1806{
1807        mdiobus_unregister(dev->mdiobus);
1808        mdiobus_free(dev->mdiobus);
1809}
1810
1811static void lan78xx_link_status_change(struct net_device *net)
1812{
1813        struct phy_device *phydev = net->phydev;
1814        int ret, temp;
1815
1816        /* At forced 100 F/H mode, chip may fail to set mode correctly
1817         * when cable is switched between long(~50+m) and short one.
1818         * As workaround, set to 10 before setting to 100
1819         * at forced 100 F/H mode.
1820         */
1821        if (!phydev->autoneg && (phydev->speed == 100)) {
1822                /* disable phy interrupt */
1823                temp = phy_read(phydev, LAN88XX_INT_MASK);
1824                temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1825                ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1826
1827                temp = phy_read(phydev, MII_BMCR);
1828                temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1829                phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1830                temp |= BMCR_SPEED100;
1831                phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1832
1833                /* clear pending interrupt generated while workaround */
1834                temp = phy_read(phydev, LAN88XX_INT_STS);
1835
1836                /* enable phy interrupt back */
1837                temp = phy_read(phydev, LAN88XX_INT_MASK);
1838                temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1839                ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1840        }
1841}
1842
1843static int irq_map(struct irq_domain *d, unsigned int irq,
1844                   irq_hw_number_t hwirq)
1845{
1846        struct irq_domain_data *data = d->host_data;
1847
1848        irq_set_chip_data(irq, data);
1849        irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1850        irq_set_noprobe(irq);
1851
1852        return 0;
1853}
1854
1855static void irq_unmap(struct irq_domain *d, unsigned int irq)
1856{
1857        irq_set_chip_and_handler(irq, NULL, NULL);
1858        irq_set_chip_data(irq, NULL);
1859}
1860
/* irq_domain callbacks used for the device's interrupt-endpoint bits */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1865
1866static void lan78xx_irq_mask(struct irq_data *irqd)
1867{
1868        struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1869
1870        data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1871}
1872
1873static void lan78xx_irq_unmask(struct irq_data *irqd)
1874{
1875        struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1876
1877        data->irqenable |= BIT(irqd_to_hwirq(irqd));
1878}
1879
1880static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1881{
1882        struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1883
1884        mutex_lock(&data->irq_lock);
1885}
1886
1887static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1888{
1889        struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1890        struct lan78xx_net *dev =
1891                        container_of(data, struct lan78xx_net, domain_data);
1892        u32 buf;
1893        int ret;
1894
1895        /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1896         * are only two callbacks executed in non-atomic contex.
1897         */
1898        ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1899        if (buf != data->irqenable)
1900                ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1901
1902        mutex_unlock(&data->irq_lock);
1903}
1904
/* irqchip for the USB interrupt endpoint; mask/unmask only update a
 * cached enable word, which irq_bus_sync_unlock flushes to INT_EP_CTL.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1912
1913static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1914{
1915        struct device_node *of_node;
1916        struct irq_domain *irqdomain;
1917        unsigned int irqmap = 0;
1918        u32 buf;
1919        int ret = 0;
1920
1921        of_node = dev->udev->dev.parent->of_node;
1922
1923        mutex_init(&dev->domain_data.irq_lock);
1924
1925        lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1926        dev->domain_data.irqenable = buf;
1927
1928        dev->domain_data.irqchip = &lan78xx_irqchip;
1929        dev->domain_data.irq_handler = handle_simple_irq;
1930
1931        irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1932                                          &chip_domain_ops, &dev->domain_data);
1933        if (irqdomain) {
1934                /* create mapping for PHY interrupt */
1935                irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1936                if (!irqmap) {
1937                        irq_domain_remove(irqdomain);
1938
1939                        irqdomain = NULL;
1940                        ret = -EINVAL;
1941                }
1942        } else {
1943                ret = -EINVAL;
1944        }
1945
1946        dev->domain_data.irqdomain = irqdomain;
1947        dev->domain_data.phyirq = irqmap;
1948
1949        return ret;
1950}
1951
1952static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1953{
1954        if (dev->domain_data.phyirq > 0) {
1955                irq_dispose_mapping(dev->domain_data.phyirq);
1956
1957                if (dev->domain_data.irqdomain)
1958                        irq_domain_remove(dev->domain_data.irqdomain);
1959        }
1960        dev->domain_data.phyirq = 0;
1961        dev->domain_data.irqdomain = NULL;
1962}
1963
1964static int lan8835_fixup(struct phy_device *phydev)
1965{
1966        int buf;
1967        int ret;
1968        struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1969
1970        /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1971        buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1972        buf &= ~0x1800;
1973        buf |= 0x0800;
1974        phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1975
1976        /* RGMII MAC TXC Delay Enable */
1977        ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1978                                MAC_RGMII_ID_TXC_DELAY_EN_);
1979
1980        /* RGMII TX DLL Tune Adjust */
1981        ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1982
1983        dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1984
1985        return 1;
1986}
1987
1988static int ksz9031rnx_fixup(struct phy_device *phydev)
1989{
1990        struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1991
1992        /* Micrel9301RNX PHY configuration */
1993        /* RGMII Control Signal Pad Skew */
1994        phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1995        /* RGMII RX Data Pad Skew */
1996        phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1997        /* RGMII RX Clock Pad Skew */
1998        phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
1999
2000        dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2001
2002        return 1;
2003}
2004
2005static int lan78xx_phy_init(struct lan78xx_net *dev)
2006{
2007        int ret;
2008        u32 mii_adv;
2009        struct phy_device *phydev = dev->net->phydev;
2010
2011        phydev = phy_find_first(dev->mdiobus);
2012        if (!phydev) {
2013                netdev_err(dev->net, "no PHY found\n");
2014                return -EIO;
2015        }
2016
2017        if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2018            (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2019                phydev->is_internal = true;
2020                dev->interface = PHY_INTERFACE_MODE_GMII;
2021
2022        } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2023                if (!phydev->drv) {
2024                        netdev_err(dev->net, "no PHY driver found\n");
2025                        return -EIO;
2026                }
2027
2028                dev->interface = PHY_INTERFACE_MODE_RGMII;
2029
2030                /* external PHY fixup for KSZ9031RNX */
2031                ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2032                                                 ksz9031rnx_fixup);
2033                if (ret < 0) {
2034                        netdev_err(dev->net, "fail to register fixup\n");
2035                        return ret;
2036                }
2037                /* external PHY fixup for LAN8835 */
2038                ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2039                                                 lan8835_fixup);
2040                if (ret < 0) {
2041                        netdev_err(dev->net, "fail to register fixup\n");
2042                        return ret;
2043                }
2044                /* add more external PHY fixup here if needed */
2045
2046                phydev->is_internal = false;
2047        } else {
2048                netdev_err(dev->net, "unknown ID found\n");
2049                ret = -EIO;
2050                goto error;
2051        }
2052
2053        /* if phyirq is not set, use polling mode in phylib */
2054        if (dev->domain_data.phyirq > 0)
2055                phydev->irq = dev->domain_data.phyirq;
2056        else
2057                phydev->irq = 0;
2058        netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2059
2060        /* set to AUTOMDIX */
2061        phydev->mdix = ETH_TP_MDI_AUTO;
2062
2063        ret = phy_connect_direct(dev->net, phydev,
2064                                 lan78xx_link_status_change,
2065                                 dev->interface);
2066        if (ret) {
2067                netdev_err(dev->net, "can't attach PHY to %s\n",
2068                           dev->mdiobus->id);
2069                return -EIO;
2070        }
2071
2072        /* MAC doesn't support 1000T Half */
2073        phydev->supported &= ~SUPPORTED_1000baseT_Half;
2074
2075        /* support both flow controls */
2076        dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2077        phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2078        mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2079        phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2080
2081        genphy_config_aneg(phydev);
2082
2083        dev->fc_autoneg = phydev->autoneg;
2084
2085        phy_start(phydev);
2086
2087        netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2088
2089        return 0;
2090
2091error:
2092        phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2093        phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2094
2095        return ret;
2096}
2097
2098static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2099{
2100        int ret = 0;
2101        u32 buf;
2102        bool rxenabled;
2103
2104        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2105
2106        rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2107
2108        if (rxenabled) {
2109                buf &= ~MAC_RX_RXEN_;
2110                ret = lan78xx_write_reg(dev, MAC_RX, buf);
2111        }
2112
2113        /* add 4 to size for FCS */
2114        buf &= ~MAC_RX_MAX_SIZE_MASK_;
2115        buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2116
2117        ret = lan78xx_write_reg(dev, MAC_RX, buf);
2118
2119        if (rxenabled) {
2120                buf |= MAC_RX_RXEN_;
2121                ret = lan78xx_write_reg(dev, MAC_RX, buf);
2122        }
2123
2124        return 0;
2125}
2126
/* Unlink every pending URB on queue @q (rxq or txq).
 *
 * Returns the number of URBs for which an async unlink was issued.
 * Entries already in unlink_start state are skipped, so this is safe
 * against concurrent completions re-walking the queue.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* pick the next entry not yet marked for unlinking */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		/* drop the queue lock: the unlink may run the completion
		 * handler, which takes this same lock
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2171
2172static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2173{
2174        struct lan78xx_net *dev = netdev_priv(netdev);
2175        int ll_mtu = new_mtu + netdev->hard_header_len;
2176        int old_hard_mtu = dev->hard_mtu;
2177        int old_rx_urb_size = dev->rx_urb_size;
2178        int ret;
2179
2180        /* no second zero-length packet read wanted after mtu-sized packets */
2181        if ((ll_mtu % dev->maxpacket) == 0)
2182                return -EDOM;
2183
2184        ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2185
2186        netdev->mtu = new_mtu;
2187
2188        dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2189        if (dev->rx_urb_size == old_hard_mtu) {
2190                dev->rx_urb_size = dev->hard_mtu;
2191                if (dev->rx_urb_size > old_rx_urb_size) {
2192                        if (netif_running(dev->net)) {
2193                                unlink_urbs(dev, &dev->rxq);
2194                                tasklet_schedule(&dev->bh);
2195                        }
2196                }
2197        }
2198
2199        return 0;
2200}
2201
2202static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2203{
2204        struct lan78xx_net *dev = netdev_priv(netdev);
2205        struct sockaddr *addr = p;
2206        u32 addr_lo, addr_hi;
2207        int ret;
2208
2209        if (netif_running(netdev))
2210                return -EBUSY;
2211
2212        if (!is_valid_ether_addr(addr->sa_data))
2213                return -EADDRNOTAVAIL;
2214
2215        ether_addr_copy(netdev->dev_addr, addr->sa_data);
2216
2217        addr_lo = netdev->dev_addr[0] |
2218                  netdev->dev_addr[1] << 8 |
2219                  netdev->dev_addr[2] << 16 |
2220                  netdev->dev_addr[3] << 24;
2221        addr_hi = netdev->dev_addr[4] |
2222                  netdev->dev_addr[5] << 8;
2223
2224        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2225        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2226
2227        return 0;
2228}
2229
2230/* Enable or disable Rx checksum offload engine */
2231static int lan78xx_set_features(struct net_device *netdev,
2232                                netdev_features_t features)
2233{
2234        struct lan78xx_net *dev = netdev_priv(netdev);
2235        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2236        unsigned long flags;
2237        int ret;
2238
2239        spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2240
2241        if (features & NETIF_F_RXCSUM) {
2242                pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2243                pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2244        } else {
2245                pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2246                pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2247        }
2248
2249        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2250                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2251        else
2252                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2253
2254        spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2255
2256        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2257
2258        return 0;
2259}
2260
2261static void lan78xx_deferred_vlan_write(struct work_struct *param)
2262{
2263        struct lan78xx_priv *pdata =
2264                        container_of(param, struct lan78xx_priv, set_vlan);
2265        struct lan78xx_net *dev = pdata->dev;
2266
2267        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2268                               DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2269}
2270
2271static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2272                                   __be16 proto, u16 vid)
2273{
2274        struct lan78xx_net *dev = netdev_priv(netdev);
2275        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2276        u16 vid_bit_index;
2277        u16 vid_dword_index;
2278
2279        vid_dword_index = (vid >> 5) & 0x7F;
2280        vid_bit_index = vid & 0x1F;
2281
2282        pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2283
2284        /* defer register writes to a sleepable context */
2285        schedule_work(&pdata->set_vlan);
2286
2287        return 0;
2288}
2289
2290static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2291                                    __be16 proto, u16 vid)
2292{
2293        struct lan78xx_net *dev = netdev_priv(netdev);
2294        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2295        u16 vid_bit_index;
2296        u16 vid_dword_index;
2297
2298        vid_dword_index = (vid >> 5) & 0x7F;
2299        vid_bit_index = vid & 0x1F;
2300
2301        pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2302
2303        /* defer register writes to a sleepable context */
2304        schedule_work(&pdata->set_vlan);
2305
2306        return 0;
2307}
2308
/* Initialize the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1, the six LTM register values are loaded
 * from EEPROM (preferred) or OTP: offset 0x3F holds {length, pointer},
 * and only a length of exactly 24 bytes (6 x u32) is accepted.  If LTM
 * is disabled, or the stored block is missing/malformed, all six
 * registers are programmed with zeros.  A failed raw read aborts the
 * function without touching the registers at all.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2347
/* Full device bring-up: lite-reset the chip, restore the MAC address,
 * size the USB bulk pipes and FIFOs, program the receive filter, reset
 * the PHY, and finally enable the TX and RX paths.
 *
 * Returns 0 on success, -EIO if either reset poll times out.
 * NOTE(review): the intermediate register-access return codes in `ret`
 * are overwritten without being checked — presumably intentional
 * best-effort init, but worth confirming.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger a lite reset and poll (up to ~1s) for it to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* reset cleared the address registers; reprogram them */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* burst cap and queue lengths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear all interrupt status and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* poll (up to ~1s) until the PHY reset clears and READY asserts */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the TX path: MAC transmitter then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the RX path: MAC receiver then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2475
2476static void lan78xx_init_stats(struct lan78xx_net *dev)
2477{
2478        u32 *p;
2479        int i;
2480
2481        /* initialize for stats update
2482         * some counters are 20bits and some are 32bits
2483         */
2484        p = (u32 *)&dev->stats.rollover_max;
2485        for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2486                p[i] = 0xFFFFF;
2487
2488        dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2489        dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2490        dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2491        dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2492        dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2493        dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2494        dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2495        dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2496        dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2497        dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2498
2499        lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2500}
2501
/* ndo_open: reset the chip, bring up the PHY, start the interrupt URB
 * for link events, and open the TX queue.
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): usb_autopm_put_interface() runs at `done` even on the
 * success path, while lan78xx_stop() also calls it — confirm the
 * autopm get/put pairing is balanced across open/stop.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force a link status refresh via the deferred worker */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2544
/* Unlink all TX/RX URBs and wait for their completions to drain.
 * dev->wait is published so the completion path can wake us up.
 *
 * NOTE(review): the wait loop uses &&, so it exits as soon as ANY of
 * rxq/txq/done becomes empty rather than when all are — confirm this
 * is the intended condition.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2570
/* ndo_stop: bring the interface down.
 *
 * Stops stat polling, unregisters the PHY fixups, disconnects the PHY,
 * halts the TX queue, cancels all outstanding URBs and deferred work,
 * and drops an autopm reference.  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	/* undo the PHY fixups registered for these PHY IDs */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);

	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	/* clearing every EVENT_* bit turns the workers into no-ops */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2612
/* Flatten any paged/fragmented data in @skb into its linear buffer;
 * the TX bottom half copies frames with memcpy() and so needs linear
 * skbs.  Returns 0 on success or a negative errno.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2617
2618static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2619                                       struct sk_buff *skb, gfp_t flags)
2620{
2621        u32 tx_cmd_a, tx_cmd_b;
2622
2623        if (skb_cow_head(skb, TX_OVERHEAD)) {
2624                dev_kfree_skb_any(skb);
2625                return NULL;
2626        }
2627
2628        if (lan78xx_linearize(skb) < 0)
2629                return NULL;
2630
2631        tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2632
2633        if (skb->ip_summed == CHECKSUM_PARTIAL)
2634                tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2635
2636        tx_cmd_b = 0;
2637        if (skb_is_gso(skb)) {
2638                u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2639
2640                tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2641
2642                tx_cmd_a |= TX_CMD_A_LSO_;
2643        }
2644
2645        if (skb_vlan_tag_present(skb)) {
2646                tx_cmd_a |= TX_CMD_A_IVTG_;
2647                tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2648        }
2649
2650        skb_push(skb, 4);
2651        cpu_to_le32s(&tx_cmd_b);
2652        memcpy(skb->data, &tx_cmd_b, 4);
2653
2654        skb_push(skb, 4);
2655        cpu_to_le32s(&tx_cmd_a);
2656        memcpy(skb->data, &tx_cmd_a, 4);
2657
2658        return skb;
2659}
2660
/* Move @skb from @list (rxq or txq) onto dev->done and set its new
 * @state, scheduling the bottom half if the done list was previously
 * empty.  Returns the skb's previous state (callers use it to detect
 * e.g. an unlink already in progress).
 *
 * Locking: irqs are disabled once by spin_lock_irqsave() on
 * @list->lock; the bare spin_unlock()/spin_lock() pair hands over to
 * dev->done.lock with irqs still off, and the saved flags are restored
 * on the final unlock.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* first entry in this batch: the tasklet is not yet pending */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2683
/* Completion handler for bulk-out URBs: account tx stats or classify
 * the error, release the async autopm reference taken at submit time,
 * and hand the skb to the bottom half as tx_done.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* entry->num_of_packet/length were set when the batch URB
		 * was built in lan78xx_tx_bh()
		 */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear the halt from keventd */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2722
2723static void lan78xx_queue_skb(struct sk_buff_head *list,
2724                              struct sk_buff *newsk, enum skb_state state)
2725{
2726        struct skb_data *entry = (struct skb_data *)newsk->cb;
2727
2728        __skb_queue_tail(list, newsk);
2729        entry->state = state;
2730}
2731
2732static netdev_tx_t
2733lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2734{
2735        struct lan78xx_net *dev = netdev_priv(net);
2736        struct sk_buff *skb2 = NULL;
2737
2738        if (skb) {
2739                skb_tx_timestamp(skb);
2740                skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2741        }
2742
2743        if (skb2) {
2744                skb_queue_tail(&dev->txq_pend, skb2);
2745
2746                /* throttle TX patch at slower than SUPER SPEED USB */
2747                if ((dev->udev->speed < USB_SPEED_SUPER) &&
2748                    (skb_queue_len(&dev->txq_pend) > 10))
2749                        netif_stop_queue(net);
2750        } else {
2751                netif_dbg(dev, tx_err, dev->net,
2752                          "lan78xx_tx_prep return NULL\n");
2753                dev->net->stats.tx_errors++;
2754                dev->net->stats.tx_dropped++;
2755        }
2756
2757        tasklet_schedule(&dev->bh);
2758
2759        return NETDEV_TX_OK;
2760}
2761
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints, and record the resulting pipes in
 * @dev.  Returns 0 on success, -EINVAL if no altsetting provides both
 * bulk endpoints.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			/* accept only interrupt-IN and bulk endpoints;
			 * everything else is skipped
			 */
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* remember the first endpoint of each kind only */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* this altsetting has both bulk directions: use it */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2820
2821static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2822{
2823        struct lan78xx_priv *pdata = NULL;
2824        int ret;
2825        int i;
2826
2827        ret = lan78xx_get_endpoints(dev, intf);
2828
2829        dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2830
2831        pdata = (struct lan78xx_priv *)(dev->data[0]);
2832        if (!pdata) {
2833                netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2834                return -ENOMEM;
2835        }
2836
2837        pdata->dev = dev;
2838
2839        spin_lock_init(&pdata->rfe_ctl_lock);
2840        mutex_init(&pdata->dataport_mutex);
2841
2842        INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2843
2844        for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2845                pdata->vlan_table[i] = 0;
2846
2847        INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2848
2849        dev->net->features = 0;
2850
2851        if (DEFAULT_TX_CSUM_ENABLE)
2852                dev->net->features |= NETIF_F_HW_CSUM;
2853
2854        if (DEFAULT_RX_CSUM_ENABLE)
2855                dev->net->features |= NETIF_F_RXCSUM;
2856
2857        if (DEFAULT_TSO_CSUM_ENABLE)
2858                dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2859
2860        dev->net->hw_features = dev->net->features;
2861
2862        ret = lan78xx_setup_irq_domain(dev);
2863        if (ret < 0) {
2864                netdev_warn(dev->net,
2865                            "lan78xx_setup_irq_domain() failed : %d", ret);
2866                kfree(pdata);
2867                return ret;
2868        }
2869
2870        dev->net->hard_header_len += TX_OVERHEAD;
2871        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2872
2873        /* Init all registers */
2874        ret = lan78xx_reset(dev);
2875
2876        ret = lan78xx_mdio_init(dev);
2877
2878        dev->net->flags |= IFF_MULTICAST;
2879
2880        pdata->wol = WAKE_MAGIC;
2881
2882        return ret;
2883}
2884
2885static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2886{
2887        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2888
2889        lan78xx_remove_irq_domain(dev);
2890
2891        lan78xx_remove_mdio(dev);
2892
2893        if (pdata) {
2894                netif_dbg(dev, ifdown, dev->net, "free pdata");
2895                kfree(pdata);
2896                pdata = NULL;
2897                dev->data[0] = 0;
2898        }
2899}
2900
2901static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2902                                    struct sk_buff *skb,
2903                                    u32 rx_cmd_a, u32 rx_cmd_b)
2904{
2905        if (!(dev->net->features & NETIF_F_RXCSUM) ||
2906            unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2907                skb->ip_summed = CHECKSUM_NONE;
2908        } else {
2909                skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2910                skb->ip_summed = CHECKSUM_COMPLETE;
2911        }
2912}
2913
/* Hand a received frame to the network stack, or park it on rxq_pause
 * while reception is paused.  Consumes @skb.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* scrub the driver's skb_data bookkeeping before the skb leaves */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* if hardware timestamping defers delivery, we are done here */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2940
/* Parse one bulk-in buffer that may carry several frames, each prefixed
 * by three little-endian command words (rx_cmd_a/b/c) and padded to a
 * 4-byte boundary.  Intermediate frames are cloned and passed up via
 * lan78xx_skb_return(); the last frame reuses @skb itself (the caller
 * delivers it when skb->len is still non-zero).  Returns 0 only for a
 * too-short buffer or clone failure, so the caller counts an rx error.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pop the three command words off the front of the buffer */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* NOTE(review): `size` comes from the device and is not
		 * validated against skb->len before the trim/clone below —
		 * confirm the hardware cannot report a length larger than
		 * the remaining buffer.
		 */
		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this one frame */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3012
3013static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3014{
3015        if (!lan78xx_rx(dev, skb)) {
3016                dev->net->stats.rx_errors++;
3017                goto done;
3018        }
3019
3020        if (skb->len) {
3021                lan78xx_skb_return(dev, skb);
3022                return;
3023        }
3024
3025        netif_dbg(dev, rx_err, dev->net, "drop\n");
3026        dev->net->stats.rx_errors++;
3027done:
3028        skb_queue_tail(&dev->done, skb);
3029}
3030
3031static void rx_complete(struct urb *urb);
3032
/* Allocate an rx skb, attach it to @urb and submit the bulk-in
 * transfer.  On success the skb is queued on dev->rxq in state
 * rx_start.  On failure both skb and urb are freed and a negative
 * errno is returned; -ENOLINK tells the caller to stop refilling.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* driver bookkeeping lives in the skb control buffer */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* submit only while the device is present, up, not halted and
	 * not suspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled: keventd will clear the halt */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* transient failure: retry from the bottom half */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3092
/* Completion handler for bulk-in URBs: classify the URB status, hand
 * the skb to the bottom half via defer_bh(), and resubmit the URB with
 * a fresh skb when it is still usable.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	/* default: this function keeps the urb for resubmission */
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* a buffer shorter than the rx command words is bogus */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* hand urb ownership to the entry; the bh frees it */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* recycle the urb with a freshly allocated skb */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3162
/* TX bottom half: coalesce pending frames from txq_pend into a single
 * bulk-out URB (GSO frames are sent on their own) and submit it.
 * Runs in tasklet context, so all allocations use GFP_ATOMIC.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* walk the pending queue to decide how many frames fit in one URB */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO frame goes alone; pkt_cnt == 0 means `skb` is
			 * the queue head, so the dequeued skb2 aliases it
			 */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame is placed at a 4-byte-aligned offset */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* concatenate the batched frames into the new buffer */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			/* length counts payload only, not the command words */
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* released in tx_complete() (or on the error paths below) */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	/* NOTE(review): the URB transfers skb->len bytes, yet the ZLP
	 * test uses `length` (payload excluding TX_OVERHEAD per packet) —
	 * confirm whether skb->len was intended here.
	 */
	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		/* skb is NULL here only when alloc_skb() failed above */
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3287
3288static void lan78xx_rx_bh(struct lan78xx_net *dev)
3289{
3290        struct urb *urb;
3291        int i;
3292
3293        if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3294                for (i = 0; i < 10; i++) {
3295                        if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3296                                break;
3297                        urb = usb_alloc_urb(0, GFP_ATOMIC);
3298                        if (urb)
3299                                if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3300                                        return;
3301                }
3302
3303                if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3304                        tasklet_schedule(&dev->bh);
3305        }
3306        if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3307                netif_wake_queue(dev->net);
3308}
3309
/* Tasklet (bottom half): drain dev->done — delivering completed rx
 * buffers and freeing finished tx/cleanup entries — then resume tx
 * batching and rx ring refill while the device is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* pre-mark for cleanup in case rx_process requeues */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop draining entirely */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3353
3354static void lan78xx_delayedwork(struct work_struct *work)
3355{
3356        int status;
3357        struct lan78xx_net *dev;
3358
3359        dev = container_of(work, struct lan78xx_net, wq.work);
3360
3361        if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3362                unlink_urbs(dev, &dev->txq);
3363                status = usb_autopm_get_interface(dev->intf);
3364                if (status < 0)
3365                        goto fail_pipe;
3366                status = usb_clear_halt(dev->udev, dev->pipe_out);
3367                usb_autopm_put_interface(dev->intf);
3368                if (status < 0 &&
3369                    status != -EPIPE &&
3370                    status != -ESHUTDOWN) {
3371                        if (netif_msg_tx_err(dev))
3372fail_pipe:
3373                                netdev_err(dev->net,
3374                                           "can't clear tx halt, status %d\n",
3375                                           status);
3376                } else {
3377                        clear_bit(EVENT_TX_HALT, &dev->flags);
3378                        if (status != -ESHUTDOWN)
3379                                netif_wake_queue(dev->net);
3380                }
3381        }
3382        if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3383                unlink_urbs(dev, &dev->rxq);
3384                status = usb_autopm_get_interface(dev->intf);
3385                if (status < 0)
3386                                goto fail_halt;
3387                status = usb_clear_halt(dev->udev, dev->pipe_in);
3388                usb_autopm_put_interface(dev->intf);
3389                if (status < 0 &&
3390                    status != -EPIPE &&
3391                    status != -ESHUTDOWN) {
3392                        if (netif_msg_rx_err(dev))
3393fail_halt:
3394                                netdev_err(dev->net,
3395                                           "can't clear rx halt, status %d\n",
3396                                           status);
3397                } else {
3398                        clear_bit(EVENT_RX_HALT, &dev->flags);
3399                        tasklet_schedule(&dev->bh);
3400                }
3401        }
3402
3403        if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3404                int ret = 0;
3405
3406                clear_bit(EVENT_LINK_RESET, &dev->flags);
3407                status = usb_autopm_get_interface(dev->intf);
3408                if (status < 0)
3409                        goto skip_reset;
3410                if (lan78xx_link_reset(dev) < 0) {
3411                        usb_autopm_put_interface(dev->intf);
3412skip_reset:
3413                        netdev_info(dev->net, "link reset failed (%d)\n",
3414                                    ret);
3415                } else {
3416                        usb_autopm_put_interface(dev->intf);
3417                }
3418        }
3419
3420        if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3421                lan78xx_update_stats(dev);
3422
3423                clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3424
3425                mod_timer(&dev->stat_monitor,
3426                          jiffies + (STAT_UPDATE_TIMER * dev->delta));
3427
3428                dev->delta = min((dev->delta * 2), 50);
3429        }
3430}
3431
3432static void intr_complete(struct urb *urb)
3433{
3434        struct lan78xx_net *dev = urb->context;
3435        int status = urb->status;
3436
3437        switch (status) {
3438        /* success */
3439        case 0:
3440                lan78xx_status(dev, urb);
3441                break;
3442
3443        /* software-driven interface shutdown */
3444        case -ENOENT:                   /* urb killed */
3445        case -ESHUTDOWN:                /* hardware gone */
3446                netif_dbg(dev, ifdown, dev->net,
3447                          "intr shutdown, code %d\n", status);
3448                return;
3449
3450        /* NOTE:  not throttling like RX/TX, since this endpoint
3451         * already polls infrequently
3452         */
3453        default:
3454                netdev_dbg(dev->net, "intr status %d\n", status);
3455                break;
3456        }
3457
3458        if (!netif_running(dev->net))
3459                return;
3460
3461        memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3462        status = usb_submit_urb(urb, GFP_ATOMIC);
3463        if (status != 0)
3464                netif_err(dev, timer, dev->net,
3465                          "intr resubmit --> %d\n", status);
3466}
3467
/* USB disconnect handler: tear down the network device and release all
 * resources acquired in lan78xx_probe(), in roughly reverse order.
 *
 * Clearing intfdata first makes repeated disconnect calls a no-op.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
        struct lan78xx_net              *dev;
        struct usb_device               *udev;
        struct net_device               *net;

        dev = usb_get_intfdata(intf);
        usb_set_intfdata(intf, NULL);
        if (!dev)
                return;

        udev = interface_to_usbdev(intf);

        net = dev->net;
        /* stops further open/xmit callbacks before we free anything */
        unregister_netdev(net);

        cancel_delayed_work_sync(&dev->wq);

        /* drop any TX URBs still parked on the deferred anchor */
        usb_scuttle_anchored_urbs(&dev->deferred);

        lan78xx_unbind(dev, intf);

        /* NOTE(review): the interrupt URB is killed only after unbind;
         * confirm nothing in lan78xx_unbind() depends on it being quiesced.
         * Also, usb_free_urb() does not free the transfer buffer allocated
         * in probe — presumably freed elsewhere; verify, else it leaks.
         */
        usb_kill_urb(dev->urb_intr);
        usb_free_urb(dev->urb_intr);

        free_netdev(net);
        usb_put_dev(udev);      /* balances usb_get_dev() in probe */
}
3496
3497static void lan78xx_tx_timeout(struct net_device *net)
3498{
3499        struct lan78xx_net *dev = netdev_priv(net);
3500
3501        unlink_urbs(dev, &dev->txq);
3502        tasklet_schedule(&dev->bh);
3503}
3504
/* net_device callbacks wired into the stack for every lan78xx netdev. */
static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_open               = lan78xx_open,
        .ndo_stop               = lan78xx_stop,
        .ndo_start_xmit         = lan78xx_start_xmit,
        .ndo_tx_timeout         = lan78xx_tx_timeout,
        .ndo_change_mtu         = lan78xx_change_mtu,
        .ndo_set_mac_address    = lan78xx_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = lan78xx_ioctl,
        .ndo_set_rx_mode        = lan78xx_set_multicast,
        .ndo_set_features       = lan78xx_set_features,
        .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
};
3519
/* Periodic statistics timer.  Runs in timer (atomic) context, so it only
 * defers the actual register reads to the keventd work item, which
 * handles EVENT_STAT_UPDATE via lan78xx_update_stats().
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
        struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

        lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3526
3527static int lan78xx_probe(struct usb_interface *intf,
3528                         const struct usb_device_id *id)
3529{
3530        struct lan78xx_net *dev;
3531        struct net_device *netdev;
3532        struct usb_device *udev;
3533        int ret;
3534        unsigned maxp;
3535        unsigned period;
3536        u8 *buf = NULL;
3537
3538        udev = interface_to_usbdev(intf);
3539        udev = usb_get_dev(udev);
3540
3541        netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3542        if (!netdev) {
3543                dev_err(&intf->dev, "Error: OOM\n");
3544                ret = -ENOMEM;
3545                goto out1;
3546        }
3547
3548        /* netdev_printk() needs this */
3549        SET_NETDEV_DEV(netdev, &intf->dev);
3550
3551        dev = netdev_priv(netdev);
3552        dev->udev = udev;
3553        dev->intf = intf;
3554        dev->net = netdev;
3555        dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3556                                        | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3557
3558        skb_queue_head_init(&dev->rxq);
3559        skb_queue_head_init(&dev->txq);
3560        skb_queue_head_init(&dev->done);
3561        skb_queue_head_init(&dev->rxq_pause);
3562        skb_queue_head_init(&dev->txq_pend);
3563        mutex_init(&dev->phy_mutex);
3564
3565        tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3566        INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3567        init_usb_anchor(&dev->deferred);
3568
3569        netdev->netdev_ops = &lan78xx_netdev_ops;
3570        netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3571        netdev->ethtool_ops = &lan78xx_ethtool_ops;
3572
3573        dev->delta = 1;
3574        timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3575
3576        mutex_init(&dev->stats.access_lock);
3577
3578        ret = lan78xx_bind(dev, intf);
3579        if (ret < 0)
3580                goto out2;
3581        strcpy(netdev->name, "eth%d");
3582
3583        if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3584                netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3585
3586        /* MTU range: 68 - 9000 */
3587        netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3588
3589        dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3590        dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3591        dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3592
3593        dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3594        dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3595
3596        dev->pipe_intr = usb_rcvintpipe(dev->udev,
3597                                        dev->ep_intr->desc.bEndpointAddress &
3598                                        USB_ENDPOINT_NUMBER_MASK);
3599        period = dev->ep_intr->desc.bInterval;
3600
3601        maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3602        buf = kmalloc(maxp, GFP_KERNEL);
3603        if (buf) {
3604                dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3605                if (!dev->urb_intr) {
3606                        ret = -ENOMEM;
3607                        kfree(buf);
3608                        goto out3;
3609                } else {
3610                        usb_fill_int_urb(dev->urb_intr, dev->udev,
3611                                         dev->pipe_intr, buf, maxp,
3612                                         intr_complete, dev, period);
3613                }
3614        }
3615
3616        dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3617
3618        /* driver requires remote-wakeup capability during autosuspend. */
3619        intf->needs_remote_wakeup = 1;
3620
3621        ret = register_netdev(netdev);
3622        if (ret != 0) {
3623                netif_err(dev, probe, netdev, "couldn't register the device\n");
3624                goto out3;
3625        }
3626
3627        usb_set_intfdata(intf, dev);
3628
3629        ret = device_set_wakeup_enable(&udev->dev, true);
3630
3631         /* Default delay of 2sec has more overhead than advantage.
3632          * Set to 10sec as default.
3633          */
3634        pm_runtime_set_autosuspend_delay(&udev->dev,
3635                                         DEFAULT_AUTOSUSPEND_DELAY);
3636
3637        return 0;
3638
3639out3:
3640        lan78xx_unbind(dev, intf);
3641out2:
3642        free_netdev(netdev);
3643out1:
3644        usb_put_dev(udev);
3645
3646        return ret;
3647}
3648
3649static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3650{
3651        const u16 crc16poly = 0x8005;
3652        int i;
3653        u16 bit, crc, msb;
3654        u8 data;
3655
3656        crc = 0xFFFF;
3657        for (i = 0; i < len; i++) {
3658                data = *buf++;
3659                for (bit = 0; bit < 8; bit++) {
3660                        msb = crc >> 15;
3661                        crc <<= 1;
3662
3663                        if (msb ^ (u16)(data & 1)) {
3664                                crc ^= crc16poly;
3665                                crc |= (u16)0x0001U;
3666                        }
3667                        data >>= 1;
3668                }
3669        }
3670
3671        return crc;
3672}
3673
/* Program the wakeup-frame filters and power-management registers for a
 * system suspend according to the Wake-on-LAN bitmap @wol (WAKE_* flags).
 *
 * Sequence: stop TX/RX, clear the wakeup control/status registers, build
 * the WUCSR/WUF_CFG/WUF_MASK configuration for each requested wake
 * source, commit PMT_CTL, clear stale wakeup status, then re-enable RX so
 * the MAC can observe wakeup packets while suspended.
 *
 * NOTE(review): every lan78xx_read_reg()/lan78xx_write_reg() result is
 * assigned to 'ret' but never checked, so USB register-access failures
 * are silently ignored and the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
        u32 buf;
        int ret;
        int mask_index;
        u16 crc;
        u32 temp_wucsr;
        u32 temp_pmt_ctl;
        /* multicast OUI prefixes and the ARP ethertype used to build
         * CRC-matched wakeup filters below
         */
        const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
        const u8 ipv6_multicast[3] = { 0x33, 0x33 };
        const u8 arp_type[2] = { 0x08, 0x06 };

        /* stop TX and RX while reprogramming the wakeup logic */
        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
        buf &= ~MAC_TX_TXEN_;
        ret = lan78xx_write_reg(dev, MAC_TX, buf);
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf &= ~MAC_RX_RXEN_;
        ret = lan78xx_write_reg(dev, MAC_RX, buf);

        /* clear wakeup control and latched wake-source status */
        ret = lan78xx_write_reg(dev, WUCSR, 0);
        ret = lan78xx_write_reg(dev, WUCSR2, 0);
        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

        temp_wucsr = 0;

        temp_pmt_ctl = 0;
        ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
        temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
        temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

        /* wipe all wakeup-frame filter slots before reprogramming */
        for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

        mask_index = 0;
        if (wol & WAKE_PHY) {
                temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_MAGIC) {
                temp_wucsr |= WUCSR_MPEN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                /* magic-packet wake uses the deeper suspend mode 3 */
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
        }
        if (wol & WAKE_BCAST) {
                temp_wucsr |= WUCSR_BCST_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_MCAST) {
                temp_wucsr |= WUCSR_WAKE_EN_;

                /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
                crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                        WUF_CFGX_EN_ |
                                        WUF_CFGX_TYPE_MCAST_ |
                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                        (crc & WUF_CFGX_CRC16_MASK_));

                /* mask 0x7: match the first three bytes (OUI prefix) */
                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                /* for IPv6 Multicast */
                crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                        WUF_CFGX_EN_ |
                                        WUF_CFGX_TYPE_MCAST_ |
                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                        (crc & WUF_CFGX_CRC16_MASK_));

                /* mask 0x3: match the two-byte 33:33 prefix */
                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_UCAST) {
                temp_wucsr |= WUCSR_PFDA_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_ARP) {
                temp_wucsr |= WUCSR_WAKE_EN_;

                /* set WUF_CFG & WUF_MASK
                 * for packettype (offset 12,13) = ARP (0x0806)
                 */
                crc = lan78xx_wakeframe_crc16(arp_type, 2);
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                        WUF_CFGX_EN_ |
                                        WUF_CFGX_TYPE_ALL_ |
                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                        (crc & WUF_CFGX_CRC16_MASK_));

                /* mask 0x3000: match bytes 12-13 (the ethertype field) */
                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }

        ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

        /* when multiple WOL bits are set */
        if (hweight_long((unsigned long)wol) > 1) {
                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

        /* clear WUPS */
        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
        buf |= PMT_CTL_WUPS_MASK_;
        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

        /* re-enable RX so wakeup packets can be received while suspended */
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
        ret = lan78xx_write_reg(dev, MAC_RX, buf);

        return 0;
}
3816
/* USB suspend handler, used for both autosuspend (runtime PM) and system
 * suspend.  On the first suspend of a nested sequence it stops TX/RX and
 * quiesces all URBs; then, for autosuspend, it arms good-frame/PHY wakeup
 * directly, while a system suspend delegates to lan78xx_set_suspend()
 * with the user's configured WoL mask.
 *
 * Returns 0, or -EBUSY to veto an autosuspend while TX is pending.
 *
 * NOTE(review): 'event' is assigned from message.event but never read;
 * register read/write results are assigned to 'ret' and then discarded.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        u32 buf;
        int ret;
        int event;

        event = message.event;

        /* only the outermost suspend quiesces the hardware */
        if (!dev->suspend_count++) {
                spin_lock_irq(&dev->txq.lock);
                /* don't autosuspend while transmitting */
                if ((skb_queue_len(&dev->txq) ||
                     skb_queue_len(&dev->txq_pend)) &&
                        PMSG_IS_AUTO(message)) {
                        spin_unlock_irq(&dev->txq.lock);
                        ret = -EBUSY;
                        goto out;
                } else {
                        set_bit(EVENT_DEV_ASLEEP, &dev->flags);
                        spin_unlock_irq(&dev->txq.lock);
                }

                /* stop TX & RX */
                ret = lan78xx_read_reg(dev, MAC_TX, &buf);
                buf &= ~MAC_TX_TXEN_;
                ret = lan78xx_write_reg(dev, MAC_TX, buf);
                ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                buf &= ~MAC_RX_RXEN_;
                ret = lan78xx_write_reg(dev, MAC_RX, buf);

                /* empty out the rx and queues */
                netif_device_detach(dev->net);
                lan78xx_terminate_urbs(dev);
                usb_kill_urb(dev->urb_intr);

                /* reattach */
                netif_device_attach(dev->net);
        }

        if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
                /* no stats polling while asleep */
                del_timer(&dev->stat_monitor);

                if (PMSG_IS_AUTO(message)) {
                        /* auto suspend (selective suspend) */
                        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
                        buf &= ~MAC_TX_TXEN_;
                        ret = lan78xx_write_reg(dev, MAC_TX, buf);
                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                        buf &= ~MAC_RX_RXEN_;
                        ret = lan78xx_write_reg(dev, MAC_RX, buf);

                        /* clear wakeup control and latched wake sources */
                        ret = lan78xx_write_reg(dev, WUCSR, 0);
                        ret = lan78xx_write_reg(dev, WUCSR2, 0);
                        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

                        /* set goodframe wakeup */
                        ret = lan78xx_read_reg(dev, WUCSR, &buf);

                        buf |= WUCSR_RFE_WAKE_EN_;
                        buf |= WUCSR_STORE_WAKE_;

                        ret = lan78xx_write_reg(dev, WUCSR, buf);

                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

                        buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
                        buf |= PMT_CTL_RES_CLR_WKP_STS_;

                        buf |= PMT_CTL_PHY_WAKE_EN_;
                        buf |= PMT_CTL_WOL_EN_;
                        buf &= ~PMT_CTL_SUS_MODE_MASK_;
                        buf |= PMT_CTL_SUS_MODE_3_;

                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

                        /* clear any stale wakeup status */
                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

                        buf |= PMT_CTL_WUPS_MASK_;

                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

                        /* RX stays enabled so wake packets are seen */
                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                        buf |= MAC_RX_RXEN_;
                        ret = lan78xx_write_reg(dev, MAC_RX, buf);
                } else {
                        /* system suspend: honour the configured WoL mask */
                        lan78xx_set_suspend(dev, pdata->wol);
                }
        }

        ret = 0;
out:
        return ret;
}
3912
/* USB resume handler (also called from lan78xx_reset_resume()).
 *
 * Restarts the stats timer, and on the final resume of a nested sequence
 * resubmits the interrupt URB, replays the TX URBs that were parked on
 * the deferred anchor during suspend, and restarts the TX queue.  Wakeup
 * status is then cleared and the wakeup detectors plus MAC TX re-enabled.
 *
 * Always returns 0.
 *
 * NOTE(review): the usb_submit_urb() result for the interrupt URB is
 * ignored, register write results are discarded, and --suspend_count has
 * no underflow guard — confirm callers always pair suspend/resume.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);
        struct sk_buff *skb;
        struct urb *res;
        int ret;
        u32 buf;

        if (!timer_pending(&dev->stat_monitor)) {
                dev->delta = 1;
                mod_timer(&dev->stat_monitor,
                          jiffies + STAT_UPDATE_TIMER);
        }

        /* only the outermost resume restarts traffic */
        if (!--dev->suspend_count) {
                /* resume interrupt URBs */
                if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
                                usb_submit_urb(dev->urb_intr, GFP_NOIO);

                spin_lock_irq(&dev->txq.lock);
                /* replay TX URBs deferred while the device was asleep */
                while ((res = usb_get_from_anchor(&dev->deferred))) {
                        skb = (struct sk_buff *)res->context;
                        ret = usb_submit_urb(res, GFP_ATOMIC);
                        if (ret < 0) {
                                /* drop the skb and release the PM ref
                                 * taken when the URB was deferred
                                 */
                                dev_kfree_skb_any(skb);
                                usb_free_urb(res);
                                usb_autopm_put_interface_async(dev->intf);
                        } else {
                                netif_trans_update(dev->net);
                                lan78xx_queue_skb(&dev->txq, skb, tx_start);
                        }
                }

                clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
                spin_unlock_irq(&dev->txq.lock);

                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
                        if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
                                netif_start_queue(dev->net);
                        tasklet_schedule(&dev->bh);
                }
        }

        /* clear wakeup control and latched wake-source status */
        ret = lan78xx_write_reg(dev, WUCSR2, 0);
        ret = lan78xx_write_reg(dev, WUCSR, 0);
        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

        /* acknowledge any wake events recorded while suspended */
        ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
                                             WUCSR2_ARP_RCD_ |
                                             WUCSR2_IPV6_TCPSYN_RCD_ |
                                             WUCSR2_IPV4_TCPSYN_RCD_);

        ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
                                            WUCSR_EEE_RX_WAKE_ |
                                            WUCSR_PFDA_FR_ |
                                            WUCSR_RFE_WAKE_FR_ |
                                            WUCSR_WUFR_ |
                                            WUCSR_MPR_ |
                                            WUCSR_BCST_FR_);

        /* re-enable the transmitter (disabled on suspend) */
        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
        buf |= MAC_TX_TXEN_;
        ret = lan78xx_write_reg(dev, MAC_TX, buf);

        return 0;
}
3979
/* Resume after the device may have been reset or lost power: re-run the
 * full chip reset and PHY bring-up before the normal resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);

        lan78xx_reset(dev);

        lan78xx_phy_init(dev);

        return lan78xx_resume(intf);
}
3990
/* USB vendor/product IDs handled by this driver. */
static const struct usb_device_id products[] = {
        {
        /* LAN7800 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
        },
        {
        /* LAN7850 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
        },
        {
        /* LAN7801 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
        },
        {},     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
4007
/* USB driver glue: probe/disconnect and power-management entry points.
 * Autosuspend is supported; hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
        .name                   = DRIVER_NAME,
        .id_table               = products,
        .probe                  = lan78xx_probe,
        .disconnect             = lan78xx_disconnect,
        .suspend                = lan78xx_suspend,
        .resume                 = lan78xx_resume,
        .reset_resume           = lan78xx_reset_resume,
        .supports_autosuspend   = 1,
        .disable_hub_initiated_lpm = 1,
};
4019
/* Generates the module init/exit that (un)register the USB driver. */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
4025