linux/drivers/net/usb/lan78xx.c
   1/*
   2 * Copyright (C) 2015 Microchip Technology
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public License
   6 * as published by the Free Software Foundation; either version 2
   7 * of the License, or (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  16 */
  17#include <linux/version.h>
  18#include <linux/module.h>
  19#include <linux/netdevice.h>
  20#include <linux/etherdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/usb.h>
  23#include <linux/crc32.h>
  24#include <linux/signal.h>
  25#include <linux/slab.h>
  26#include <linux/if_vlan.h>
  27#include <linux/uaccess.h>
  28#include <linux/list.h>
  29#include <linux/ip.h>
  30#include <linux/ipv6.h>
  31#include <linux/mdio.h>
  32#include <net/ip6_checksum.h>
  33#include <linux/microchipphy.h>
  34#include "lan78xx.h"
  35
  36#define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
  37#define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
  38#define DRIVER_NAME     "lan78xx"
  39#define DRIVER_VERSION  "1.0.4"
  40
  41#define TX_TIMEOUT_JIFFIES              (5 * HZ)
  42#define THROTTLE_JIFFIES                (HZ / 8)
  43#define UNLINK_TIMEOUT_MS               3
  44
  45#define RX_MAX_QUEUE_MEMORY             (60 * 1518)
  46
  47#define SS_USB_PKT_SIZE                 (1024)
  48#define HS_USB_PKT_SIZE                 (512)
  49#define FS_USB_PKT_SIZE                 (64)
  50
  51#define MAX_RX_FIFO_SIZE                (12 * 1024)
  52#define MAX_TX_FIFO_SIZE                (12 * 1024)
  53#define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
  54#define DEFAULT_BULK_IN_DELAY           (0x0800)
  55#define MAX_SINGLE_PACKET_SIZE          (9000)
  56#define DEFAULT_TX_CSUM_ENABLE          (true)
  57#define DEFAULT_RX_CSUM_ENABLE          (true)
  58#define DEFAULT_TSO_CSUM_ENABLE         (true)
  59#define DEFAULT_VLAN_FILTER_ENABLE      (true)
  60#define TX_OVERHEAD                     (8)
  61#define RXW_PADDING                     2
  62
  63#define LAN78XX_USB_VENDOR_ID           (0x0424)
  64#define LAN7800_USB_PRODUCT_ID          (0x7800)
  65#define LAN7850_USB_PRODUCT_ID          (0x7850)
  66#define LAN78XX_EEPROM_MAGIC            (0x78A5)
  67#define LAN78XX_OTP_MAGIC               (0x78F3)
  68
  69#define MII_READ                        1
  70#define MII_WRITE                       0
  71
  72#define EEPROM_INDICATOR                (0xA5)
  73#define EEPROM_MAC_OFFSET               (0x01)
  74#define MAX_EEPROM_SIZE                 512
  75#define OTP_INDICATOR_1                 (0xF3)
  76#define OTP_INDICATOR_2                 (0xF7)
  77
  78#define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
  79                                         WAKE_MCAST | WAKE_BCAST | \
  80                                         WAKE_ARP | WAKE_MAGIC)
  81
  82/* USB related defines */
  83#define BULK_IN_PIPE                    1
  84#define BULK_OUT_PIPE                   2
  85
   86/* default autosuspend delay (msec) */
  87#define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
  88
   89/* statistics update interval (msec) */
  90#define STAT_UPDATE_TIMER               (1 * 1000)
  91
  92static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
  93        "RX FCS Errors",
  94        "RX Alignment Errors",
   95        "RX Fragment Errors",
  96        "RX Jabber Errors",
  97        "RX Undersize Frame Errors",
  98        "RX Oversize Frame Errors",
  99        "RX Dropped Frames",
 100        "RX Unicast Byte Count",
 101        "RX Broadcast Byte Count",
 102        "RX Multicast Byte Count",
 103        "RX Unicast Frames",
 104        "RX Broadcast Frames",
 105        "RX Multicast Frames",
 106        "RX Pause Frames",
 107        "RX 64 Byte Frames",
 108        "RX 65 - 127 Byte Frames",
 109        "RX 128 - 255 Byte Frames",
 110        "RX 256 - 511 Bytes Frames",
 111        "RX 512 - 1023 Byte Frames",
 112        "RX 1024 - 1518 Byte Frames",
 113        "RX Greater 1518 Byte Frames",
 114        "EEE RX LPI Transitions",
 115        "EEE RX LPI Time",
 116        "TX FCS Errors",
 117        "TX Excess Deferral Errors",
 118        "TX Carrier Errors",
 119        "TX Bad Byte Count",
 120        "TX Single Collisions",
 121        "TX Multiple Collisions",
 122        "TX Excessive Collision",
 123        "TX Late Collisions",
 124        "TX Unicast Byte Count",
 125        "TX Broadcast Byte Count",
 126        "TX Multicast Byte Count",
 127        "TX Unicast Frames",
 128        "TX Broadcast Frames",
 129        "TX Multicast Frames",
 130        "TX Pause Frames",
 131        "TX 64 Byte Frames",
 132        "TX 65 - 127 Byte Frames",
 133        "TX 128 - 255 Byte Frames",
 134        "TX 256 - 511 Bytes Frames",
 135        "TX 512 - 1023 Byte Frames",
 136        "TX 1024 - 1518 Byte Frames",
 137        "TX Greater 1518 Byte Frames",
 138        "EEE TX LPI Transitions",
 139        "EEE TX LPI Time",
 140};
 141
 142struct lan78xx_statstage {
 143        u32 rx_fcs_errors;
 144        u32 rx_alignment_errors;
 145        u32 rx_fragment_errors;
 146        u32 rx_jabber_errors;
 147        u32 rx_undersize_frame_errors;
 148        u32 rx_oversize_frame_errors;
 149        u32 rx_dropped_frames;
 150        u32 rx_unicast_byte_count;
 151        u32 rx_broadcast_byte_count;
 152        u32 rx_multicast_byte_count;
 153        u32 rx_unicast_frames;
 154        u32 rx_broadcast_frames;
 155        u32 rx_multicast_frames;
 156        u32 rx_pause_frames;
 157        u32 rx_64_byte_frames;
 158        u32 rx_65_127_byte_frames;
 159        u32 rx_128_255_byte_frames;
 160        u32 rx_256_511_bytes_frames;
 161        u32 rx_512_1023_byte_frames;
 162        u32 rx_1024_1518_byte_frames;
 163        u32 rx_greater_1518_byte_frames;
 164        u32 eee_rx_lpi_transitions;
 165        u32 eee_rx_lpi_time;
 166        u32 tx_fcs_errors;
 167        u32 tx_excess_deferral_errors;
 168        u32 tx_carrier_errors;
 169        u32 tx_bad_byte_count;
 170        u32 tx_single_collisions;
 171        u32 tx_multiple_collisions;
 172        u32 tx_excessive_collision;
 173        u32 tx_late_collisions;
 174        u32 tx_unicast_byte_count;
 175        u32 tx_broadcast_byte_count;
 176        u32 tx_multicast_byte_count;
 177        u32 tx_unicast_frames;
 178        u32 tx_broadcast_frames;
 179        u32 tx_multicast_frames;
 180        u32 tx_pause_frames;
 181        u32 tx_64_byte_frames;
 182        u32 tx_65_127_byte_frames;
 183        u32 tx_128_255_byte_frames;
 184        u32 tx_256_511_bytes_frames;
 185        u32 tx_512_1023_byte_frames;
 186        u32 tx_1024_1518_byte_frames;
 187        u32 tx_greater_1518_byte_frames;
 188        u32 eee_tx_lpi_transitions;
 189        u32 eee_tx_lpi_time;
 190};
 191
 192struct lan78xx_statstage64 {
 193        u64 rx_fcs_errors;
 194        u64 rx_alignment_errors;
 195        u64 rx_fragment_errors;
 196        u64 rx_jabber_errors;
 197        u64 rx_undersize_frame_errors;
 198        u64 rx_oversize_frame_errors;
 199        u64 rx_dropped_frames;
 200        u64 rx_unicast_byte_count;
 201        u64 rx_broadcast_byte_count;
 202        u64 rx_multicast_byte_count;
 203        u64 rx_unicast_frames;
 204        u64 rx_broadcast_frames;
 205        u64 rx_multicast_frames;
 206        u64 rx_pause_frames;
 207        u64 rx_64_byte_frames;
 208        u64 rx_65_127_byte_frames;
 209        u64 rx_128_255_byte_frames;
 210        u64 rx_256_511_bytes_frames;
 211        u64 rx_512_1023_byte_frames;
 212        u64 rx_1024_1518_byte_frames;
 213        u64 rx_greater_1518_byte_frames;
 214        u64 eee_rx_lpi_transitions;
 215        u64 eee_rx_lpi_time;
 216        u64 tx_fcs_errors;
 217        u64 tx_excess_deferral_errors;
 218        u64 tx_carrier_errors;
 219        u64 tx_bad_byte_count;
 220        u64 tx_single_collisions;
 221        u64 tx_multiple_collisions;
 222        u64 tx_excessive_collision;
 223        u64 tx_late_collisions;
 224        u64 tx_unicast_byte_count;
 225        u64 tx_broadcast_byte_count;
 226        u64 tx_multicast_byte_count;
 227        u64 tx_unicast_frames;
 228        u64 tx_broadcast_frames;
 229        u64 tx_multicast_frames;
 230        u64 tx_pause_frames;
 231        u64 tx_64_byte_frames;
 232        u64 tx_65_127_byte_frames;
 233        u64 tx_128_255_byte_frames;
 234        u64 tx_256_511_bytes_frames;
 235        u64 tx_512_1023_byte_frames;
 236        u64 tx_1024_1518_byte_frames;
 237        u64 tx_greater_1518_byte_frames;
 238        u64 eee_tx_lpi_transitions;
 239        u64 eee_tx_lpi_time;
 240};
 241
 242struct lan78xx_net;
 243
 244struct lan78xx_priv {
 245        struct lan78xx_net *dev;
 246        u32 rfe_ctl;
  247        u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
 248        u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
 249        u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
 250        struct mutex dataport_mutex; /* for dataport access */
 251        spinlock_t rfe_ctl_lock; /* for rfe register access */
 252        struct work_struct set_multicast;
 253        struct work_struct set_vlan;
 254        u32 wol;
 255};
 256
 257enum skb_state {
 258        illegal = 0,
 259        tx_start,
 260        tx_done,
 261        rx_start,
 262        rx_done,
 263        rx_cleanup,
 264        unlink_start
 265};
 266
 267struct skb_data {               /* skb->cb is one of these */
 268        struct urb *urb;
 269        struct lan78xx_net *dev;
 270        enum skb_state state;
 271        size_t length;
 272        int num_of_packet;
 273};
 274
 275struct usb_context {
 276        struct usb_ctrlrequest req;
 277        struct lan78xx_net *dev;
 278};
 279
 280#define EVENT_TX_HALT                   0
 281#define EVENT_RX_HALT                   1
 282#define EVENT_RX_MEMORY                 2
 283#define EVENT_STS_SPLIT                 3
 284#define EVENT_LINK_RESET                4
 285#define EVENT_RX_PAUSED                 5
 286#define EVENT_DEV_WAKING                6
 287#define EVENT_DEV_ASLEEP                7
 288#define EVENT_DEV_OPEN                  8
 289#define EVENT_STAT_UPDATE               9
 290
 291struct statstage {
 292        struct mutex                    access_lock;    /* for stats access */
 293        struct lan78xx_statstage        saved;
 294        struct lan78xx_statstage        rollover_count;
 295        struct lan78xx_statstage        rollover_max;
 296        struct lan78xx_statstage64      curr_stat;
 297};
 298
 299struct lan78xx_net {
 300        struct net_device       *net;
 301        struct usb_device       *udev;
 302        struct usb_interface    *intf;
 303        void                    *driver_priv;
 304
 305        int                     rx_qlen;
 306        int                     tx_qlen;
 307        struct sk_buff_head     rxq;
 308        struct sk_buff_head     txq;
 309        struct sk_buff_head     done;
 310        struct sk_buff_head     rxq_pause;
 311        struct sk_buff_head     txq_pend;
 312
 313        struct tasklet_struct   bh;
 314        struct delayed_work     wq;
 315
 316        struct usb_host_endpoint *ep_blkin;
 317        struct usb_host_endpoint *ep_blkout;
 318        struct usb_host_endpoint *ep_intr;
 319
 320        int                     msg_enable;
 321
 322        struct urb              *urb_intr;
 323        struct usb_anchor       deferred;
 324
 325        struct mutex            phy_mutex; /* for phy access */
 326        unsigned                pipe_in, pipe_out, pipe_intr;
 327
 328        u32                     hard_mtu;       /* count any extra framing */
 329        size_t                  rx_urb_size;    /* size for rx urbs */
 330
 331        unsigned long           flags;
 332
 333        wait_queue_head_t       *wait;
 334        unsigned char           suspend_count;
 335
 336        unsigned                maxpacket;
 337        struct timer_list       delay;
 338        struct timer_list       stat_monitor;
 339
 340        unsigned long           data[5];
 341
 342        int                     link_on;
 343        u8                      mdix_ctrl;
 344
 345        u32                     chipid;
 346        u32                     chiprev;
 347        struct mii_bus          *mdiobus;
 348
 349        int                     fc_autoneg;
 350        u8                      fc_request_control;
 351
 352        int                     delta;
 353        struct statstage        stats;
 354};
 355
 356/* use ethtool to change the level for any given device */
 357static int msg_level = -1;
 358module_param(msg_level, int, 0);
 359MODULE_PARM_DESC(msg_level, "Override default message level");
 360
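     /* Read a 32-bit device register with a vendor control-IN transfer;
      * the little-endian value is converted to host order into *data.
      */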
 361static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
 362{
 363        u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
 364        int ret;
 365
 366        if (!buf)
 367                return -ENOMEM;
 368
 369        ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
 370                              USB_VENDOR_REQUEST_READ_REGISTER,
 371                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 372                              0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
 373        if (likely(ret >= 0)) {
 374                le32_to_cpus(buf);
 375                *data = *buf;
 376        } else {
 377                netdev_warn(dev->net,
 378                            "Failed to read register index 0x%08x. ret = %d",
 379                            index, ret);
 380        }
 381
 382        kfree(buf);
 383
 384        return ret;
 385}
 386
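     /* Write a 32-bit device register with a vendor control-OUT transfer;
      * the value is converted to little-endian before being sent.
      */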
 387static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
 388{
 389        u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
 390        int ret;
 391
 392        if (!buf)
 393                return -ENOMEM;
 394
 395        *buf = data;
 396        cpu_to_le32s(buf);
 397
 398        ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
 399                              USB_VENDOR_REQUEST_WRITE_REGISTER,
 400                              USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 401                              0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
 402        if (unlikely(ret < 0)) {
 403                netdev_warn(dev->net,
 404                            "Failed to write register index 0x%08x. ret = %d",
 405                            index, ret);
 406        }
 407
 408        kfree(buf);
 409
 410        return ret;
 411}
 412
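     /* Fetch the whole hardware statistics block in one vendor control
      * read and copy it to *data, converting each counter to host order.
      */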
 413static int lan78xx_read_stats(struct lan78xx_net *dev,
 414                              struct lan78xx_statstage *data)
 415{
 416        int ret = 0;
 417        int i;
 418        struct lan78xx_statstage *stats;
 419        u32 *src;
 420        u32 *dst;
 421
 422        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
 423        if (!stats)
 424                return -ENOMEM;
 425
 426        ret = usb_control_msg(dev->udev,
 427                              usb_rcvctrlpipe(dev->udev, 0),
 428                              USB_VENDOR_REQUEST_GET_STATS,
 429                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 430                              0,
 431                              0,
 432                              (void *)stats,
 433                              sizeof(*stats),
 434                              USB_CTRL_SET_TIMEOUT);
 435        if (likely(ret >= 0)) {
 436                src = (u32 *)stats;
 437                dst = (u32 *)data;
 438                for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
 439                        le32_to_cpus(&src[i]);
 440                        dst[i] = src[i];
 441                }
 442        } else {
 443                netdev_warn(dev->net,
 444                            "Failed to read stat ret = 0x%x", ret);
 445        }
 446
 447        kfree(stats);
 448
 449        return ret;
 450}
 451
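     /* Hardware counters are 32 bits wide: a fresh reading that is smaller
      * than the saved one means the counter wrapped, so count the rollover.
      */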
 452#define check_counter_rollover(struct1, dev_stats, member) {    \
 453        if (struct1->member < dev_stats.saved.member)           \
 454                dev_stats.rollover_count.member++;              \
 455        }
 456
 457static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
 458                                        struct lan78xx_statstage *stats)
 459{
 460        check_counter_rollover(stats, dev->stats, rx_fcs_errors);
 461        check_counter_rollover(stats, dev->stats, rx_alignment_errors);
 462        check_counter_rollover(stats, dev->stats, rx_fragment_errors);
 463        check_counter_rollover(stats, dev->stats, rx_jabber_errors);
 464        check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
 465        check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
 466        check_counter_rollover(stats, dev->stats, rx_dropped_frames);
 467        check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
 468        check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
 469        check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
 470        check_counter_rollover(stats, dev->stats, rx_unicast_frames);
 471        check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
 472        check_counter_rollover(stats, dev->stats, rx_multicast_frames);
 473        check_counter_rollover(stats, dev->stats, rx_pause_frames);
 474        check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
 475        check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
 476        check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
 477        check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
 478        check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
 479        check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
 480        check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
 481        check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
 482        check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
 483        check_counter_rollover(stats, dev->stats, tx_fcs_errors);
 484        check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
 485        check_counter_rollover(stats, dev->stats, tx_carrier_errors);
 486        check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
 487        check_counter_rollover(stats, dev->stats, tx_single_collisions);
 488        check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
 489        check_counter_rollover(stats, dev->stats, tx_excessive_collision);
 490        check_counter_rollover(stats, dev->stats, tx_late_collisions);
 491        check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
 492        check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
 493        check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
 494        check_counter_rollover(stats, dev->stats, tx_unicast_frames);
 495        check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
 496        check_counter_rollover(stats, dev->stats, tx_multicast_frames);
 497        check_counter_rollover(stats, dev->stats, tx_pause_frames);
 498        check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
 499        check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
 500        check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
 501        check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
 502        check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
 503        check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
 504        check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
 505        check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
 506        check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
 507
 508        memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
 509}
 510
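     /* Refresh the 64-bit running totals: read the hardware counters,
      * record any rollovers, then fold the rollovers into curr_stat.
      */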
 511static void lan78xx_update_stats(struct lan78xx_net *dev)
 512{
 513        u32 *p, *count, *max;
 514        u64 *data;
 515        int i;
 516        struct lan78xx_statstage lan78xx_stats;
 517
 518        if (usb_autopm_get_interface(dev->intf) < 0)
 519                return;
 520
 521        p = (u32 *)&lan78xx_stats;
 522        count = (u32 *)&dev->stats.rollover_count;
 523        max = (u32 *)&dev->stats.rollover_max;
 524        data = (u64 *)&dev->stats.curr_stat;
 525
 526        mutex_lock(&dev->stats.access_lock);
 527
 528        if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
 529                lan78xx_check_stat_rollover(dev, &lan78xx_stats);
 530
 531        for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
 532                data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
 533
 534        mutex_unlock(&dev->stats.access_lock);
 535
 536        usb_autopm_put_interface(dev->intf);
 537}
 538
  539/* Loop until the read completes or times out; called with phy_mutex held */
 540static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
 541{
 542        unsigned long start_time = jiffies;
 543        u32 val;
 544        int ret;
 545
 546        do {
 547                ret = lan78xx_read_reg(dev, MII_ACC, &val);
 548                if (unlikely(ret < 0))
 549                        return -EIO;
 550
 551                if (!(val & MII_ACC_MII_BUSY_))
 552                        return 0;
 553        } while (!time_after(jiffies, start_time + HZ));
 554
 555        return -EIO;
 556}
 557
 558static inline u32 mii_access(int id, int index, int read)
 559{
 560        u32 ret;
 561
 562        ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
 563        ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
 564        if (read)
 565                ret |= MII_ACC_MII_READ_;
 566        else
 567                ret |= MII_ACC_MII_WRITE_;
 568        ret |= MII_ACC_MII_BUSY_;
 569
 570        return ret;
 571}
 572
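     /* Poll E2P_CMD until the EEPROM controller reports not-busy, giving
      * up after about a second or when the controller flags a timeout.
      */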
 573static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
 574{
 575        unsigned long start_time = jiffies;
 576        u32 val;
 577        int ret;
 578
 579        do {
 580                ret = lan78xx_read_reg(dev, E2P_CMD, &val);
 581                if (unlikely(ret < 0))
 582                        return -EIO;
 583
 584                if (!(val & E2P_CMD_EPC_BUSY_) ||
 585                    (val & E2P_CMD_EPC_TIMEOUT_))
 586                        break;
 587                usleep_range(40, 100);
 588        } while (!time_after(jiffies, start_time + HZ));
 589
 590        if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
 591                netdev_warn(dev->net, "EEPROM read operation timeout");
 592                return -EIO;
 593        }
 594
 595        return 0;
 596}
 597
 598static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
 599{
 600        unsigned long start_time = jiffies;
 601        u32 val;
 602        int ret;
 603
 604        do {
 605                ret = lan78xx_read_reg(dev, E2P_CMD, &val);
 606                if (unlikely(ret < 0))
 607                        return -EIO;
 608
 609                if (!(val & E2P_CMD_EPC_BUSY_))
 610                        return 0;
 611
 612                usleep_range(40, 100);
 613        } while (!time_after(jiffies, start_time + HZ));
 614
 615        netdev_warn(dev->net, "EEPROM is busy");
 616        return -EIO;
 617}
 618
 619static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
 620                                   u32 length, u8 *data)
 621{
 622        u32 val;
 623        u32 saved;
 624        int i, ret;
 625        int retval;
 626
  627        /* On some chips the EEPROM pins are muxed with the LED function;
  628         * disable the LEDs while accessing the EEPROM and restore them after.
  629         */
 630        ret = lan78xx_read_reg(dev, HW_CFG, &val);
 631        saved = val;
 632        if (dev->chipid == ID_REV_CHIP_ID_7800_) {
 633                val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
 634                ret = lan78xx_write_reg(dev, HW_CFG, val);
 635        }
 636
 637        retval = lan78xx_eeprom_confirm_not_busy(dev);
 638        if (retval)
 639                return retval;
 640
 641        for (i = 0; i < length; i++) {
 642                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
 643                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
 644                ret = lan78xx_write_reg(dev, E2P_CMD, val);
 645                if (unlikely(ret < 0)) {
 646                        retval = -EIO;
 647                        goto exit;
 648                }
 649
 650                retval = lan78xx_wait_eeprom(dev);
 651                if (retval < 0)
 652                        goto exit;
 653
 654                ret = lan78xx_read_reg(dev, E2P_DATA, &val);
 655                if (unlikely(ret < 0)) {
 656                        retval = -EIO;
 657                        goto exit;
 658                }
 659
 660                data[i] = val & 0xFF;
 661                offset++;
 662        }
 663
 664        retval = 0;
 665exit:
 666        if (dev->chipid == ID_REV_CHIP_ID_7800_)
 667                ret = lan78xx_write_reg(dev, HW_CFG, saved);
 668
 669        return retval;
 670}
 671
 672static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
 673                               u32 length, u8 *data)
 674{
 675        u8 sig;
 676        int ret;
 677
 678        ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
 679        if ((ret == 0) && (sig == EEPROM_INDICATOR))
 680                ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
 681        else
 682                ret = -EINVAL;
 683
 684        return ret;
 685}
 686
 687static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
 688                                    u32 length, u8 *data)
 689{
 690        u32 val;
 691        u32 saved;
 692        int i, ret;
 693        int retval;
 694
  695        /* On some chips the EEPROM pins are muxed with the LED function;
  696         * disable the LEDs while accessing the EEPROM and restore them after.
  697         */
 698        ret = lan78xx_read_reg(dev, HW_CFG, &val);
 699        saved = val;
 700        if (dev->chipid == ID_REV_CHIP_ID_7800_) {
 701                val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
 702                ret = lan78xx_write_reg(dev, HW_CFG, val);
 703        }
 704
 705        retval = lan78xx_eeprom_confirm_not_busy(dev);
 706        if (retval)
 707                goto exit;
 708
 709        /* Issue write/erase enable command */
 710        val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
 711        ret = lan78xx_write_reg(dev, E2P_CMD, val);
 712        if (unlikely(ret < 0)) {
 713                retval = -EIO;
 714                goto exit;
 715        }
 716
 717        retval = lan78xx_wait_eeprom(dev);
 718        if (retval < 0)
 719                goto exit;
 720
 721        for (i = 0; i < length; i++) {
 722                /* Fill data register */
 723                val = data[i];
 724                ret = lan78xx_write_reg(dev, E2P_DATA, val);
 725                if (ret < 0) {
 726                        retval = -EIO;
 727                        goto exit;
 728                }
 729
 730                /* Send "write" command */
 731                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
 732                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
 733                ret = lan78xx_write_reg(dev, E2P_CMD, val);
 734                if (ret < 0) {
 735                        retval = -EIO;
 736                        goto exit;
 737                }
 738
 739                retval = lan78xx_wait_eeprom(dev);
 740                if (retval < 0)
 741                        goto exit;
 742
 743                offset++;
 744        }
 745
 746        retval = 0;
 747exit:
 748        if (dev->chipid == ID_REV_CHIP_ID_7800_)
 749                ret = lan78xx_write_reg(dev, HW_CFG, saved);
 750
 751        return retval;
 752}
 753
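     /* Read bytes from the on-chip OTP: clear OTP_PWR_DN if needed, then
      * issue a read command per byte and poll OTP_STATUS for completion.
      */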
 754static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
 755                                u32 length, u8 *data)
 756{
 757        int i;
 758        int ret;
 759        u32 buf;
 760        unsigned long timeout;
 761
 762        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
 763
 764        if (buf & OTP_PWR_DN_PWRDN_N_) {
 765                /* clear it and wait to be cleared */
 766                ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
 767
 768                timeout = jiffies + HZ;
 769                do {
 770                        usleep_range(1, 10);
 771                        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
 772                        if (time_after(jiffies, timeout)) {
 773                                netdev_warn(dev->net,
 774                                            "timeout on OTP_PWR_DN");
 775                                return -EIO;
 776                        }
 777                } while (buf & OTP_PWR_DN_PWRDN_N_);
 778        }
 779
 780        for (i = 0; i < length; i++) {
 781                ret = lan78xx_write_reg(dev, OTP_ADDR1,
 782                                        ((offset + i) >> 8) & OTP_ADDR1_15_11);
 783                ret = lan78xx_write_reg(dev, OTP_ADDR2,
 784                                        ((offset + i) & OTP_ADDR2_10_3));
 785
 786                ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
 787                ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
 788
 789                timeout = jiffies + HZ;
 790                do {
 791                        udelay(1);
 792                        ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
 793                        if (time_after(jiffies, timeout)) {
 794                                netdev_warn(dev->net,
 795                                            "timeout on OTP_STATUS");
 796                                return -EIO;
 797                        }
 798                } while (buf & OTP_STATUS_BUSY_);
 799
 800                ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
 801
 802                data[i] = (u8)(buf & 0xFF);
 803        }
 804
 805        return 0;
 806}
 807
 808static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
 809                                 u32 length, u8 *data)
 810{
 811        int i;
 812        int ret;
 813        u32 buf;
 814        unsigned long timeout;
 815
 816        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
 817
 818        if (buf & OTP_PWR_DN_PWRDN_N_) {
 819                /* clear it and wait to be cleared */
 820                ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
 821
 822                timeout = jiffies + HZ;
 823                do {
 824                        udelay(1);
 825                        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
 826                        if (time_after(jiffies, timeout)) {
 827                                netdev_warn(dev->net,
 828                                            "timeout on OTP_PWR_DN completion");
 829                                return -EIO;
 830                        }
 831                } while (buf & OTP_PWR_DN_PWRDN_N_);
 832        }
 833
 834        /* set to BYTE program mode */
 835        ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
 836
 837        for (i = 0; i < length; i++) {
 838                ret = lan78xx_write_reg(dev, OTP_ADDR1,
 839                                        ((offset + i) >> 8) & OTP_ADDR1_15_11);
 840                ret = lan78xx_write_reg(dev, OTP_ADDR2,
 841                                        ((offset + i) & OTP_ADDR2_10_3));
 842                ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
 843                ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
 844                ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
 845
 846                timeout = jiffies + HZ;
 847                do {
 848                        udelay(1);
 849                        ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
 850                        if (time_after(jiffies, timeout)) {
 851                                netdev_warn(dev->net,
 852                                            "Timeout on OTP_STATUS completion");
 853                                return -EIO;
 854                        }
 855                } while (buf & OTP_STATUS_BUSY_);
 856        }
 857
 858        return 0;
 859}
 860
 861static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
 862                            u32 length, u8 *data)
 863{
 864        u8 sig;
 865        int ret;
 866
 867        ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
 868
 869        if (ret == 0) {
  870                /* data for an OTP_INDICATOR_2 image starts at offset 0x100 */
  871                if (sig == OTP_INDICATOR_2)
  872                        offset += 0x100;
  873                else if (sig != OTP_INDICATOR_1)
  874                        return -EINVAL;
  875
  876                ret = lan78xx_read_raw_otp(dev, offset, length, data);
 877        }
 878
 879        return ret;
 880}
 881
 882static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
 883{
 884        int i, ret;
 885
 886        for (i = 0; i < 100; i++) {
 887                u32 dp_sel;
 888
 889                ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
 890                if (unlikely(ret < 0))
 891                        return -EIO;
 892
 893                if (dp_sel & DP_SEL_DPRDY_)
 894                        return 0;
 895
 896                usleep_range(40, 100);
 897        }
 898
 899        netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
 900
 901        return -EIO;
 902}
 903
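     /* Write a block of 32-bit words to the selected internal RAM via the
      * dataport, one DP_ADDR/DP_DATA/DP_CMD cycle per word, holding
      * dataport_mutex for the whole transfer.
      */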
 904static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
 905                                  u32 addr, u32 length, u32 *buf)
 906{
 907        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
 908        u32 dp_sel;
 909        int i, ret;
 910
 911        if (usb_autopm_get_interface(dev->intf) < 0)
  912                return 0;
 913
 914        mutex_lock(&pdata->dataport_mutex);
 915
 916        ret = lan78xx_dataport_wait_not_busy(dev);
 917        if (ret < 0)
 918                goto done;
 919
 920        ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
 921
 922        dp_sel &= ~DP_SEL_RSEL_MASK_;
 923        dp_sel |= ram_select;
 924        ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
 925
 926        for (i = 0; i < length; i++) {
 927                ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
 928
 929                ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
 930
 931                ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
 932
 933                ret = lan78xx_dataport_wait_not_busy(dev);
 934                if (ret < 0)
 935                        goto done;
 936        }
 937
 938done:
 939        mutex_unlock(&pdata->dataport_mutex);
 940        usb_autopm_put_interface(dev->intf);
 941
 942        return ret;
 943}
 944
 945static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
 946                                    int index, u8 addr[ETH_ALEN])
 947{
 948        u32     temp;
 949
 950        if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
 951                temp = addr[3];
 952                temp = addr[2] | (temp << 8);
 953                temp = addr[1] | (temp << 8);
 954                temp = addr[0] | (temp << 8);
 955                pdata->pfilter_table[index][1] = temp;
 956                temp = addr[5];
 957                temp = addr[4] | (temp << 8);
 958                temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
 959                pdata->pfilter_table[index][0] = temp;
 960        }
 961}
 962
 963/* returns hash bit number for given MAC address */
 964static inline u32 lan78xx_hash(char addr[ETH_ALEN])
 965{
 966        return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
 967}
 968
 969static void lan78xx_deferred_multicast_write(struct work_struct *param)
 970{
 971        struct lan78xx_priv *pdata =
 972                        container_of(param, struct lan78xx_priv, set_multicast);
 973        struct lan78xx_net *dev = pdata->dev;
 974        int i;
 975        int ret;
 976
 977        netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
 978                  pdata->rfe_ctl);
 979
 980        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
 981                               DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
 982
 983        for (i = 1; i < NUM_OF_MAF; i++) {
 984                ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
 985                ret = lan78xx_write_reg(dev, MAF_LO(i),
 986                                        pdata->pfilter_table[i][1]);
 987                ret = lan78xx_write_reg(dev, MAF_HI(i),
 988                                        pdata->pfilter_table[i][0]);
 989        }
 990
 991        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
 992}
 993
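     /* Rebuild the perfect-filter and multicast hash tables under
      * rfe_ctl_lock, then defer the actual register writes to a work
      * item because they cannot be issued from atomic context.
      */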
 994static void lan78xx_set_multicast(struct net_device *netdev)
 995{
 996        struct lan78xx_net *dev = netdev_priv(netdev);
 997        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
 998        unsigned long flags;
 999        int i;
1000
1001        spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1002
1003        pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1004                            RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1005
 1006        for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
 1007                pdata->mchash_table[i] = 0;
 1008        /* pfilter_table[0] holds the device's own HW address */
 1009        for (i = 1; i < NUM_OF_MAF; i++) {
 1010                pdata->pfilter_table[i][0] =
 1011                pdata->pfilter_table[i][1] = 0;
 1012        }
1013
1014        pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1015
1016        if (dev->net->flags & IFF_PROMISC) {
1017                netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1018                pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1019        } else {
1020                if (dev->net->flags & IFF_ALLMULTI) {
1021                        netif_dbg(dev, drv, dev->net,
1022                                  "receive all multicast enabled");
1023                        pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1024                }
1025        }
1026
1027        if (netdev_mc_count(dev->net)) {
1028                struct netdev_hw_addr *ha;
1029                int i;
1030
1031                netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1032
1033                pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1034
1035                i = 1;
1036                netdev_for_each_mc_addr(ha, netdev) {
1037                        /* set first 32 into Perfect Filter */
1038                        if (i < 33) {
1039                                lan78xx_set_addr_filter(pdata, i, ha->addr);
1040                        } else {
1041                                u32 bitnum = lan78xx_hash(ha->addr);
1042
1043                                pdata->mchash_table[bitnum / 32] |=
1044                                                        (1 << (bitnum % 32));
1045                                pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1046                        }
1047                        i++;
1048                }
1049        }
1050
1051        spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1052
1053        /* defer register writes to a sleepable context */
1054        schedule_work(&pdata->set_multicast);
1055}
1056
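     /* Apply pause-frame settings: resolve the flow-control capability
      * (autoneg result or the requested control), set the FCT_FLOW
      * thresholds, then enable flow control in the FLOW register.
      */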
1057static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1058                                      u16 lcladv, u16 rmtadv)
1059{
1060        u32 flow = 0, fct_flow = 0;
1061        int ret;
1062        u8 cap;
1063
1064        if (dev->fc_autoneg)
1065                cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1066        else
1067                cap = dev->fc_request_control;
1068
1069        if (cap & FLOW_CTRL_TX)
1070                flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1071
1072        if (cap & FLOW_CTRL_RX)
1073                flow |= FLOW_CR_RX_FCEN_;
1074
1075        if (dev->udev->speed == USB_SPEED_SUPER)
1076                fct_flow = 0x817;
1077        else if (dev->udev->speed == USB_SPEED_HIGH)
1078                fct_flow = 0x211;
1079
1080        netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1081                  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1082                  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1083
1084        ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1085
1086        /* threshold value should be set before enabling flow */
1087        ret = lan78xx_write_reg(dev, FLOW, flow);
1088
1089        return 0;
1090}
1091
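     /* Handle a PHY link change: clear interrupt status, reset the MAC on
      * link-down, or on link-up tune USB U1/U2, update flow control and
      * arm the statistics timer.
      */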
1092static int lan78xx_link_reset(struct lan78xx_net *dev)
1093{
1094        struct phy_device *phydev = dev->net->phydev;
1095        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1096        int ladv, radv, ret;
1097        u32 buf;
1098
1099        /* clear PHY interrupt status */
1100        ret = phy_read(phydev, LAN88XX_INT_STS);
1101        if (unlikely(ret < 0))
1102                return -EIO;
1103
1104        /* clear LAN78xx interrupt status */
1105        ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1106        if (unlikely(ret < 0))
1107                return -EIO;
1108
1109        phy_read_status(phydev);
1110
1111        if (!phydev->link && dev->link_on) {
1112                dev->link_on = false;
1113
1114                /* reset MAC */
1115                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1116                if (unlikely(ret < 0))
1117                        return -EIO;
1118                buf |= MAC_CR_RST_;
1119                ret = lan78xx_write_reg(dev, MAC_CR, buf);
1120                if (unlikely(ret < 0))
1121                        return -EIO;
1122
1123                phy_mac_interrupt(phydev, 0);
1124
1125                del_timer(&dev->stat_monitor);
1126        } else if (phydev->link && !dev->link_on) {
1127                dev->link_on = true;
1128
1129                phy_ethtool_gset(phydev, &ecmd);
1130
1131                ret = phy_read(phydev, LAN88XX_INT_STS);
1132
1133                if (dev->udev->speed == USB_SPEED_SUPER) {
1134                        if (ethtool_cmd_speed(&ecmd) == 1000) {
1135                                /* disable U2 */
1136                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1137                                buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1138                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1139                                /* enable U1 */
1140                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1141                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
1142                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1143                        } else {
1144                                /* enable U1 & U2 */
1145                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1146                                buf |= USB_CFG1_DEV_U2_INIT_EN_;
1147                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
1148                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1149                        }
1150                }
1151
1152                ladv = phy_read(phydev, MII_ADVERTISE);
1153                if (ladv < 0)
1154                        return ladv;
1155
1156                radv = phy_read(phydev, MII_LPA);
1157                if (radv < 0)
1158                        return radv;
1159
1160                netif_dbg(dev, link, dev->net,
1161                          "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1162                          ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1163
1164                ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
1165                phy_mac_interrupt(phydev, 1);
1166
1167                if (!timer_pending(&dev->stat_monitor)) {
1168                        dev->delta = 1;
1169                        mod_timer(&dev->stat_monitor,
1170                                  jiffies + STAT_UPDATE_TIMER);
1171                }
1172        }
1173
1174        return ret;
1175}
1176
1177/* some work can't be done in tasklets, so we use keventd
1178 *
 1179 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 1180 * but tasklet_schedule() doesn't.  Hope the failure is rare.
1181 */
1182void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1183{
1184        set_bit(work, &dev->flags);
1185        if (!schedule_delayed_work(&dev->wq, 0))
1186                netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1187}
1188
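     /* Process a status URB from the interrupt endpoint: expect a 4-byte
      * word; a PHY interrupt bit schedules the deferred link reset.
      */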
1189static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1190{
1191        u32 intdata;
1192
1193        if (urb->actual_length != 4) {
1194                netdev_warn(dev->net,
1195                            "unexpected urb length %d", urb->actual_length);
1196                return;
1197        }
1198
1199        memcpy(&intdata, urb->transfer_buffer, 4);
1200        le32_to_cpus(&intdata);
1201
1202        if (intdata & INT_ENP_PHY_INT) {
1203                netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
 1204                lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1205        } else
1206                netdev_warn(dev->net,
1207                            "unexpected interrupt: 0x%08x\n", intdata);
1208}
1209
1210static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1211{
1212        return MAX_EEPROM_SIZE;
1213}
1214
1215static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1216                                      struct ethtool_eeprom *ee, u8 *data)
1217{
1218        struct lan78xx_net *dev = netdev_priv(netdev);
1219
1220        ee->magic = LAN78XX_EEPROM_MAGIC;
1221
1222        return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1223}
1224
1225static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1226                                      struct ethtool_eeprom *ee, u8 *data)
1227{
1228        struct lan78xx_net *dev = netdev_priv(netdev);
1229
1230        /* Allow entire eeprom update only */
1231        if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1232            (ee->offset == 0) &&
1233            (ee->len == 512) &&
1234            (data[0] == EEPROM_INDICATOR))
1235                return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1236        else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1237                 (ee->offset == 0) &&
1238                 (ee->len == 512) &&
1239                 (data[0] == OTP_INDICATOR_1))
1240                return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1241
1242        return -EINVAL;
1243}
1244
1245static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1246                                u8 *data)
1247{
1248        if (stringset == ETH_SS_STATS)
1249                memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1250}
1251
1252static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1253{
1254        if (sset == ETH_SS_STATS)
1255                return ARRAY_SIZE(lan78xx_gstrings);
1256        else
1257                return -EOPNOTSUPP;
1258}
1259
1260static void lan78xx_get_stats(struct net_device *netdev,
1261                              struct ethtool_stats *stats, u64 *data)
1262{
1263        struct lan78xx_net *dev = netdev_priv(netdev);
1264
1265        lan78xx_update_stats(dev);
1266
1267        mutex_lock(&dev->stats.access_lock);
1268        memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1269        mutex_unlock(&dev->stats.access_lock);
1270}
1271
1272static void lan78xx_get_wol(struct net_device *netdev,
1273                            struct ethtool_wolinfo *wol)
1274{
1275        struct lan78xx_net *dev = netdev_priv(netdev);
1276        int ret;
1277        u32 buf;
1278        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1279
1280        if (usb_autopm_get_interface(dev->intf) < 0)
 1281                return;
1282
1283        ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1284        if (unlikely(ret < 0)) {
1285                wol->supported = 0;
1286                wol->wolopts = 0;
1287        } else {
1288                if (buf & USB_CFG_RMT_WKP_) {
1289                        wol->supported = WAKE_ALL;
1290                        wol->wolopts = pdata->wol;
1291                } else {
1292                        wol->supported = 0;
1293                        wol->wolopts = 0;
1294                }
1295        }
1296
1297        usb_autopm_put_interface(dev->intf);
1298}
1299
1300static int lan78xx_set_wol(struct net_device *netdev,
1301                           struct ethtool_wolinfo *wol)
1302{
1303        struct lan78xx_net *dev = netdev_priv(netdev);
1304        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1305        int ret;
1306
1307        ret = usb_autopm_get_interface(dev->intf);
1308        if (ret < 0)
1309                return ret;
1310
1311        pdata->wol = 0;
1312        if (wol->wolopts & WAKE_UCAST)
1313                pdata->wol |= WAKE_UCAST;
1314        if (wol->wolopts & WAKE_MCAST)
1315                pdata->wol |= WAKE_MCAST;
1316        if (wol->wolopts & WAKE_BCAST)
1317                pdata->wol |= WAKE_BCAST;
1318        if (wol->wolopts & WAKE_MAGIC)
1319                pdata->wol |= WAKE_MAGIC;
1320        if (wol->wolopts & WAKE_PHY)
1321                pdata->wol |= WAKE_PHY;
1322        if (wol->wolopts & WAKE_ARP)
1323                pdata->wol |= WAKE_ARP;
1324
1325        device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1326
1327        phy_ethtool_set_wol(netdev->phydev, wol);
1328
1329        usb_autopm_put_interface(dev->intf);
1330
1331        return ret;
1332}
1333
1334static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1335{
1336        struct lan78xx_net *dev = netdev_priv(net);
1337        struct phy_device *phydev = net->phydev;
1338        int ret;
1339        u32 buf;
1340
1341        ret = usb_autopm_get_interface(dev->intf);
1342        if (ret < 0)
1343                return ret;
1344
1345        ret = phy_ethtool_get_eee(phydev, edata);
1346        if (ret < 0)
1347                goto exit;
1348
1349        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1350        if (buf & MAC_CR_EEE_EN_) {
1351                edata->eee_enabled = true;
1352                edata->eee_active = !!(edata->advertised &
1353                                       edata->lp_advertised);
1354                edata->tx_lpi_enabled = true;
 1355                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same microsecond unit */
1356                ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1357                edata->tx_lpi_timer = buf;
1358        } else {
1359                edata->eee_enabled = false;
1360                edata->eee_active = false;
1361                edata->tx_lpi_enabled = false;
1362                edata->tx_lpi_timer = 0;
1363        }
1364
1365        ret = 0;
1366exit:
1367        usb_autopm_put_interface(dev->intf);
1368
1369        return ret;
1370}
1371
1372static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1373{
1374        struct lan78xx_net *dev = netdev_priv(net);
1375        int ret;
1376        u32 buf;
1377
1378        ret = usb_autopm_get_interface(dev->intf);
1379        if (ret < 0)
1380                return ret;
1381
1382        if (edata->eee_enabled) {
1383                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1384                buf |= MAC_CR_EEE_EN_;
1385                ret = lan78xx_write_reg(dev, MAC_CR, buf);
1386
1387                phy_ethtool_set_eee(net->phydev, edata);
1388
1389                buf = (u32)edata->tx_lpi_timer;
1390                ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1391        } else {
1392                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1393                buf &= ~MAC_CR_EEE_EN_;
1394                ret = lan78xx_write_reg(dev, MAC_CR, buf);
1395        }
1396
1397        usb_autopm_put_interface(dev->intf);
1398
1399        return 0;
1400}
1401
1402static u32 lan78xx_get_link(struct net_device *net)
1403{
1404        phy_read_status(net->phydev);
1405
1406        return net->phydev->link;
1407}
1408
1409int lan78xx_nway_reset(struct net_device *net)
1410{
1411        return phy_start_aneg(net->phydev);
1412}
1413
1414static void lan78xx_get_drvinfo(struct net_device *net,
1415                                struct ethtool_drvinfo *info)
1416{
1417        struct lan78xx_net *dev = netdev_priv(net);
1418
1419        strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1420        strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1421        usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1422}
1423
1424static u32 lan78xx_get_msglevel(struct net_device *net)
1425{
1426        struct lan78xx_net *dev = netdev_priv(net);
1427
1428        return dev->msg_enable;
1429}
1430
1431static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1432{
1433        struct lan78xx_net *dev = netdev_priv(net);
1434
1435        dev->msg_enable = level;
1436}
1437
1438static int lan78xx_get_mdix_status(struct net_device *net)
1439{
1440        struct phy_device *phydev = net->phydev;
1441        int buf;
1442
1443        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1444        buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1445        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1446
1447        return buf;
1448}
1449
1450static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1451{
1452        struct lan78xx_net *dev = netdev_priv(net);
1453        struct phy_device *phydev = net->phydev;
1454        int buf;
1455
1456        if (mdix_ctrl == ETH_TP_MDI) {
1457                phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1458                          LAN88XX_EXT_PAGE_SPACE_1);
1459                buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1460                buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1461                phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1462                          buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1463                phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1464                          LAN88XX_EXT_PAGE_SPACE_0);
1465        } else if (mdix_ctrl == ETH_TP_MDI_X) {
1466                phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1467                          LAN88XX_EXT_PAGE_SPACE_1);
1468                buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1469                buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1470                phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1471                          buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1472                phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1473                          LAN88XX_EXT_PAGE_SPACE_0);
1474        } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1475                phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1476                          LAN88XX_EXT_PAGE_SPACE_1);
1477                buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1478                buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1479                phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1480                          buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1481                phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1482                          LAN88XX_EXT_PAGE_SPACE_0);
1483        }
1484        dev->mdix_ctrl = mdix_ctrl;
1485}
1486
1487static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1488{
1489        struct lan78xx_net *dev = netdev_priv(net);
1490        struct phy_device *phydev = net->phydev;
1491        int ret;
1492        int buf;
1493
1494        ret = usb_autopm_get_interface(dev->intf);
1495        if (ret < 0)
1496                return ret;
1497
1498        ret = phy_ethtool_gset(phydev, cmd);
1499
1500        buf = lan78xx_get_mdix_status(net);
1501
1502        buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1503        if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1504                cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1505                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1506        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1507                cmd->eth_tp_mdix = ETH_TP_MDI;
1508                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1509        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1510                cmd->eth_tp_mdix = ETH_TP_MDI_X;
1511                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1512        }
1513
1514        usb_autopm_put_interface(dev->intf);
1515
1516        return ret;
1517}
1518
1519static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1520{
1521        struct lan78xx_net *dev = netdev_priv(net);
1522        struct phy_device *phydev = net->phydev;
1523        int ret = 0;
1524        int temp;
1525
1526        ret = usb_autopm_get_interface(dev->intf);
1527        if (ret < 0)
1528                return ret;
1529
1530        if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1531                lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
1532        }
1533
1534        /* change speed & duplex */
1535        ret = phy_ethtool_sset(phydev, cmd);
1536
1537        if (!cmd->autoneg) {
1538                /* force link down */
1539                temp = phy_read(phydev, MII_BMCR);
1540                phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1541                mdelay(1);
1542                phy_write(phydev, MII_BMCR, temp);
1543        }
1544
1545        usb_autopm_put_interface(dev->intf);
1546
1547        return ret;
1548}
1549
1550static void lan78xx_get_pause(struct net_device *net,
1551                              struct ethtool_pauseparam *pause)
1552{
1553        struct lan78xx_net *dev = netdev_priv(net);
1554        struct phy_device *phydev = net->phydev;
1555        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1556
1557        phy_ethtool_gset(phydev, &ecmd);
1558
1559        pause->autoneg = dev->fc_autoneg;
1560
1561        if (dev->fc_request_control & FLOW_CTRL_TX)
1562                pause->tx_pause = 1;
1563
1564        if (dev->fc_request_control & FLOW_CTRL_RX)
1565                pause->rx_pause = 1;
1566}
1567
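/* ethtool pause handling: the requested rx/tx pause bits are cached in
 * dev->fc_request_control.  With autonegotiation enabled they are
 * translated into MII pause advertisement bits (mii_advertise_flowctrl()
 * followed by mii_adv_to_ethtool_adv_t()) and pushed to the PHY.
 * Requesting pause autoneg while link autoneg is off is rejected with
 * -EINVAL.
 */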
1568static int lan78xx_set_pause(struct net_device *net,
1569                             struct ethtool_pauseparam *pause)
1570{
1571        struct lan78xx_net *dev = netdev_priv(net);
1572        struct phy_device *phydev = net->phydev;
1573        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1574        int ret;
1575
1576        phy_ethtool_gset(phydev, &ecmd);
1577
1578        if (pause->autoneg && !ecmd.autoneg) {
1579                ret = -EINVAL;
1580                goto exit;
1581        }
1582
1583        dev->fc_request_control = 0;
1584        if (pause->rx_pause)
1585                dev->fc_request_control |= FLOW_CTRL_RX;
1586
1587        if (pause->tx_pause)
1588                dev->fc_request_control |= FLOW_CTRL_TX;
1589
1590        if (ecmd.autoneg) {
1591                u32 mii_adv;
1592
1593                ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1594                mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1595                ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1596                phy_ethtool_sset(phydev, &ecmd);
1597        }
1598
1599        dev->fc_autoneg = pause->autoneg;
1600
1601        ret = 0;
1602exit:
1603        return ret;
1604}
1605
1606static const struct ethtool_ops lan78xx_ethtool_ops = {
1607        .get_link       = lan78xx_get_link,
1608        .nway_reset     = lan78xx_nway_reset,
1609        .get_drvinfo    = lan78xx_get_drvinfo,
1610        .get_msglevel   = lan78xx_get_msglevel,
1611        .set_msglevel   = lan78xx_set_msglevel,
1612        .get_settings   = lan78xx_get_settings,
1613        .set_settings   = lan78xx_set_settings,
1614        .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1615        .get_eeprom     = lan78xx_ethtool_get_eeprom,
1616        .set_eeprom     = lan78xx_ethtool_set_eeprom,
1617        .get_ethtool_stats = lan78xx_get_stats,
1618        .get_sset_count = lan78xx_get_sset_count,
1619        .get_strings    = lan78xx_get_strings,
1620        .get_wol        = lan78xx_get_wol,
1621        .set_wol        = lan78xx_set_wol,
1622        .get_eee        = lan78xx_get_eee,
1623        .set_eee        = lan78xx_set_eee,
1624        .get_pauseparam = lan78xx_get_pause,
1625        .set_pauseparam = lan78xx_set_pause,
1626};
1627
1628static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1629{
1630        if (!netif_running(netdev))
1631                return -EINVAL;
1632
1633        return phy_mii_ioctl(netdev->phydev, rq, cmd);
1634}
1635
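/* MAC address selection order: use the address already programmed in
 * RX_ADDRL/RX_ADDRH if it is valid, otherwise try EEPROM/OTP at
 * EEPROM_MAC_OFFSET, and fall back to a random address.  The chosen
 * address is written to perfect filter slot 0 (MAF_LO(0)/MAF_HI(0))
 * and copied into net->dev_addr.
 */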
1636static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1637{
1638        u32 addr_lo, addr_hi;
1639        int ret;
1640        u8 addr[6];
1641
1642        ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1643        ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1644
1645        addr[0] = addr_lo & 0xFF;
1646        addr[1] = (addr_lo >> 8) & 0xFF;
1647        addr[2] = (addr_lo >> 16) & 0xFF;
1648        addr[3] = (addr_lo >> 24) & 0xFF;
1649        addr[4] = addr_hi & 0xFF;
1650        addr[5] = (addr_hi >> 8) & 0xFF;
1651
1652        if (!is_valid_ether_addr(addr)) {
1653                /* reading mac address from EEPROM or OTP */
1654                if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1655                                         addr) == 0) ||
1656                    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1657                                      addr) == 0)) {
1658                        if (is_valid_ether_addr(addr)) {
1659                                /* eeprom values are valid so use them */
1660                                netif_dbg(dev, ifup, dev->net,
1661                                          "MAC address read from EEPROM");
1662                        } else {
1663                                /* generate random MAC */
1664                                random_ether_addr(addr);
1665                                netif_dbg(dev, ifup, dev->net,
1666                                          "MAC address set to random addr");
1667                        }
1668
1669                        addr_lo = addr[0] | (addr[1] << 8) |
1670                                  (addr[2] << 16) | (addr[3] << 24);
1671                        addr_hi = addr[4] | (addr[5] << 8);
1672
1673                        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1674                        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1675                } else {
1676                        /* generate random MAC */
1677                        random_ether_addr(addr);
1678                        netif_dbg(dev, ifup, dev->net,
1679                                  "MAC address set to random addr");

                        /* program the random address too, so the MAF
                         * filter writes below do not reuse the invalid
                         * address read at the top of this function
                         */
                        addr_lo = addr[0] | (addr[1] << 8) |
                                  (addr[2] << 16) | (addr[3] << 24);
                        addr_hi = addr[4] | (addr[5] << 8);

                        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
                        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1680                }
1681        }
1682
1683        ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1684        ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1685
1686        ether_addr_copy(dev->net->dev_addr, addr);
1687}
1688
1689/* MDIO read and write wrappers for phylib */
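/* Each access goes over USB: wait for MII_ACC to go idle, program the
 * PHY address, register index and direction into MII_ACC (for writes,
 * load MII_DATA first), wait for completion, and read MII_DATA back for
 * reads.  dev->phy_mutex serialises the sequence, and the interface is
 * kept awake with usb_autopm_get_interface() for the duration.
 */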
1690static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1691{
1692        struct lan78xx_net *dev = bus->priv;
1693        u32 val, addr;
1694        int ret;
1695
1696        ret = usb_autopm_get_interface(dev->intf);
1697        if (ret < 0)
1698                return ret;
1699
1700        mutex_lock(&dev->phy_mutex);
1701
1702        /* confirm MII not busy */
1703        ret = lan78xx_phy_wait_not_busy(dev);
1704        if (ret < 0)
1705                goto done;
1706
1707        /* set the address, index & direction (read from PHY) */
1708        addr = mii_access(phy_id, idx, MII_READ);
1709        ret = lan78xx_write_reg(dev, MII_ACC, addr);
1710
1711        ret = lan78xx_phy_wait_not_busy(dev);
1712        if (ret < 0)
1713                goto done;
1714
1715        ret = lan78xx_read_reg(dev, MII_DATA, &val);
1716
1717        ret = (int)(val & 0xFFFF);
1718
1719done:
1720        mutex_unlock(&dev->phy_mutex);
1721        usb_autopm_put_interface(dev->intf);
1722        return ret;
1723}
1724
1725static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1726                                 u16 regval)
1727{
1728        struct lan78xx_net *dev = bus->priv;
1729        u32 val, addr;
1730        int ret;
1731
1732        ret = usb_autopm_get_interface(dev->intf);
1733        if (ret < 0)
1734                return ret;
1735
1736        mutex_lock(&dev->phy_mutex);
1737
1738        /* confirm MII not busy */
1739        ret = lan78xx_phy_wait_not_busy(dev);
1740        if (ret < 0)
1741                goto done;
1742
1743        val = (u32)regval;
1744        ret = lan78xx_write_reg(dev, MII_DATA, val);
1745
1746        /* set the address, index & direction (write to PHY) */
1747        addr = mii_access(phy_id, idx, MII_WRITE);
1748        ret = lan78xx_write_reg(dev, MII_ACC, addr);
1749
1750        ret = lan78xx_phy_wait_not_busy(dev);
1751        if (ret < 0)
1752                goto done;
1753
1754done:
1755        mutex_unlock(&dev->phy_mutex);
1756        usb_autopm_put_interface(dev->intf);
1757        return 0;
1758}
1759
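/* Register an MDIO bus backed by the USB register accessors above.  The
 * bus id encodes the USB bus and device numbers, and phy_mask is set so
 * that only the internal PHY at address 1 is probed on LAN7800/LAN7850.
 */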
1760static int lan78xx_mdio_init(struct lan78xx_net *dev)
1761{
1762        int ret;
1763
1764        dev->mdiobus = mdiobus_alloc();
1765        if (!dev->mdiobus) {
1766                netdev_err(dev->net, "can't allocate MDIO bus\n");
1767                return -ENOMEM;
1768        }
1769
1770        dev->mdiobus->priv = (void *)dev;
1771        dev->mdiobus->read = lan78xx_mdiobus_read;
1772        dev->mdiobus->write = lan78xx_mdiobus_write;
1773        dev->mdiobus->name = "lan78xx-mdiobus";
1774
1775        snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1776                 dev->udev->bus->busnum, dev->udev->devnum);
1777
1778        switch (dev->chipid) {
1779        case ID_REV_CHIP_ID_7800_:
1780        case ID_REV_CHIP_ID_7850_:
1781                /* set to internal PHY id */
1782                dev->mdiobus->phy_mask = ~(1 << 1);
1783                break;
1784        }
1785
1786        ret = mdiobus_register(dev->mdiobus);
1787        if (ret) {
1788                netdev_err(dev->net, "can't register MDIO bus\n");
1789                goto exit1;
1790        }
1791
1792        netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1793        return 0;
1794exit1:
1795        mdiobus_free(dev->mdiobus);
1796        return ret;
1797}
1798
1799static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1800{
1801        mdiobus_unregister(dev->mdiobus);
1802        mdiobus_free(dev->mdiobus);
1803}
1804
1805static void lan78xx_link_status_change(struct net_device *net)
1806{
1807        struct phy_device *phydev = net->phydev;
1808        int ret, temp;
1809
1810        /* In forced 100 Mbps full/half duplex mode, the chip may fail to
1811         * set the mode correctly when the cable is switched between a
1812         * long (~50 m or more) and a short one.  As a workaround, force
1813         * the speed to 10 first, then back to 100, while in forced mode.
1814         */
1815        if (!phydev->autoneg && (phydev->speed == 100)) {
1816                /* disable phy interrupt */
1817                temp = phy_read(phydev, LAN88XX_INT_MASK);
1818                temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1819                ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1820
1821                temp = phy_read(phydev, MII_BMCR);
1822                temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1823                phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1824                temp |= BMCR_SPEED100;
1825                phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1826
1827                /* clear pending interrupt generated while workaround */
1828                temp = phy_read(phydev, LAN88XX_INT_STS);
1829
1830                /* enable phy interrupt back */
1831                temp = phy_read(phydev, LAN88XX_INT_MASK);
1832                temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1833                ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1834        }
1835}
1836
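/* PHY bring-up: find the internal PHY on our MDIO bus, clear and then
 * enable its interrupt sources, and attach it with phy_connect_direct()
 * in GMII mode.  PHY_IGNORE_INTERRUPT is used because link events are
 * delivered through the USB interrupt endpoint rather than a dedicated
 * IRQ line.  1000BASE-T half duplex is masked out and pause frames are
 * advertised in both directions by default.
 */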
1837static int lan78xx_phy_init(struct lan78xx_net *dev)
1838{
1839        int ret;
1840        u32 mii_adv;
1841        struct phy_device *phydev;
1842
1843        phydev = phy_find_first(dev->mdiobus);
1844        if (!phydev) {
1845                netdev_err(dev->net, "no PHY found\n");
1846                return -EIO;
1847        }
1848
1849        /* Enable PHY interrupts.
1850         * We handle our own interrupt
1851         */
1852        ret = phy_read(phydev, LAN88XX_INT_STS);
1853        ret = phy_write(phydev, LAN88XX_INT_MASK,
1854                        LAN88XX_INT_MASK_MDINTPIN_EN_ |
1855                        LAN88XX_INT_MASK_LINK_CHANGE_);
1856
1857        phydev->irq = PHY_IGNORE_INTERRUPT;
1858
1859        ret = phy_connect_direct(dev->net, phydev,
1860                                 lan78xx_link_status_change,
1861                                 PHY_INTERFACE_MODE_GMII);
1862        if (ret) {
1863                netdev_err(dev->net, "can't attach PHY to %s\n",
1864                           dev->mdiobus->id);
1865                return -EIO;
1866        }
1867
1868        /* set to AUTOMDIX */
1869        lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1870
1871        /* MAC doesn't support 1000T Half */
1872        phydev->supported &= ~SUPPORTED_1000baseT_Half;
1873
1874        /* support both flow controls */
1875        dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1876        phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1877        mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1878        phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1879
1880        genphy_config_aneg(phydev);
1881
1882        dev->fc_autoneg = phydev->autoneg;
1883
1884        phy_start(phydev);
1885
1886        netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1887
1888        return 0;
1889}
1890
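/* Program the maximum RX frame length.  The RX path is temporarily
 * disabled while the MAC_RX_MAX_SIZE_ field is rewritten, and 4 bytes
 * are added to the requested size to allow for the FCS.
 */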
1891static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1892{
1893        int ret = 0;
1894        u32 buf;
1895        bool rxenabled;
1896
1897        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1898
1899        rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1900
1901        if (rxenabled) {
1902                buf &= ~MAC_RX_RXEN_;
1903                ret = lan78xx_write_reg(dev, MAC_RX, buf);
1904        }
1905
1906        /* add 4 to size for FCS */
1907        buf &= ~MAC_RX_MAX_SIZE_MASK_;
1908        buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1909
1910        ret = lan78xx_write_reg(dev, MAC_RX, buf);
1911
1912        if (rxenabled) {
1913                buf |= MAC_RX_RXEN_;
1914                ret = lan78xx_write_reg(dev, MAC_RX, buf);
1915        }
1916
1917        return 0;
1918}
1919
1920static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1921{
1922        struct sk_buff *skb;
1923        unsigned long flags;
1924        int count = 0;
1925
1926        spin_lock_irqsave(&q->lock, flags);
1927        while (!skb_queue_empty(q)) {
1928                struct skb_data *entry;
1929                struct urb *urb;
1930                int ret;
1931
1932                skb_queue_walk(q, skb) {
1933                        entry = (struct skb_data *)skb->cb;
1934                        if (entry->state != unlink_start)
1935                                goto found;
1936                }
1937                break;
1938found:
1939                entry->state = unlink_start;
1940                urb = entry->urb;
1941
1942                /* Take a reference on the URB so it cannot be freed
1943                 * while usb_unlink_urb() is running; otherwise this may
1944                 * trigger a use-after-free inside usb_unlink_urb(), since
1945                 * usb_unlink_urb() always races with the .complete
1946                 * handler (including defer_bh).
1947                 */
1948                usb_get_urb(urb);
1949                spin_unlock_irqrestore(&q->lock, flags);
1950                /* during some PM-driven resume scenarios,
1951                 * these (async) unlinks complete immediately
1952                 */
1953                ret = usb_unlink_urb(urb);
1954                if (ret != -EINPROGRESS && ret != 0)
1955                        netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1956                else
1957                        count++;
1958                usb_put_urb(urb);
1959                spin_lock_irqsave(&q->lock, flags);
1960        }
1961        spin_unlock_irqrestore(&q->lock, flags);
1962        return count;
1963}
1964
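/* MTU changes are bounded by MAX_SINGLE_PACKET_SIZE.  A link-layer MTU
 * that is an exact multiple of the USB max packet size is rejected with
 * -EDOM, since an MTU-sized frame would then end exactly on a packet
 * boundary and require an extra zero-length packet.
 */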
1965static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1966{
1967        struct lan78xx_net *dev = netdev_priv(netdev);
1968        int ll_mtu = new_mtu + netdev->hard_header_len;
1969        int old_hard_mtu = dev->hard_mtu;
1970        int old_rx_urb_size = dev->rx_urb_size;
1971        int ret;
1972
1973        if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1974                return -EINVAL;
1975
1976        if (new_mtu <= 0)
1977                return -EINVAL;
1978        /* no second zero-length packet read wanted after mtu-sized packets */
1979        if ((ll_mtu % dev->maxpacket) == 0)
1980                return -EDOM;
1981
1982        ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1983
1984        netdev->mtu = new_mtu;
1985
1986        dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1987        if (dev->rx_urb_size == old_hard_mtu) {
1988                dev->rx_urb_size = dev->hard_mtu;
1989                if (dev->rx_urb_size > old_rx_urb_size) {
1990                        if (netif_running(dev->net)) {
1991                                unlink_urbs(dev, &dev->rxq);
1992                                tasklet_schedule(&dev->bh);
1993                        }
1994                }
1995        }
1996
1997        return 0;
1998}
1999
2000int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2001{
2002        struct lan78xx_net *dev = netdev_priv(netdev);
2003        struct sockaddr *addr = p;
2004        u32 addr_lo, addr_hi;
2005        int ret;
2006
2007        if (netif_running(netdev))
2008                return -EBUSY;
2009
2010        if (!is_valid_ether_addr(addr->sa_data))
2011                return -EADDRNOTAVAIL;
2012
2013        ether_addr_copy(netdev->dev_addr, addr->sa_data);
2014
2015        addr_lo = netdev->dev_addr[0] |
2016                  netdev->dev_addr[1] << 8 |
2017                  netdev->dev_addr[2] << 16 |
2018                  netdev->dev_addr[3] << 24;
2019        addr_hi = netdev->dev_addr[4] |
2020                  netdev->dev_addr[5] << 8;
2021
2022        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2023        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2024
2025        return 0;
2026}
2027
2028/* Enable or disable Rx checksum offload engine */
2029static int lan78xx_set_features(struct net_device *netdev,
2030                                netdev_features_t features)
2031{
2032        struct lan78xx_net *dev = netdev_priv(netdev);
2033        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2034        unsigned long flags;
2035        int ret;
2036
2037        spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2038
2039        if (features & NETIF_F_RXCSUM) {
2040                pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2041                pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2042        } else {
2043                pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2044                pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2045        }
2046
2047        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2048                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2049        else
2050                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2051
2052        spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2053
2054        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2055
2056        return 0;
2057}
2058
2059static void lan78xx_deferred_vlan_write(struct work_struct *param)
2060{
2061        struct lan78xx_priv *pdata =
2062                        container_of(param, struct lan78xx_priv, set_vlan);
2063        struct lan78xx_net *dev = pdata->dev;
2064
2065        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2066                               DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2067}
2068
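/* VLAN filtering keeps a bitmap of the 4096 possible VIDs in
 * pdata->vlan_table, packed into 32-bit words: bit (vid & 0x1F) of word
 * (vid >> 5).  For example, VID 100 maps to bit 4 of word 3.  The table
 * is flushed to the device's data port from the set_vlan work item,
 * because the register writes must run in a sleepable context.
 */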
2069static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2070                                   __be16 proto, u16 vid)
2071{
2072        struct lan78xx_net *dev = netdev_priv(netdev);
2073        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2074        u16 vid_bit_index;
2075        u16 vid_dword_index;
2076
2077        vid_dword_index = (vid >> 5) & 0x7F;
2078        vid_bit_index = vid & 0x1F;
2079
2080        pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2081
2082        /* defer register writes to a sleepable context */
2083        schedule_work(&pdata->set_vlan);
2084
2085        return 0;
2086}
2087
2088static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2089                                    __be16 proto, u16 vid)
2090{
2091        struct lan78xx_net *dev = netdev_priv(netdev);
2092        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2093        u16 vid_bit_index;
2094        u16 vid_dword_index;
2095
2096        vid_dword_index = (vid >> 5) & 0x7F;
2097        vid_bit_index = vid & 0x1F;
2098
2099        pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2100
2101        /* defer register writes to a sleepable context */
2102        schedule_work(&pdata->set_vlan);
2103
2104        return 0;
2105}
2106
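/* Initialise the LTM (Latency Tolerance Messaging) registers.  If LTM
 * is enabled in USB_CFG1 and EEPROM/OTP offset 0x3F holds a descriptor
 * (length 24 plus a word offset), the six LTM register values are
 * loaded from that block; otherwise they are all cleared to zero.
 */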
2107static void lan78xx_init_ltm(struct lan78xx_net *dev)
2108{
2109        int ret;
2110        u32 buf;
2111        u32 regs[6] = { 0 };
2112
2113        ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2114        if (buf & USB_CFG1_LTM_ENABLE_) {
2115                u8 temp[2];
2116                /* Get values from EEPROM first */
2117                if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2118                        if (temp[0] == 24) {
2119                                ret = lan78xx_read_raw_eeprom(dev,
2120                                                              temp[1] * 2,
2121                                                              24,
2122                                                              (u8 *)regs);
2123                                if (ret < 0)
2124                                        return;
2125                        }
2126                } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2127                        if (temp[0] == 24) {
2128                                ret = lan78xx_read_raw_otp(dev,
2129                                                           temp[1] * 2,
2130                                                           24,
2131                                                           (u8 *)regs);
2132                                if (ret < 0)
2133                                        return;
2134                        }
2135                }
2136        }
2137
2138        lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2139        lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2140        lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2141        lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2142        lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2143        lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2144}
2145
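/* Full hardware (re)initialisation: issue a lite reset and wait for
 * HW_CFG_LRST_ to clear, restore the MAC address, size the bulk-in
 * burst cap, delay and queue lengths for the negotiated USB speed, set
 * up the FIFOs, RFE and checksum offload, reset the PHY, and finally
 * enable the MAC and FCT transmit and receive paths.
 */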
2146static int lan78xx_reset(struct lan78xx_net *dev)
2147{
2148        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2149        u32 buf;
2150        int ret = 0;
2151        unsigned long timeout;
2152
2153        ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2154        buf |= HW_CFG_LRST_;
2155        ret = lan78xx_write_reg(dev, HW_CFG, buf);
2156
2157        timeout = jiffies + HZ;
2158        do {
2159                mdelay(1);
2160                ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2161                if (time_after(jiffies, timeout)) {
2162                        netdev_warn(dev->net,
2163                                    "timeout on completion of LiteReset");
2164                        return -EIO;
2165                }
2166        } while (buf & HW_CFG_LRST_);
2167
2168        lan78xx_init_mac_address(dev);
2169
2170        /* save DEVID for later usage */
2171        ret = lan78xx_read_reg(dev, ID_REV, &buf);
2172        dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2173        dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2174
2175        /* Respond to the IN token with a NAK */
2176        ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2177        buf |= USB_CFG_BIR_;
2178        ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2179
2180        /* Init LTM */
2181        lan78xx_init_ltm(dev);
2182
2183        dev->net->hard_header_len += TX_OVERHEAD;
2184        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2185
2186        if (dev->udev->speed == USB_SPEED_SUPER) {
2187                buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2188                dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2189                dev->rx_qlen = 4;
2190                dev->tx_qlen = 4;
2191        } else if (dev->udev->speed == USB_SPEED_HIGH) {
2192                buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2193                dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2194                dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2195                dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2196        } else {
2197                buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2198                dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2199                dev->rx_qlen = 4;
                dev->tx_qlen = 4;        /* mirror the SuperSpeed default */
2200        }
2201
2202        ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2203        ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2204
2205        ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2206        buf |= HW_CFG_MEF_;
2207        ret = lan78xx_write_reg(dev, HW_CFG, buf);
2208
2209        ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2210        buf |= USB_CFG_BCE_;
2211        ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2212
2213        /* set FIFO sizes */
2214        buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2215        ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2216
2217        buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2218        ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2219
2220        ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2221        ret = lan78xx_write_reg(dev, FLOW, 0);
2222        ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2223
2224        /* Don't need rfe_ctl_lock during initialisation */
2225        ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2226        pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2227        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2228
2229        /* Enable or disable checksum offload engines */
2230        lan78xx_set_features(dev->net, dev->net->features);
2231
2232        lan78xx_set_multicast(dev->net);
2233
2234        /* reset PHY */
2235        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2236        buf |= PMT_CTL_PHY_RST_;
2237        ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2238
2239        timeout = jiffies + HZ;
2240        do {
2241                mdelay(1);
2242                ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2243                if (time_after(jiffies, timeout)) {
2244                        netdev_warn(dev->net, "timeout waiting for PHY Reset");
2245                        return -EIO;
2246                }
2247        } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2248
2249        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2250        buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2251        ret = lan78xx_write_reg(dev, MAC_CR, buf);
2252
2253        /* enable PHY interrupts */
2254        ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2255        buf |= INT_ENP_PHY_INT;
2256        ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
2257
2258        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2259        buf |= MAC_TX_TXEN_;
2260        ret = lan78xx_write_reg(dev, MAC_TX, buf);
2261
2262        ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2263        buf |= FCT_TX_CTL_EN_;
2264        ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2265
2266        ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2267
2268        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2269        buf |= MAC_RX_RXEN_;
2270        ret = lan78xx_write_reg(dev, MAC_RX, buf);
2271
2272        ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2273        buf |= FCT_RX_CTL_EN_;
2274        ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2275
2276        return 0;
2277}
2278
2279static void lan78xx_init_stats(struct lan78xx_net *dev)
2280{
2281        u32 *p;
2282        int i;
2283
2284        /* initialize rollover thresholds for the stats update;
2285         * some counters are 20 bits wide and some are 32 bits
2286         */
2287        p = (u32 *)&dev->stats.rollover_max;
2288        for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2289                p[i] = 0xFFFFF;
2290
2291        dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2292        dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2293        dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2294        dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2295        dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2296        dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2297        dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2298        dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2299        dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2300        dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2301
2302        lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2303}
2304
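/* ndo_open: wake the interface, reset the hardware, bring up the PHY,
 * submit the interrupt URB used for link-change notification, initialise
 * the statistics rollover state and kick a deferred link reset.
 */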
2305static int lan78xx_open(struct net_device *net)
2306{
2307        struct lan78xx_net *dev = netdev_priv(net);
2308        int ret;
2309
2310        ret = usb_autopm_get_interface(dev->intf);
2311        if (ret < 0)
2312                goto out;
2313
2314        ret = lan78xx_reset(dev);
2315        if (ret < 0)
2316                goto done;
2317
2318        ret = lan78xx_phy_init(dev);
2319        if (ret < 0)
2320                goto done;
2321
2322        /* for Link Check */
2323        if (dev->urb_intr) {
2324                ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2325                if (ret < 0) {
2326                        netif_err(dev, ifup, dev->net,
2327                                  "intr submit %d\n", ret);
2328                        goto done;
2329                }
2330        }
2331
2332        lan78xx_init_stats(dev);
2333
2334        set_bit(EVENT_DEV_OPEN, &dev->flags);
2335
2336        netif_start_queue(net);
2337
2338        dev->link_on = false;
2339
2340        lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2341done:
2342        usb_autopm_put_interface(dev->intf);
2343
2344out:
2345        return ret;
2346}
2347
2348static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2349{
2350        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2351        DECLARE_WAITQUEUE(wait, current);
2352        int temp;
2353
2354        /* ensure there are no more active urbs */
2355        add_wait_queue(&unlink_wakeup, &wait);
2356        set_current_state(TASK_UNINTERRUPTIBLE);
2357        dev->wait = &unlink_wakeup;
2358        temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2359
2360        /* maybe wait for deletions to finish. */
2361        while (!skb_queue_empty(&dev->rxq) &&
2362               !skb_queue_empty(&dev->txq) &&
2363               !skb_queue_empty(&dev->done)) {
2364                schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2365                set_current_state(TASK_UNINTERRUPTIBLE);
2366                netif_dbg(dev, ifdown, dev->net,
2367                          "waited for %d urb completions\n", temp);
2368        }
2369        set_current_state(TASK_RUNNING);
2370        dev->wait = NULL;
2371        remove_wait_queue(&unlink_wakeup, &wait);
2372}
2373
2374int lan78xx_stop(struct net_device *net)
2375{
2376        struct lan78xx_net              *dev = netdev_priv(net);
2377
2378        if (timer_pending(&dev->stat_monitor))
2379                del_timer_sync(&dev->stat_monitor);
2380
2381        phy_stop(net->phydev);
2382        phy_disconnect(net->phydev);
2383        net->phydev = NULL;
2384
2385        clear_bit(EVENT_DEV_OPEN, &dev->flags);
2386        netif_stop_queue(net);
2387
2388        netif_info(dev, ifdown, dev->net,
2389                   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2390                   net->stats.rx_packets, net->stats.tx_packets,
2391                   net->stats.rx_errors, net->stats.tx_errors);
2392
2393        lan78xx_terminate_urbs(dev);
2394
2395        usb_kill_urb(dev->urb_intr);
2396
2397        skb_queue_purge(&dev->rxq_pause);
2398
2399        /* deferred work (task, timer, softirq) must also stop.
2400         * can't flush_scheduled_work() until we drop rtnl (later),
2401         * else workers could deadlock; so make workers a NOP.
2402         */
2403        dev->flags = 0;
2404        cancel_delayed_work_sync(&dev->wq);
2405        tasklet_kill(&dev->bh);
2406
2407        usb_autopm_put_interface(dev->intf);
2408
2409        return 0;
2410}
2411
2412static int lan78xx_linearize(struct sk_buff *skb)
2413{
2414        return skb_linearize(skb);
2415}
2416
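/* Every transmitted frame is prefixed with two little-endian 32-bit
 * command words (TX_OVERHEAD == 8 bytes): tx_cmd_a carries the frame
 * length, FCS generation and checksum-offload flags plus the LSO bit,
 * and tx_cmd_b carries the TSO MSS and the VLAN tag to insert.  The skb
 * is linearised first so the command words and payload are contiguous.
 */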
2417static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2418                                       struct sk_buff *skb, gfp_t flags)
2419{
2420        u32 tx_cmd_a, tx_cmd_b;
2421
2422        if (skb_headroom(skb) < TX_OVERHEAD) {
2423                struct sk_buff *skb2;
2424
2425                skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2426                dev_kfree_skb_any(skb);
2427                skb = skb2;
2428                if (!skb)
2429                        return NULL;
2430        }
2431
2432        if (lan78xx_linearize(skb) < 0)
2433                return NULL;
2434
2435        tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2436
2437        if (skb->ip_summed == CHECKSUM_PARTIAL)
2438                tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2439
2440        tx_cmd_b = 0;
2441        if (skb_is_gso(skb)) {
2442                u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2443
2444                tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2445
2446                tx_cmd_a |= TX_CMD_A_LSO_;
2447        }
2448
2449        if (skb_vlan_tag_present(skb)) {
2450                tx_cmd_a |= TX_CMD_A_IVTG_;
2451                tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2452        }
2453
2454        skb_push(skb, 4);
2455        cpu_to_le32s(&tx_cmd_b);
2456        memcpy(skb->data, &tx_cmd_b, 4);
2457
2458        skb_push(skb, 4);
2459        cpu_to_le32s(&tx_cmd_a);
2460        memcpy(skb->data, &tx_cmd_a, 4);
2461
2462        return skb;
2463}
2464
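/* Move a completed skb from its active queue (rxq or txq) to dev->done
 * and record its new state; the bottom-half tasklet is scheduled when
 * the done list goes from empty to non-empty.  The previous state is
 * returned so callers can detect an unlink that raced with completion.
 */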
2465static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2466                               struct sk_buff_head *list, enum skb_state state)
2467{
2468        unsigned long flags;
2469        enum skb_state old_state;
2470        struct skb_data *entry = (struct skb_data *)skb->cb;
2471
2472        spin_lock_irqsave(&list->lock, flags);
2473        old_state = entry->state;
2474        entry->state = state;
2475
2476        __skb_unlink(skb, list);
2477        spin_unlock(&list->lock);
2478        spin_lock(&dev->done.lock);
2479
2480        __skb_queue_tail(&dev->done, skb);
2481        if (skb_queue_len(&dev->done) == 1)
2482                tasklet_schedule(&dev->bh);
2483        spin_unlock_irqrestore(&dev->done.lock, flags);
2484
2485        return old_state;
2486}
2487
2488static void tx_complete(struct urb *urb)
2489{
2490        struct sk_buff *skb = (struct sk_buff *)urb->context;
2491        struct skb_data *entry = (struct skb_data *)skb->cb;
2492        struct lan78xx_net *dev = entry->dev;
2493
2494        if (urb->status == 0) {
2495                dev->net->stats.tx_packets += entry->num_of_packet;
2496                dev->net->stats.tx_bytes += entry->length;
2497        } else {
2498                dev->net->stats.tx_errors++;
2499
2500                switch (urb->status) {
2501                case -EPIPE:
2502                        lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2503                        break;
2504
2505                /* software-driven interface shutdown */
2506                case -ECONNRESET:
2507                case -ESHUTDOWN:
2508                        break;
2509
2510                case -EPROTO:
2511                case -ETIME:
2512                case -EILSEQ:
2513                        netif_stop_queue(dev->net);
2514                        break;
2515                default:
2516                        netif_dbg(dev, tx_err, dev->net,
2517                                  "tx err %d\n", entry->urb->status);
2518                        break;
2519                }
2520        }
2521
2522        usb_autopm_put_interface_async(dev->intf);
2523
2524        defer_bh(dev, skb, &dev->txq, tx_done);
2525}
2526
2527static void lan78xx_queue_skb(struct sk_buff_head *list,
2528                              struct sk_buff *newsk, enum skb_state state)
2529{
2530        struct skb_data *entry = (struct skb_data *)newsk->cb;
2531
2532        __skb_queue_tail(list, newsk);
2533        entry->state = state;
2534}
2535
2536netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2537{
2538        struct lan78xx_net *dev = netdev_priv(net);
2539        struct sk_buff *skb2 = NULL;
2540
2541        if (skb) {
2542                skb_tx_timestamp(skb);
2543                skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2544        }
2545
2546        if (skb2) {
2547                skb_queue_tail(&dev->txq_pend, skb2);
2548
2549                /* throttle the TX path at less than SuperSpeed USB */
2550                if ((dev->udev->speed < USB_SPEED_SUPER) &&
2551                    (skb_queue_len(&dev->txq_pend) > 10))
2552                        netif_stop_queue(net);
2553        } else {
2554                netif_dbg(dev, tx_err, dev->net,
2555                          "lan78xx_tx_prep return NULL\n");
2556                dev->net->stats.tx_errors++;
2557                dev->net->stats.tx_dropped++;
2558        }
2559
2560        tasklet_schedule(&dev->bh);
2561
2562        return NETDEV_TX_OK;
2563}
2564
2565int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2566{
2567        int tmp;
2568        struct usb_host_interface *alt = NULL;
2569        struct usb_host_endpoint *in = NULL, *out = NULL;
2570        struct usb_host_endpoint *status = NULL;
2571
2572        for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2573                unsigned ep;
2574
2575                in = NULL;
2576                out = NULL;
2577                status = NULL;
2578                alt = intf->altsetting + tmp;
2579
2580                for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2581                        struct usb_host_endpoint *e;
2582                        int intr = 0;
2583
2584                        e = alt->endpoint + ep;
2585                        switch (e->desc.bmAttributes) {
2586                        case USB_ENDPOINT_XFER_INT:
2587                                if (!usb_endpoint_dir_in(&e->desc))
2588                                        continue;
2589                                intr = 1;
2590                                /* FALLTHROUGH */
2591                        case USB_ENDPOINT_XFER_BULK:
2592                                break;
2593                        default:
2594                                continue;
2595                        }
2596                        if (usb_endpoint_dir_in(&e->desc)) {
2597                                if (!intr && !in)
2598                                        in = e;
2599                                else if (intr && !status)
2600                                        status = e;
2601                        } else {
2602                                if (!out)
2603                                        out = e;
2604                        }
2605                }
2606                if (in && out)
2607                        break;
2608        }
2609        if (!alt || !in || !out)
2610                return -EINVAL;
2611
2612        dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2613                                       in->desc.bEndpointAddress &
2614                                       USB_ENDPOINT_NUMBER_MASK);
2615        dev->pipe_out = usb_sndbulkpipe(dev->udev,
2616                                        out->desc.bEndpointAddress &
2617                                        USB_ENDPOINT_NUMBER_MASK);
2618        dev->ep_intr = status;
2619
2620        return 0;
2621}
2622
2623static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2624{
2625        struct lan78xx_priv *pdata = NULL;
2626        int ret;
2627        int i;
2628
2629        ret = lan78xx_get_endpoints(dev, intf);
2630
2631        dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2632
2633        pdata = (struct lan78xx_priv *)(dev->data[0]);
2634        if (!pdata) {
2635                netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2636                return -ENOMEM;
2637        }
2638
2639        pdata->dev = dev;
2640
2641        spin_lock_init(&pdata->rfe_ctl_lock);
2642        mutex_init(&pdata->dataport_mutex);
2643
2644        INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2645
2646        for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2647                pdata->vlan_table[i] = 0;
2648
2649        INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2650
2651        dev->net->features = 0;
2652
2653        if (DEFAULT_TX_CSUM_ENABLE)
2654                dev->net->features |= NETIF_F_HW_CSUM;
2655
2656        if (DEFAULT_RX_CSUM_ENABLE)
2657                dev->net->features |= NETIF_F_RXCSUM;
2658
2659        if (DEFAULT_TSO_CSUM_ENABLE)
2660                dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2661
2662        dev->net->hw_features = dev->net->features;
2663
2664        /* Init all registers */
2665        ret = lan78xx_reset(dev);
2666
2667        lan78xx_mdio_init(dev);
2668
2669        dev->net->flags |= IFF_MULTICAST;
2670
2671        pdata->wol = WAKE_MAGIC;
2672
2673        return 0;
2674}
2675
2676static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2677{
2678        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2679
2680        lan78xx_remove_mdio(dev);
2681
2682        if (pdata) {
2683                netif_dbg(dev, ifdown, dev->net, "free pdata");
2684                kfree(pdata);
2685                pdata = NULL;
2686                dev->data[0] = 0;
2687        }
2688}
2689
2690static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2691                                    struct sk_buff *skb,
2692                                    u32 rx_cmd_a, u32 rx_cmd_b)
2693{
2694        if (!(dev->net->features & NETIF_F_RXCSUM) ||
2695            unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2696                skb->ip_summed = CHECKSUM_NONE;
2697        } else {
2698                skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2699                skb->ip_summed = CHECKSUM_COMPLETE;
2700        }
2701}
2702
2703void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2704{
2705        int             status;
2706
2707        if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2708                skb_queue_tail(&dev->rxq_pause, skb);
2709                return;
2710        }
2711
2712        dev->net->stats.rx_packets++;
2713        dev->net->stats.rx_bytes += skb->len;
2714
2715        skb->protocol = eth_type_trans(skb, dev->net);
2716
2717        netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2718                  skb->len + sizeof(struct ethhdr), skb->protocol);
2719        memset(skb->cb, 0, sizeof(struct skb_data));
2720
2721        if (skb_defer_rx_timestamp(skb))
2722                return;
2723
2724        status = netif_rx(skb);
2725        if (status != NET_RX_SUCCESS)
2726                netif_dbg(dev, rx_err, dev->net,
2727                          "netif_rx status %d\n", status);
2728}
2729
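/* A single bulk-in buffer can hold several frames.  Each frame starts
 * with rx_cmd_a/rx_cmd_b (32-bit) and rx_cmd_c (16-bit) in little
 * endian: rx_cmd_a holds the length and the RX_CMD_A_RED_ error flag,
 * rx_cmd_b holds the checksum used for offload.  Frames are padded to a
 * 4-byte alignment (see RXW_PADDING) before the next command words, and
 * the last frame of the batch is handed up in the original skb.
 */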
2730static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2731{
2732        if (skb->len < dev->net->hard_header_len)
2733                return 0;
2734
2735        while (skb->len > 0) {
2736                u32 rx_cmd_a, rx_cmd_b, align_count, size;
2737                u16 rx_cmd_c;
2738                struct sk_buff *skb2;
2739                unsigned char *packet;
2740
2741                memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2742                le32_to_cpus(&rx_cmd_a);
2743                skb_pull(skb, sizeof(rx_cmd_a));
2744
2745                memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2746                le32_to_cpus(&rx_cmd_b);
2747                skb_pull(skb, sizeof(rx_cmd_b));
2748
2749                memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2750                le16_to_cpus(&rx_cmd_c);
2751                skb_pull(skb, sizeof(rx_cmd_c));
2752
2753                packet = skb->data;
2754
2755                /* get the packet length */
2756                size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2757                align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2758
2759                if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2760                        netif_dbg(dev, rx_err, dev->net,
2761                                  "Error rx_cmd_a=0x%08x", rx_cmd_a);
2762                } else {
2763                        /* last frame in this batch */
2764                        if (skb->len == size) {
2765                                lan78xx_rx_csum_offload(dev, skb,
2766                                                        rx_cmd_a, rx_cmd_b);
2767
2768                                skb_trim(skb, skb->len - 4); /* remove fcs */
2769                                skb->truesize = size + sizeof(struct sk_buff);
2770
2771                                return 1;
2772                        }
2773
2774                        skb2 = skb_clone(skb, GFP_ATOMIC);
2775                        if (unlikely(!skb2)) {
2776                                netdev_warn(dev->net, "Error allocating skb");
2777                                return 0;
2778                        }
2779
2780                        skb2->len = size;
2781                        skb2->data = packet;
2782                        skb_set_tail_pointer(skb2, size);
2783
2784                        lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2785
2786                        skb_trim(skb2, skb2->len - 4); /* remove fcs */
2787                        skb2->truesize = size + sizeof(struct sk_buff);
2788
2789                        lan78xx_skb_return(dev, skb2);
2790                }
2791
2792                skb_pull(skb, size);
2793
2794                /* padding bytes before the next frame starts */
2795                if (skb->len)
2796                        skb_pull(skb, align_count);
2797        }
2798
2799        return 1;
2800}
2801
2802static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2803{
2804        if (!lan78xx_rx(dev, skb)) {
2805                dev->net->stats.rx_errors++;
2806                goto done;
2807        }
2808
2809        if (skb->len) {
2810                lan78xx_skb_return(dev, skb);
2811                return;
2812        }
2813
2814        netif_dbg(dev, rx_err, dev->net, "drop\n");
2815        dev->net->stats.rx_errors++;
2816done:
2817        skb_queue_tail(&dev->done, skb);
2818}
2819
2820static void rx_complete(struct urb *urb);
2821
2822static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2823{
2824        struct sk_buff *skb;
2825        struct skb_data *entry;
2826        unsigned long lockflags;
2827        size_t size = dev->rx_urb_size;
2828        int ret = 0;
2829
2830        skb = netdev_alloc_skb_ip_align(dev->net, size);
2831        if (!skb) {
2832                usb_free_urb(urb);
2833                return -ENOMEM;
2834        }
2835
2836        entry = (struct skb_data *)skb->cb;
2837        entry->urb = urb;
2838        entry->dev = dev;
2839        entry->length = 0;
2840
2841        usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2842                          skb->data, size, rx_complete, skb);
2843
2844        spin_lock_irqsave(&dev->rxq.lock, lockflags);
2845
2846        if (netif_device_present(dev->net) &&
2847            netif_running(dev->net) &&
2848            !test_bit(EVENT_RX_HALT, &dev->flags) &&
2849            !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2850                ret = usb_submit_urb(urb, GFP_ATOMIC);
2851                switch (ret) {
2852                case 0:
2853                        lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2854                        break;
2855                case -EPIPE:
2856                        lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2857                        break;
2858                case -ENODEV:
2859                        netif_dbg(dev, ifdown, dev->net, "device gone\n");
2860                        netif_device_detach(dev->net);
2861                        break;
2862                case -EHOSTUNREACH:
2863                        ret = -ENOLINK;
2864                        break;
2865                default:
2866                        netif_dbg(dev, rx_err, dev->net,
2867                                  "rx submit, %d\n", ret);
2868                        tasklet_schedule(&dev->bh);
2869                }
2870        } else {
2871                netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2872                ret = -ENOLINK;
2873        }
2874        spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2875        if (ret) {
2876                dev_kfree_skb_any(skb);
2877                usb_free_urb(urb);
2878        }
2879        return ret;
2880}
2881
2882static void rx_complete(struct urb *urb)
2883{
2884        struct sk_buff  *skb = (struct sk_buff *)urb->context;
2885        struct skb_data *entry = (struct skb_data *)skb->cb;
2886        struct lan78xx_net *dev = entry->dev;
2887        int urb_status = urb->status;
2888        enum skb_state state;
2889
2890        skb_put(skb, urb->actual_length);
2891        state = rx_done;
2892        entry->urb = NULL;
2893
2894        switch (urb_status) {
2895        case 0:
2896                if (skb->len < dev->net->hard_header_len) {
2897                        state = rx_cleanup;
2898                        dev->net->stats.rx_errors++;
2899                        dev->net->stats.rx_length_errors++;
2900                        netif_dbg(dev, rx_err, dev->net,
2901                                  "rx length %d\n", skb->len);
2902                }
2903                usb_mark_last_busy(dev->udev);
2904                break;
2905        case -EPIPE:
2906                dev->net->stats.rx_errors++;
2907                lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2908                /* FALLTHROUGH */
2909        case -ECONNRESET:                               /* async unlink */
2910        case -ESHUTDOWN:                                /* hardware gone */
2911                netif_dbg(dev, ifdown, dev->net,
2912                          "rx shutdown, code %d\n", urb_status);
2913                state = rx_cleanup;
2914                entry->urb = urb;
2915                urb = NULL;
2916                break;
2917        case -EPROTO:
2918        case -ETIME:
2919        case -EILSEQ:
2920                dev->net->stats.rx_errors++;
2921                state = rx_cleanup;
2922                entry->urb = urb;
2923                urb = NULL;
2924                break;
2925
2926        /* data overrun ... flush fifo? */
2927        case -EOVERFLOW:
2928                dev->net->stats.rx_over_errors++;
2929                /* FALLTHROUGH */
2930
2931        default:
2932                state = rx_cleanup;
2933                dev->net->stats.rx_errors++;
2934                netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2935                break;
2936        }
2937
2938        state = defer_bh(dev, skb, &dev->rxq, state);
2939
2940        if (urb) {
2941                if (netif_running(dev->net) &&
2942                    !test_bit(EVENT_RX_HALT, &dev->flags) &&
2943                    state != unlink_start) {
2944                        rx_submit(dev, urb, GFP_ATOMIC);
2945                        return;
2946                }
2947                usb_free_urb(urb);
2948        }
2949        netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
2950}
2951
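/* TX bottom half: pending skbs from txq_pend are coalesced into a single
 * bulk-out URB, each copied at a 4-byte aligned offset, up to
 * MAX_SINGLE_PACKET_SIZE in total; a GSO skb is always sent on its own.
 * When the accumulated length is an exact multiple of the endpoint max
 * packet size, URB_ZERO_PACKET is requested so the transfer terminates,
 * and while the device is suspended URBs are parked on dev->deferred for
 * submission at resume time.
 */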
2952static void lan78xx_tx_bh(struct lan78xx_net *dev)
2953{
2954        int length;
2955        struct urb *urb = NULL;
2956        struct skb_data *entry;
2957        unsigned long flags;
2958        struct sk_buff_head *tqp = &dev->txq_pend;
2959        struct sk_buff *skb, *skb2;
2960        int ret;
2961        int count, pos;
2962        int skb_totallen, pkt_cnt;
2963
2964        skb_totallen = 0;
2965        pkt_cnt = 0;
2966        count = 0;
2967        length = 0;
2968        for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2969                if (skb_is_gso(skb)) {
2970                        if (pkt_cnt) {
2971                                /* handle previous packets first */
2972                                break;
2973                        }
2974                        count = 1;
2975                        length = skb->len - TX_OVERHEAD;
2976                        skb2 = skb_dequeue(tqp);
2977                        goto gso_skb;
2978                }
2979
2980                if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2981                        break;
2982                skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2983                pkt_cnt++;
2984        }
2985
2986        /* copy to a single skb */
2987        skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2988        if (!skb)
2989                goto drop;
2990
2991        skb_put(skb, skb_totallen);
2992
2993        for (count = pos = 0; count < pkt_cnt; count++) {
2994                skb2 = skb_dequeue(tqp);
2995                if (skb2) {
2996                        length += (skb2->len - TX_OVERHEAD);
2997                        memcpy(skb->data + pos, skb2->data, skb2->len);
2998                        pos += roundup(skb2->len, sizeof(u32));
2999                        dev_kfree_skb(skb2);
3000                }
3001        }
3002
3003gso_skb:
3004        urb = usb_alloc_urb(0, GFP_ATOMIC);
3005        if (!urb) {
3006                netif_dbg(dev, tx_err, dev->net, "no urb\n");
3007                goto drop;
3008        }
3009
3010        entry = (struct skb_data *)skb->cb;
3011        entry->urb = urb;
3012        entry->dev = dev;
3013        entry->length = length;
3014        entry->num_of_packet = count;
3015
3016        spin_lock_irqsave(&dev->txq.lock, flags);
3017        ret = usb_autopm_get_interface_async(dev->intf);
3018        if (ret < 0) {
3019                spin_unlock_irqrestore(&dev->txq.lock, flags);
3020                goto drop;
3021        }
3022
3023        usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3024                          skb->data, skb->len, tx_complete, skb);
3025
3026        if (length % dev->maxpacket == 0) {
3027                /* send USB_ZERO_PACKET */
3028                urb->transfer_flags |= URB_ZERO_PACKET;
3029        }
3030
3031#ifdef CONFIG_PM
3032        /* if this triggers, the device is still asleep */
3033        if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3034                /* transmission will be done in resume */
3035                usb_anchor_urb(urb, &dev->deferred);
3036                /* no point in processing more packets */
3037                netif_stop_queue(dev->net);
3038                usb_put_urb(urb);
3039                spin_unlock_irqrestore(&dev->txq.lock, flags);
3040                netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3041                return;
3042        }
3043#endif
3044
3045        ret = usb_submit_urb(urb, GFP_ATOMIC);
3046        switch (ret) {
3047        case 0:
3048                netif_trans_update(dev->net);
3049                lan78xx_queue_skb(&dev->txq, skb, tx_start);
3050                if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3051                        netif_stop_queue(dev->net);
3052                break;
3053        case -EPIPE:
3054                netif_stop_queue(dev->net);
3055                lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3056                usb_autopm_put_interface_async(dev->intf);
3057                break;
3058        default:
3059                usb_autopm_put_interface_async(dev->intf);
3060                netif_dbg(dev, tx_err, dev->net,
3061                          "tx: submit urb err %d\n", ret);
3062                break;
3063        }
3064
3065        spin_unlock_irqrestore(&dev->txq.lock, flags);
3066
3067        if (ret) {
3068                netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3069drop:
3070                dev->net->stats.tx_dropped++;
3071                if (skb)
3072                        dev_kfree_skb_any(skb);
3073                usb_free_urb(urb);
3074        } else {
3075                netif_dbg(dev, tx_queued, dev->net,
3076                          "> tx, len %d, type 0x%x\n", length, skb->protocol);
        }
3077}
3078
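/* RX bottom half: top up the pool of in-flight bulk-in URBs.  At most
 * ten new URBs are allocated per pass; if rxq is still below rx_qlen
 * afterwards the tasklet is rescheduled to retry.  The TX queue is also
 * woken here once txq has drained below tx_qlen.
 */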
3079static void lan78xx_rx_bh(struct lan78xx_net *dev)
3080{
3081        struct urb *urb;
3082        int i;
3083
3084        if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3085                for (i = 0; i < 10; i++) {
3086                        if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3087                                break;
3088                        urb = usb_alloc_urb(0, GFP_ATOMIC);
3089                        if (urb)
3090                                if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3091                                        return;
3092                }
3093
3094                if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3095                        tasklet_schedule(&dev->bh);
3096        }
3097        if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3098                netif_wake_queue(dev->net);
3099}
3100
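/* Tasklet body: process completed URBs queued on dev->done.  rx_done
 * entries are handed to rx_process() to push received frames up the
 * stack; tx_done and rx_cleanup entries have their URB and skb freed.
 * While the interface is up, the stats timer backoff is reset, pending
 * TX is flushed through lan78xx_tx_bh() and the RX ring is refilled
 * through lan78xx_rx_bh().
 */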
3101static void lan78xx_bh(unsigned long param)
3102{
3103        struct lan78xx_net *dev = (struct lan78xx_net *)param;
3104        struct sk_buff *skb;
3105        struct skb_data *entry;
3106
3107        while ((skb = skb_dequeue(&dev->done))) {
3108                entry = (struct skb_data *)(skb->cb);
3109                switch (entry->state) {
3110                case rx_done:
3111                        entry->state = rx_cleanup;
3112                        rx_process(dev, skb);
3113                        continue;
3114                case tx_done:
3115                        usb_free_urb(entry->urb);
3116                        dev_kfree_skb(skb);
3117                        continue;
3118                case rx_cleanup:
3119                        usb_free_urb(entry->urb);
3120                        dev_kfree_skb(skb);
3121                        continue;
3122                default:
3123                        netdev_dbg(dev->net, "skb state %d\n", entry->state);
3124                        return;
3125                }
3126        }
3127
3128        if (netif_device_present(dev->net) && netif_running(dev->net)) {
3129                /* reset update timer delta */
3130                if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3131                        dev->delta = 1;
3132                        mod_timer(&dev->stat_monitor,
3133                                  jiffies + STAT_UPDATE_TIMER);
3134                }
3135
3136                if (!skb_queue_empty(&dev->txq_pend))
3137                        lan78xx_tx_bh(dev);
3138
3139                if (!timer_pending(&dev->delay) &&
3140                    !test_bit(EVENT_RX_HALT, &dev->flags))
3141                        lan78xx_rx_bh(dev);
3142        }
3143}
3144
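/* Deferred-event worker scheduled by lan78xx_defer_kevent().  It clears
 * stalled bulk endpoints for EVENT_TX_HALT/EVENT_RX_HALT, calls
 * lan78xx_link_reset() for EVENT_LINK_RESET, and for EVENT_STAT_UPDATE
 * refreshes the hardware counters while doubling the polling interval
 * (dev->delta, capped at 50 periods of STAT_UPDATE_TIMER).
 */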
3145static void lan78xx_delayedwork(struct work_struct *work)
3146{
3147        int status;
3148        struct lan78xx_net *dev;
3149
3150        dev = container_of(work, struct lan78xx_net, wq.work);
3151
3152        if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3153                unlink_urbs(dev, &dev->txq);
3154                status = usb_autopm_get_interface(dev->intf);
3155                if (status < 0)
3156                        goto fail_pipe;
3157                status = usb_clear_halt(dev->udev, dev->pipe_out);
3158                usb_autopm_put_interface(dev->intf);
3159                if (status < 0 &&
3160                    status != -EPIPE &&
3161                    status != -ESHUTDOWN) {
3162                        if (netif_msg_tx_err(dev))
3163fail_pipe:
3164                                netdev_err(dev->net,
3165                                           "can't clear tx halt, status %d\n",
3166                                           status);
3167                } else {
3168                        clear_bit(EVENT_TX_HALT, &dev->flags);
3169                        if (status != -ESHUTDOWN)
3170                                netif_wake_queue(dev->net);
3171                }
3172        }
3173        if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3174                unlink_urbs(dev, &dev->rxq);
3175                status = usb_autopm_get_interface(dev->intf);
3176                if (status < 0)
3177                        goto fail_halt;
3178                status = usb_clear_halt(dev->udev, dev->pipe_in);
3179                usb_autopm_put_interface(dev->intf);
3180                if (status < 0 &&
3181                    status != -EPIPE &&
3182                    status != -ESHUTDOWN) {
3183                        if (netif_msg_rx_err(dev))
3184fail_halt:
3185                                netdev_err(dev->net,
3186                                           "can't clear rx halt, status %d\n",
3187                                           status);
3188                } else {
3189                        clear_bit(EVENT_RX_HALT, &dev->flags);
3190                        tasklet_schedule(&dev->bh);
3191                }
3192        }
3193
3194        if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3195                int ret = 0;
3196
3197                clear_bit(EVENT_LINK_RESET, &dev->flags);
3198                status = usb_autopm_get_interface(dev->intf);
3199                if (status < 0)
3200                        goto skip_reset;
3201                ret = lan78xx_link_reset(dev);
                if (ret < 0) {
3202                        usb_autopm_put_interface(dev->intf);
3203skip_reset:
3204                        netdev_info(dev->net, "link reset failed (%d)\n",
3205                                    ret);
3206                } else {
3207                        usb_autopm_put_interface(dev->intf);
3208                }
3209        }
3210
3211        if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3212                lan78xx_update_stats(dev);
3213
3214                clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3215
3216                mod_timer(&dev->stat_monitor,
3217                          jiffies + (STAT_UPDATE_TIMER * dev->delta));
3218
3219                dev->delta = min((dev->delta * 2), 50);
3220        }
3221}
3222
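/* Completion handler for the interrupt-in URB.  A successful completion
 * is forwarded to lan78xx_status(); -ENOENT/-ESHUTDOWN mean the URB was
 * killed or the device is gone, so it is not resubmitted.  Any other
 * status is only logged, since this endpoint already polls
 * infrequently, and the URB is resubmitted while the netdev is running.
 */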
3223static void intr_complete(struct urb *urb)
3224{
3225        struct lan78xx_net *dev = urb->context;
3226        int status = urb->status;
3227
3228        switch (status) {
3229        /* success */
3230        case 0:
3231                lan78xx_status(dev, urb);
3232                break;
3233
3234        /* software-driven interface shutdown */
3235        case -ENOENT:                   /* urb killed */
3236        case -ESHUTDOWN:                /* hardware gone */
3237                netif_dbg(dev, ifdown, dev->net,
3238                          "intr shutdown, code %d\n", status);
3239                return;
3240
3241        /* NOTE:  not throttling like RX/TX, since this endpoint
3242         * already polls infrequently
3243         */
3244        default:
3245                netdev_dbg(dev->net, "intr status %d\n", status);
3246                break;
3247        }
3248
3249        if (!netif_running(dev->net))
3250                return;
3251
3252        memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3253        status = usb_submit_urb(urb, GFP_ATOMIC);
3254        if (status != 0)
3255                netif_err(dev, timer, dev->net,
3256                          "intr resubmit --> %d\n", status);
3257}
3258
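/* USB disconnect: unregister the netdev first so no new I/O can start,
 * then cancel the deferred-event work, drop any URBs anchored for
 * resume, unbind the driver data, kill and free the interrupt URB, and
 * finally release the netdev and the usb_device reference taken in
 * probe.
 */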
3259static void lan78xx_disconnect(struct usb_interface *intf)
3260{
3261        struct lan78xx_net              *dev;
3262        struct usb_device               *udev;
3263        struct net_device               *net;
3264
3265        dev = usb_get_intfdata(intf);
3266        usb_set_intfdata(intf, NULL);
3267        if (!dev)
3268                return;
3269
3270        udev = interface_to_usbdev(intf);
3271
3272        net = dev->net;
3273        unregister_netdev(net);
3274
3275        cancel_delayed_work_sync(&dev->wq);
3276
3277        usb_scuttle_anchored_urbs(&dev->deferred);
3278
3279        lan78xx_unbind(dev, intf);
3280
3281        usb_kill_urb(dev->urb_intr);
3282        usb_free_urb(dev->urb_intr);
3283
3284        free_netdev(net);
3285        usb_put_dev(udev);
3286}
3287
3288static void lan78xx_tx_timeout(struct net_device *net)
3289{
3290        struct lan78xx_net *dev = netdev_priv(net);
3291
3292        unlink_urbs(dev, &dev->txq);
3293        tasklet_schedule(&dev->bh);
3294}
3295
3296static const struct net_device_ops lan78xx_netdev_ops = {
3297        .ndo_open               = lan78xx_open,
3298        .ndo_stop               = lan78xx_stop,
3299        .ndo_start_xmit         = lan78xx_start_xmit,
3300        .ndo_tx_timeout         = lan78xx_tx_timeout,
3301        .ndo_change_mtu         = lan78xx_change_mtu,
3302        .ndo_set_mac_address    = lan78xx_set_mac_addr,
3303        .ndo_validate_addr      = eth_validate_addr,
3304        .ndo_do_ioctl           = lan78xx_ioctl,
3305        .ndo_set_rx_mode        = lan78xx_set_multicast,
3306        .ndo_set_features       = lan78xx_set_features,
3307        .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
3308        .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
3309};
3310
3311static void lan78xx_stat_monitor(unsigned long param)
3312{
3313        struct lan78xx_net *dev;
3314
3315        dev = (struct lan78xx_net *)param;
3316
3317        lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3318}
3319
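/* Probe: allocate the netdev and driver state, bind to the interface,
 * set up the bulk and interrupt pipes and register the net device.  The
 * endpoint assignments assume the fixed LAN78xx layout: bulk-in,
 * bulk-out and interrupt-in at indices 0, 1 and 2 of the current
 * altsetting.
 */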
3320static int lan78xx_probe(struct usb_interface *intf,
3321                         const struct usb_device_id *id)
3322{
3323        struct lan78xx_net *dev;
3324        struct net_device *netdev;
3325        struct usb_device *udev;
3326        int ret;
3327        unsigned maxp;
3328        unsigned period;
3329        u8 *buf = NULL;
3330
3331        udev = interface_to_usbdev(intf);
3332        udev = usb_get_dev(udev);
3333
3334        ret = -ENOMEM;
3335        netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3336        if (!netdev) {
3337                dev_err(&intf->dev, "Error: OOM\n");
3338                goto out1;
3339        }
3340
3341        /* netdev_printk() needs this */
3342        SET_NETDEV_DEV(netdev, &intf->dev);
3343
3344        dev = netdev_priv(netdev);
3345        dev->udev = udev;
3346        dev->intf = intf;
3347        dev->net = netdev;
3348        dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3349                                        | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3350
3351        skb_queue_head_init(&dev->rxq);
3352        skb_queue_head_init(&dev->txq);
3353        skb_queue_head_init(&dev->done);
3354        skb_queue_head_init(&dev->rxq_pause);
3355        skb_queue_head_init(&dev->txq_pend);
3356        mutex_init(&dev->phy_mutex);
3357
3358        tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3359        INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3360        init_usb_anchor(&dev->deferred);
3361
3362        netdev->netdev_ops = &lan78xx_netdev_ops;
3363        netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3364        netdev->ethtool_ops = &lan78xx_ethtool_ops;
3365
3366        dev->stat_monitor.function = lan78xx_stat_monitor;
3367        dev->stat_monitor.data = (unsigned long)dev;
3368        dev->delta = 1;
3369        init_timer(&dev->stat_monitor);
3370
3371        mutex_init(&dev->stats.access_lock);
3372
3373        ret = lan78xx_bind(dev, intf);
3374        if (ret < 0)
3375                goto out2;
3376        strcpy(netdev->name, "eth%d");
3377
3378        if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3379                netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3380
        /* make sure the expected bulk-in, bulk-out and interrupt-in
         * endpoints are actually present before dereferencing them
         */
        if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
                ret = -ENODEV;
                goto out3;
        }

3381        dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3382        dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3383        dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3384
3385        dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3386        dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3387
3388        dev->pipe_intr = usb_rcvintpipe(dev->udev,
3389                                        dev->ep_intr->desc.bEndpointAddress &
3390                                        USB_ENDPOINT_NUMBER_MASK);
3391        period = dev->ep_intr->desc.bInterval;
3392
3393        maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3394        buf = kmalloc(maxp, GFP_KERNEL);
3395        if (buf) {
3396                dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3397                if (!dev->urb_intr) {
3398                        kfree(buf);
3399                        goto out3;
3400                } else {
3401                        usb_fill_int_urb(dev->urb_intr, dev->udev,
3402                                         dev->pipe_intr, buf, maxp,
3403                                         intr_complete, dev, period);
3404                }
3405        }
3406
3407        dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3408
3409        /* driver requires remote-wakeup capability during autosuspend. */
3410        intf->needs_remote_wakeup = 1;
3411
3412        ret = register_netdev(netdev);
3413        if (ret != 0) {
3414                netif_err(dev, probe, netdev, "couldn't register the device\n");
3415                goto out4;
3416        }
3417
3418        usb_set_intfdata(intf, dev);
3419
3420        ret = device_set_wakeup_enable(&udev->dev, true);
3421
3422        /* The default autosuspend delay of 2 seconds has more overhead
3423         * than benefit; use 10 seconds instead.
3424         */
3425        pm_runtime_set_autosuspend_delay(&udev->dev,
3426                                         DEFAULT_AUTOSUSPEND_DELAY);
3427
3428        return 0;
3429
out4:
        usb_free_urb(dev->urb_intr);
        kfree(buf);
3430out3:
3431        lan78xx_unbind(dev, intf);
3432out2:
3433        free_netdev(netdev);
3434out1:
3435        usb_put_dev(udev);
3436
3437        return ret;
3438}
3439
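/* Compute the CRC-16 the wakeup-frame filter hardware is expected to
 * match: polynomial 0x8005 (x^16 + x^15 + x^2 + 1), seed 0xFFFF, data
 * bits fed LSB first.  The result goes into the CRC16 field of a
 * WUF_CFG register, e.g. in lan78xx_set_suspend():
 *
 *   crc = lan78xx_wakeframe_crc16(arp_type, 2);
 *   lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | ... |
 *                     (crc & WUF_CFGX_CRC16_MASK_));
 *
 * Note this is not the reflected CRC-16 implemented by lib/crc16, so
 * crc16() is not a drop-in replacement.
 */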
3440static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3441{
3442        const u16 crc16poly = 0x8005;
3443        int i;
3444        u16 bit, crc, msb;
3445        u8 data;
3446
3447        crc = 0xFFFF;
3448        for (i = 0; i < len; i++) {
3449                data = *buf++;
3450                for (bit = 0; bit < 8; bit++) {
3451                        msb = crc >> 15;
3452                        crc <<= 1;
3453
3454                        if (msb ^ (u16)(data & 1)) {
3455                                crc ^= crc16poly;
3456                                crc |= (u16)0x0001U;
3457                        }
3458                        data >>= 1;
3459                }
3460        }
3461
3462        return crc;
3463}
3464
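/* Program the wake-up logic for a system suspend according to the
 * requested WAKE_* flags (roughly: WAKE_PHY -> PMT_CTL_PHY_WAKE_EN_,
 * WAKE_MAGIC -> WUCSR_MPEN_ with suspend mode 3, WAKE_BCAST ->
 * WUCSR_BCST_EN_, WAKE_UCAST -> WUCSR_PFDA_EN_, and WAKE_MCAST /
 * WAKE_ARP -> pattern filters in WUF_CFG/WUF_MASK).  The MAC receiver
 * is re-enabled at the end so wake frames can be seen; the transmitter
 * stays disabled until resume.
 */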
3465static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3466{
3467        u32 buf;
3468        int ret;
3469        int mask_index;
3470        u16 crc;
3471        u32 temp_wucsr;
3472        u32 temp_pmt_ctl;
3473        const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3474        const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3475        const u8 arp_type[2] = { 0x08, 0x06 };
3476
3477        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3478        buf &= ~MAC_TX_TXEN_;
3479        ret = lan78xx_write_reg(dev, MAC_TX, buf);
3480        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3481        buf &= ~MAC_RX_RXEN_;
3482        ret = lan78xx_write_reg(dev, MAC_RX, buf);
3483
3484        ret = lan78xx_write_reg(dev, WUCSR, 0);
3485        ret = lan78xx_write_reg(dev, WUCSR2, 0);
3486        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3487
3488        temp_wucsr = 0;
3489
3490        temp_pmt_ctl = 0;
3491        ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3492        temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3493        temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3494
3495        for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3496                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3497
3498        mask_index = 0;
3499        if (wol & WAKE_PHY) {
3500                temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3501
3502                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3503                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3504                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3505        }
3506        if (wol & WAKE_MAGIC) {
3507                temp_wucsr |= WUCSR_MPEN_;
3508
3509                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3510                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3511                temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3512        }
3513        if (wol & WAKE_BCAST) {
3514                temp_wucsr |= WUCSR_BCST_EN_;
3515
3516                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3517                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3518                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3519        }
3520        if (wol & WAKE_MCAST) {
3521                temp_wucsr |= WUCSR_WAKE_EN_;
3522
3523                /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3524                crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3525                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3526                                        WUF_CFGX_EN_ |
3527                                        WUF_CFGX_TYPE_MCAST_ |
3528                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
3529                                        (crc & WUF_CFGX_CRC16_MASK_));
3530
3531                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3532                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3533                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3534                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3535                mask_index++;
3536
3537                /* for IPv6 Multicast */
3538                crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3539                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3540                                        WUF_CFGX_EN_ |
3541                                        WUF_CFGX_TYPE_MCAST_ |
3542                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
3543                                        (crc & WUF_CFGX_CRC16_MASK_));
3544
3545                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3546                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3547                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3548                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3549                mask_index++;
3550
3551                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3552                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3553                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3554        }
3555        if (wol & WAKE_UCAST) {
3556                temp_wucsr |= WUCSR_PFDA_EN_;
3557
3558                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3559                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3560                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3561        }
3562        if (wol & WAKE_ARP) {
3563                temp_wucsr |= WUCSR_WAKE_EN_;
3564
3565                /* set WUF_CFG & WUF_MASK
3566                 * for packettype (offset 12,13) = ARP (0x0806)
3567                 */
3568                crc = lan78xx_wakeframe_crc16(arp_type, 2);
3569                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3570                                        WUF_CFGX_EN_ |
3571                                        WUF_CFGX_TYPE_ALL_ |
3572                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
3573                                        (crc & WUF_CFGX_CRC16_MASK_));
3574
3575                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3576                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3577                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3578                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3579                mask_index++;
3580
3581                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3582                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3583                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3584        }
3585
3586        ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3587
3588        /* when multiple WOL bits are set */
3589        if (hweight_long((unsigned long)wol) > 1) {
3590                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3591                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3592                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3593        }
3594        ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3595
3596        /* clear WUPS */
3597        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3598        buf |= PMT_CTL_WUPS_MASK_;
3599        ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3600
3601        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3602        buf |= MAC_RX_RXEN_;
3603        ret = lan78xx_write_reg(dev, MAC_RX, buf);
3604
3605        return 0;
3606}
3607
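/* Suspend handler for both runtime (auto) suspend and system sleep.
 * Autosuspend is refused with -EBUSY while TX traffic is pending or
 * queued.  On the first suspend the MAC is stopped and all URBs are
 * torn down; a runtime suspend then arms PHY and good-frame wake-up
 * with suspend mode 3, whereas a system sleep defers to
 * lan78xx_set_suspend() with the user's Wake-on-LAN settings.
 */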
3608int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3609{
3610        struct lan78xx_net *dev = usb_get_intfdata(intf);
3611        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3612        u32 buf;
3613        int ret;
3614        int event;
3615
3616        event = message.event;
3617
3618        if (!dev->suspend_count++) {
3619                spin_lock_irq(&dev->txq.lock);
3620                /* don't autosuspend while transmitting */
3621                if ((skb_queue_len(&dev->txq) ||
3622                     skb_queue_len(&dev->txq_pend)) &&
3623                        PMSG_IS_AUTO(message)) {
3624                        spin_unlock_irq(&dev->txq.lock);
3625                        ret = -EBUSY;
3626                        goto out;
3627                } else {
3628                        set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3629                        spin_unlock_irq(&dev->txq.lock);
3630                }
3631
3632                /* stop TX & RX */
3633                ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3634                buf &= ~MAC_TX_TXEN_;
3635                ret = lan78xx_write_reg(dev, MAC_TX, buf);
3636                ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3637                buf &= ~MAC_RX_RXEN_;
3638                ret = lan78xx_write_reg(dev, MAC_RX, buf);
3639
3640                /* empty out the rx and tx queues */
3641                netif_device_detach(dev->net);
3642                lan78xx_terminate_urbs(dev);
3643                usb_kill_urb(dev->urb_intr);
3644
3645                /* reattach */
3646                netif_device_attach(dev->net);
3647        }
3648
3649        if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3650                del_timer(&dev->stat_monitor);
3651
3652                if (PMSG_IS_AUTO(message)) {
3653                        /* auto suspend (selective suspend) */
3654                        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3655                        buf &= ~MAC_TX_TXEN_;
3656                        ret = lan78xx_write_reg(dev, MAC_TX, buf);
3657                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3658                        buf &= ~MAC_RX_RXEN_;
3659                        ret = lan78xx_write_reg(dev, MAC_RX, buf);
3660
3661                        ret = lan78xx_write_reg(dev, WUCSR, 0);
3662                        ret = lan78xx_write_reg(dev, WUCSR2, 0);
3663                        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3664
3665                        /* set goodframe wakeup */
3666                        ret = lan78xx_read_reg(dev, WUCSR, &buf);
3667
3668                        buf |= WUCSR_RFE_WAKE_EN_;
3669                        buf |= WUCSR_STORE_WAKE_;
3670
3671                        ret = lan78xx_write_reg(dev, WUCSR, buf);
3672
3673                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3674
3675                        buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3676                        buf |= PMT_CTL_RES_CLR_WKP_STS_;
3677
3678                        buf |= PMT_CTL_PHY_WAKE_EN_;
3679                        buf |= PMT_CTL_WOL_EN_;
3680                        buf &= ~PMT_CTL_SUS_MODE_MASK_;
3681                        buf |= PMT_CTL_SUS_MODE_3_;
3682
3683                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3684
3685                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3686
3687                        buf |= PMT_CTL_WUPS_MASK_;
3688
3689                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3690
3691                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3692                        buf |= MAC_RX_RXEN_;
3693                        ret = lan78xx_write_reg(dev, MAC_RX, buf);
3694                } else {
3695                        lan78xx_set_suspend(dev, pdata->wol);
3696                }
3697        }
3698
3699        ret = 0;
3700out:
3701        return ret;
3702}
3703
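/* Resume handler: restart the stats timer, resubmit the interrupt URB
 * if the device is open, and replay any TX URBs that were anchored on
 * dev->deferred while the device was asleep.  The wake-up sources and
 * latched wake status bits in WUCSR/WUCSR2/WK_SRC are then cleared
 * (the *_RCD_/*_FR_ bits are presumably write-one-to-clear) and the
 * transmitter is re-enabled.
 */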
3704int lan78xx_resume(struct usb_interface *intf)
3705{
3706        struct lan78xx_net *dev = usb_get_intfdata(intf);
3707        struct sk_buff *skb;
3708        struct urb *res;
3709        int ret;
3710        u32 buf;
3711
3712        if (!timer_pending(&dev->stat_monitor)) {
3713                dev->delta = 1;
3714                mod_timer(&dev->stat_monitor,
3715                          jiffies + STAT_UPDATE_TIMER);
3716        }
3717
3718        if (!--dev->suspend_count) {
3719                /* resume interrupt URBs */
3720                if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3721                        usb_submit_urb(dev->urb_intr, GFP_NOIO);
3722
3723                spin_lock_irq(&dev->txq.lock);
3724                while ((res = usb_get_from_anchor(&dev->deferred))) {
3725                        skb = (struct sk_buff *)res->context;
3726                        ret = usb_submit_urb(res, GFP_ATOMIC);
3727                        if (ret < 0) {
3728                                dev_kfree_skb_any(skb);
3729                                usb_free_urb(res);
3730                                usb_autopm_put_interface_async(dev->intf);
3731                        } else {
3732                                netif_trans_update(dev->net);
3733                                lan78xx_queue_skb(&dev->txq, skb, tx_start);
3734                        }
3735                }
3736
3737                clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3738                spin_unlock_irq(&dev->txq.lock);
3739
3740                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3741                        if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3742                                netif_start_queue(dev->net);
3743                        tasklet_schedule(&dev->bh);
3744                }
3745        }
3746
3747        ret = lan78xx_write_reg(dev, WUCSR2, 0);
3748        ret = lan78xx_write_reg(dev, WUCSR, 0);
3749        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3750
3751        ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3752                                             WUCSR2_ARP_RCD_ |
3753                                             WUCSR2_IPV6_TCPSYN_RCD_ |
3754                                             WUCSR2_IPV4_TCPSYN_RCD_);
3755
3756        ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3757                                            WUCSR_EEE_RX_WAKE_ |
3758                                            WUCSR_PFDA_FR_ |
3759                                            WUCSR_RFE_WAKE_FR_ |
3760                                            WUCSR_WUFR_ |
3761                                            WUCSR_MPR_ |
3762                                            WUCSR_BCST_FR_);
3763
3764        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3765        buf |= MAC_TX_TXEN_;
3766        ret = lan78xx_write_reg(dev, MAC_TX, buf);
3767
3768        return 0;
3769}
3770
3771int lan78xx_reset_resume(struct usb_interface *intf)
3772{
3773        struct lan78xx_net *dev = usb_get_intfdata(intf);
3774
3775        lan78xx_reset(dev);
3776
3777        lan78xx_phy_init(dev);
3778
3779        return lan78xx_resume(intf);
3780}
3781
3782static const struct usb_device_id products[] = {
3783        {
3784        /* LAN7800 USB Gigabit Ethernet Device */
3785        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3786        },
3787        {
3788        /* LAN7850 USB Gigabit Ethernet Device */
3789        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3790        },
3791        {},
3792};
3793MODULE_DEVICE_TABLE(usb, products);
3794
3795static struct usb_driver lan78xx_driver = {
3796        .name                   = DRIVER_NAME,
3797        .id_table               = products,
3798        .probe                  = lan78xx_probe,
3799        .disconnect             = lan78xx_disconnect,
3800        .suspend                = lan78xx_suspend,
3801        .resume                 = lan78xx_resume,
3802        .reset_resume           = lan78xx_reset_resume,
3803        .supports_autosuspend   = 1,
3804        .disable_hub_initiated_lpm = 1,
3805};
3806
3807module_usb_driver(lan78xx_driver);
3808
3809MODULE_AUTHOR(DRIVER_AUTHOR);
3810MODULE_DESCRIPTION(DRIVER_DESC);
3811MODULE_LICENSE("GPL");
3812