linux/drivers/net/usb/usbnet.c
   1/*
   2 * USB Network driver infrastructure
   3 * Copyright (C) 2000-2005 by David Brownell
   4 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20/*
  21 * This is a generic "USB networking" framework that works with several
  22 * kinds of full and high speed networking devices:  host-to-host cables,
  23 * smart usb peripherals, and actual Ethernet adapters.
  24 *
  25 * These devices usually differ in terms of control protocols (if they
  26 * even have one!) and sometimes they define new framing to wrap or batch
  27 * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
  28 * so interface (un)binding, endpoint I/O queues, fault handling, and other
  29 * issues can usefully be addressed by this framework.
  30 */
  31
  32// #define      DEBUG                   // error path messages, extra info
  33// #define      VERBOSE                 // more; success messages
  34
  35#include <linux/module.h>
  36#include <linux/init.h>
  37#include <linux/netdevice.h>
  38#include <linux/etherdevice.h>
  39#include <linux/ctype.h>
  40#include <linux/ethtool.h>
  41#include <linux/workqueue.h>
  42#include <linux/mii.h>
  43#include <linux/usb.h>
  44#include <linux/usb/usbnet.h>
  45#include <linux/usb/cdc.h>
  46#include <linux/slab.h>
  47#include <linux/kernel.h>
  48#include <linux/pm_runtime.h>
  49
  50#define DRIVER_VERSION          "22-Aug-2005"
  51
  52
  53/*-------------------------------------------------------------------------*/
  54
  55/*
  56 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
  57 * Several dozen bytes of IPv4 data can fit in two such transactions.
  58 * One maximum size Ethernet packet takes twenty four of them.
  59 * For high speed, each frame comfortably fits almost 36 max size
  60 * Ethernet packets (so queues should be bigger).
  61 *
  62 * The goal is to let the USB host controller be busy for 5msec or
  63 * more before an irq is required, under load.  Jumbograms change
  64 * the equation.
  65 */
  66#define MAX_QUEUE_MEMORY        (60 * 1518)
  67#define RX_QLEN(dev)            ((dev)->rx_qlen)
  68#define TX_QLEN(dev)            ((dev)->tx_qlen)
  69
  70// reawaken network queue this soon after stopping; else watchdog barks
  71#define TX_TIMEOUT_JIFFIES      (5*HZ)
  72
  73/* throttle rx/tx briefly after some faults, so hub_wq might disconnect()
  74 * us (it polls at HZ/4 usually) before we report too many false errors.
  75 */
  76#define THROTTLE_JIFFIES        (HZ/8)
  77
  78// between wakeups
  79#define UNLINK_TIMEOUT_MS       3
  80
  81/*-------------------------------------------------------------------------*/
  82
  83// randomly generated ethernet address
  84static u8       node_id [ETH_ALEN];
  85
  86static const char driver_name [] = "usbnet";
  87
  88/* use ethtool to change the level for any given device */
  89static int msg_level = -1;
  90module_param (msg_level, int, 0);
  91MODULE_PARM_DESC (msg_level, "Override default message level");
  92
  93/*-------------------------------------------------------------------------*/
  94
  95/* handles CDC Ethernet and many other network "bulk data" interfaces */
  96int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
  97{
  98        int                             tmp;
  99        struct usb_host_interface       *alt = NULL;
 100        struct usb_host_endpoint        *in = NULL, *out = NULL;
 101        struct usb_host_endpoint        *status = NULL;
 102
 103        for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
 104                unsigned        ep;
 105
 106                in = out = status = NULL;
 107                alt = intf->altsetting + tmp;
 108
 109                /* take the first altsetting with in-bulk + out-bulk;
 110                 * remember any status endpoint, just in case;
 111                 * ignore other endpoints and altsettings.
 112                 */
 113                for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
 114                        struct usb_host_endpoint        *e;
 115                        int                             intr = 0;
 116
 117                        e = alt->endpoint + ep;
 118                        switch (e->desc.bmAttributes) {
 119                        case USB_ENDPOINT_XFER_INT:
 120                                if (!usb_endpoint_dir_in(&e->desc))
 121                                        continue;
 122                                intr = 1;
 123                                /* FALLTHROUGH */
 124                        case USB_ENDPOINT_XFER_BULK:
 125                                break;
 126                        default:
 127                                continue;
 128                        }
 129                        if (usb_endpoint_dir_in(&e->desc)) {
 130                                if (!intr && !in)
 131                                        in = e;
 132                                else if (intr && !status)
 133                                        status = e;
 134                        } else {
 135                                if (!out)
 136                                        out = e;
 137                        }
 138                }
 139                if (in && out)
 140                        break;
 141        }
 142        if (!alt || !in || !out)
 143                return -EINVAL;
 144
 145        if (alt->desc.bAlternateSetting != 0 ||
 146            !(dev->driver_info->flags & FLAG_NO_SETINT)) {
 147                tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
 148                                alt->desc.bAlternateSetting);
 149                if (tmp < 0)
 150                        return tmp;
 151        }
 152
 153        dev->in = usb_rcvbulkpipe (dev->udev,
 154                        in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
 155        dev->out = usb_sndbulkpipe (dev->udev,
 156                        out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
 157        dev->status = status;
 158        return 0;
 159}
 160EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
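
/* Hedged usage sketch (not part of this file; "example_bind" is a
 * hypothetical driver_info.bind member): a minimal minidriver that needs
 * no custom setup can let this helper pick its endpoints, e.g.
 *
 *	static int example_bind(struct usbnet *dev, struct usb_interface *intf)
 *	{
 *		return usbnet_get_endpoints(dev, intf);
 *	}
 *
 * On success dev->in, dev->out and dev->status are ready for the rx/tx
 * paths below.
 */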
 161
 162int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 163{
 164        int             tmp = -1, ret;
 165        unsigned char   buf [13];
 166
 167        ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
 168        if (ret == 12)
 169                tmp = hex2bin(dev->net->dev_addr, buf, 6);
 170        if (tmp < 0) {
 171                dev_dbg(&dev->udev->dev,
 172                        "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
 173                if (ret >= 0)
 174                        ret = -EINVAL;
 175                return ret;
 176        }
 177        return 0;
 178}
 179EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
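
/* Illustrative note, assuming a CDC-style iMACAddress string descriptor:
 * the string must hold exactly 12 hex digits, e.g. "0A1B2C3D4E5F", which
 * hex2bin() above converts to the 6-byte address 0a:1b:2c:3d:4e:5f.
 * Anything shorter, longer, or containing non-hex characters is rejected
 * with -EINVAL (or the usb_string() error).
 */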
 180
 181static void intr_complete (struct urb *urb)
 182{
 183        struct usbnet   *dev = urb->context;
 184        int             status = urb->status;
 185
 186        switch (status) {
 187        /* success */
 188        case 0:
 189                dev->driver_info->status(dev, urb);
 190                break;
 191
 192        /* software-driven interface shutdown */
 193        case -ENOENT:           /* urb killed */
 194        case -ESHUTDOWN:        /* hardware gone */
 195                netif_dbg(dev, ifdown, dev->net,
 196                          "intr shutdown, code %d\n", status);
 197                return;
 198
 199        /* NOTE:  not throttling like RX/TX, since this endpoint
 200         * already polls infrequently
 201         */
 202        default:
 203                netdev_dbg(dev->net, "intr status %d\n", status);
 204                break;
 205        }
 206
 207        status = usb_submit_urb (urb, GFP_ATOMIC);
 208        if (status != 0)
 209                netif_err(dev, timer, dev->net,
 210                          "intr resubmit --> %d\n", status);
 211}
 212
 213static int init_status (struct usbnet *dev, struct usb_interface *intf)
 214{
 215        char            *buf = NULL;
 216        unsigned        pipe = 0;
 217        unsigned        maxp;
 218        unsigned        period;
 219
 220        if (!dev->driver_info->status)
 221                return 0;
 222
 223        pipe = usb_rcvintpipe (dev->udev,
 224                        dev->status->desc.bEndpointAddress
 225                                & USB_ENDPOINT_NUMBER_MASK);
 226        maxp = usb_maxpacket (dev->udev, pipe, 0);
 227
 228        /* avoid 1 msec chatter:  min 8 msec poll rate */
 229        period = max ((int) dev->status->desc.bInterval,
 230                (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
 231
 232        buf = kmalloc (maxp, GFP_KERNEL);
 233        if (buf) {
 234                dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
 235                if (!dev->interrupt) {
 236                        kfree (buf);
 237                        return -ENOMEM;
 238                } else {
 239                        usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
 240                                buf, maxp, intr_complete, dev, period);
 241                        dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
 242                        dev_dbg(&intf->dev,
 243                                "status ep%din, %d bytes period %d\n",
 244                                usb_pipeendpoint(pipe), maxp, period);
 245                }
 246        }
 247        return 0;
 248}
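
/* Hedged sketch of the driver_info->status() hook serviced by
 * intr_complete() above, assuming a CDC-style notification endpoint
 * ("example_status" is hypothetical; the pattern follows cdc_ether):
 *
 *	static void example_status(struct usbnet *dev, struct urb *urb)
 *	{
 *		struct usb_cdc_notification *event = urb->transfer_buffer;
 *
 *		if (urb->actual_length < sizeof(*event))
 *			return;
 *		if (event->bNotificationType == USB_CDC_NOTIFY_NETWORK_CONNECTION)
 *			usbnet_link_change(dev, !!event->wValue, 0);
 *	}
 */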
 249
 250/* Submit the interrupt URB if not previously submitted, increasing refcount */
 251int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
 252{
 253        int ret = 0;
 254
 255        WARN_ON_ONCE(dev->interrupt == NULL);
 256        if (dev->interrupt) {
 257                mutex_lock(&dev->interrupt_mutex);
 258
 259                if (++dev->interrupt_count == 1)
 260                        ret = usb_submit_urb(dev->interrupt, mem_flags);
 261
 262                dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
 263                        dev->interrupt_count);
 264                mutex_unlock(&dev->interrupt_mutex);
 265        }
 266        return ret;
 267}
 268EXPORT_SYMBOL_GPL(usbnet_status_start);
 269
 270/* For resume; submit interrupt URB if previously submitted */
 271static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
 272{
 273        int ret = 0;
 274
 275        mutex_lock(&dev->interrupt_mutex);
 276        if (dev->interrupt_count) {
 277                ret = usb_submit_urb(dev->interrupt, mem_flags);
 278                dev_dbg(&dev->udev->dev,
 279                        "submitted interrupt URB for resume\n");
 280        }
 281        mutex_unlock(&dev->interrupt_mutex);
 282        return ret;
 283}
 284
 285/* Kill the interrupt URB if all submitters want it killed */
 286void usbnet_status_stop(struct usbnet *dev)
 287{
 288        if (dev->interrupt) {
 289                mutex_lock(&dev->interrupt_mutex);
 290                WARN_ON(dev->interrupt_count == 0);
 291
 292                if (dev->interrupt_count && --dev->interrupt_count == 0)
 293                        usb_kill_urb(dev->interrupt);
 294
 295                dev_dbg(&dev->udev->dev,
 296                        "decremented interrupt URB count to %d\n",
 297                        dev->interrupt_count);
 298                mutex_unlock(&dev->interrupt_mutex);
 299        }
 300}
 301EXPORT_SYMBOL_GPL(usbnet_status_stop);
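
/* Usage note (hedged): a minidriver that needs status notifications while
 * the interface is otherwise idle can bracket that window itself:
 *
 *	ret = usbnet_status_start(dev, GFP_KERNEL);
 *	if (!ret) {
 *		... wait for driver_info->status() events ...
 *		usbnet_status_stop(dev);
 *	}
 *
 * interrupt_count makes these calls stack safely with usbnet_open() and
 * usbnet_stop(), so neither user kills the URB out from under the other.
 */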
 302
 303/* For suspend; always kill interrupt URB */
 304static void __usbnet_status_stop_force(struct usbnet *dev)
 305{
 306        if (dev->interrupt) {
 307                mutex_lock(&dev->interrupt_mutex);
 308                usb_kill_urb(dev->interrupt);
 309                dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
 310                mutex_unlock(&dev->interrupt_mutex);
 311        }
 312}
 313
 314/* Passes this packet up the stack, updating its accounting.
 315 * Some link protocols batch packets, so their rx_fixup paths
 316 * can return clones as well as just modify the original skb.
 317 */
 318void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 319{
 320        int     status;
 321
 322        if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
 323                skb_queue_tail(&dev->rxq_pause, skb);
 324                return;
 325        }
 326
 327        /* only update if unset to allow minidriver rx_fixup override */
 328        if (skb->protocol == 0)
 329                skb->protocol = eth_type_trans (skb, dev->net);
 330
 331        dev->net->stats.rx_packets++;
 332        dev->net->stats.rx_bytes += skb->len;
 333
 334        netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
 335                  skb->len + sizeof (struct ethhdr), skb->protocol);
 336        memset (skb->cb, 0, sizeof (struct skb_data));
 337
 338        if (skb_defer_rx_timestamp(skb))
 339                return;
 340
 341        status = netif_rx (skb);
 342        if (status != NET_RX_SUCCESS)
 343                netif_dbg(dev, rx_err, dev->net,
 344                          "netif_rx status %d\n", status);
 345}
 346EXPORT_SYMBOL_GPL(usbnet_skb_return);
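
/* Hedged sketch of a batching rx_fixup() that feeds each inner frame to
 * usbnet_skb_return() above.  The 2-byte little-endian length prefix and
 * all "example_*" names are hypothetical; real framings differ:
 *
 *	static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 *	{
 *		while (skb->len > 2) {
 *			u16 len = skb->data[0] | (skb->data[1] << 8);
 *			struct sk_buff *frame;
 *
 *			skb_pull(skb, 2);
 *			if (len > skb->len)
 *				return 0;	// framing error, counts as rx_errors
 *			frame = netdev_alloc_skb_ip_align(dev->net, len);
 *			if (!frame)
 *				return 0;
 *			memcpy(skb_put(frame, len), skb->data, len);
 *			usbnet_skb_return(dev, frame);
 *			skb_pull(skb, len);
 *		}
 *		return 1;
 *	}
 *
 * A driver built this way also sets FLAG_MULTI_PACKET so rx_process()
 * does not pass the (now consumed) URB skb up a second time.
 */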
 347
 348/* must be called if hard_mtu or rx_urb_size changed */
 349void usbnet_update_max_qlen(struct usbnet *dev)
 350{
 351        enum usb_device_speed speed = dev->udev->speed;
 352
 353        switch (speed) {
 354        case USB_SPEED_HIGH:
 355                dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
 356                dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
 357                break;
 358        case USB_SPEED_SUPER:
 359        case USB_SPEED_SUPER_PLUS:
  360                /*
  361                 * Don't take the default 5ms qlen for super speed HCs,
  362                 * to save memory; iperf tests show a 2.5ms qlen works
  363                 * well
  364                 */
 365                dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
 366                dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
 367                break;
 368        default:
 369                dev->rx_qlen = dev->tx_qlen = 4;
 370        }
 371}
 372EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
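
/* Worked example for the arithmetic above, assuming the default 1500-byte
 * MTU (hard_mtu = rx_urb_size = 1514 unless a minidriver changes them):
 *
 *	high speed:	rx_qlen = (60 * 1518) / 1514 = 60 URBs
 *	super speed:	rx_qlen = (5 * 60 * 1518) / 1514 = 300 URBs
 *	full/low speed:	rx_qlen = tx_qlen = 4
 *
 * tx_qlen follows the same formula using hard_mtu.
 */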
 373
 374
 375/*-------------------------------------------------------------------------
 376 *
 377 * Network Device Driver (peer link to "Host Device", from USB host)
 378 *
 379 *-------------------------------------------------------------------------*/
 380
 381int usbnet_change_mtu (struct net_device *net, int new_mtu)
 382{
 383        struct usbnet   *dev = netdev_priv(net);
 384        int             ll_mtu = new_mtu + net->hard_header_len;
 385        int             old_hard_mtu = dev->hard_mtu;
 386        int             old_rx_urb_size = dev->rx_urb_size;
 387
 388        if (new_mtu <= 0)
 389                return -EINVAL;
 390        // no second zero-length packet read wanted after mtu-sized packets
 391        if ((ll_mtu % dev->maxpacket) == 0)
 392                return -EDOM;
 393        net->mtu = new_mtu;
 394
 395        dev->hard_mtu = net->mtu + net->hard_header_len;
 396        if (dev->rx_urb_size == old_hard_mtu) {
 397                dev->rx_urb_size = dev->hard_mtu;
 398                if (dev->rx_urb_size > old_rx_urb_size) {
 399                        usbnet_pause_rx(dev);
 400                        usbnet_unlink_rx_urbs(dev);
 401                        usbnet_resume_rx(dev);
 402                }
 403        }
 404
  405        /* max qlen depends on hard_mtu and rx_urb_size */
 406        usbnet_update_max_qlen(dev);
 407
 408        return 0;
 409}
 410EXPORT_SYMBOL_GPL(usbnet_change_mtu);
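
/* Worked example of the -EDOM check above, assuming a high speed device
 * (dev->maxpacket = 512) and the usual 14-byte Ethernet header:
 * new_mtu = 498 gives ll_mtu = 498 + 14 = 512, an exact multiple of the
 * bulk maxpacket, so the change is refused; otherwise every mtu-sized
 * packet would have to be followed by a zero-length packet on the wire.
 */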
 411
 412/* The caller must hold list->lock */
 413static void __usbnet_queue_skb(struct sk_buff_head *list,
 414                        struct sk_buff *newsk, enum skb_state state)
 415{
 416        struct skb_data *entry = (struct skb_data *) newsk->cb;
 417
 418        __skb_queue_tail(list, newsk);
 419        entry->state = state;
 420}
 421
 422/*-------------------------------------------------------------------------*/
 423
 424/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 425 * completion callbacks.  2.5 should have fixed those bugs...
 426 */
 427
 428static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 429                struct sk_buff_head *list, enum skb_state state)
 430{
 431        unsigned long           flags;
 432        enum skb_state          old_state;
 433        struct skb_data *entry = (struct skb_data *) skb->cb;
 434
 435        spin_lock_irqsave(&list->lock, flags);
 436        old_state = entry->state;
 437        entry->state = state;
 438        __skb_unlink(skb, list);
 439
 440        /* defer_bh() is never called with list == &dev->done.
 441         * spin_lock_nested() tells lockdep that it is OK to take
 442         * dev->done.lock here with list->lock held.
 443         */
 444        spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
 445
 446        __skb_queue_tail(&dev->done, skb);
 447        if (dev->done.qlen == 1)
 448                tasklet_schedule(&dev->bh);
 449        spin_unlock(&dev->done.lock);
 450        spin_unlock_irqrestore(&list->lock, flags);
 451        return old_state;
 452}
 453
 454/* some work can't be done in tasklets, so we use keventd
 455 *
 456 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 457 * but tasklet_schedule() doesn't.  hope the failure is rare.
 458 */
 459void usbnet_defer_kevent (struct usbnet *dev, int work)
 460{
 461        set_bit (work, &dev->flags);
 462        if (!schedule_work (&dev->kevent)) {
 463                if (net_ratelimit())
 464                        netdev_err(dev->net, "kevent %d may have been dropped\n", work);
 465        } else {
 466                netdev_dbg(dev->net, "kevent %d scheduled\n", work);
 467        }
 468}
 469EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
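
/* Usage note (hedged): this is how minidrivers punt work out of URB
 * completion context.  A status() handler that notices a link transition
 * can, for example, do
 *
 *	usbnet_defer_kevent(dev, EVENT_LINK_RESET);
 *
 * and let usbnet_deferred_kevent() below call driver_info->link_reset()
 * from process context.
 */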
 470
 471/*-------------------------------------------------------------------------*/
 472
 473static void rx_complete (struct urb *urb);
 474
 475static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 476{
 477        struct sk_buff          *skb;
 478        struct skb_data         *entry;
 479        int                     retval = 0;
 480        unsigned long           lockflags;
 481        size_t                  size = dev->rx_urb_size;
 482
 483        /* prevent rx skb allocation when error ratio is high */
 484        if (test_bit(EVENT_RX_KILL, &dev->flags)) {
 485                usb_free_urb(urb);
 486                return -ENOLINK;
 487        }
 488
 489        skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
 490        if (!skb) {
 491                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
 492                usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
 493                usb_free_urb (urb);
 494                return -ENOMEM;
 495        }
 496
 497        entry = (struct skb_data *) skb->cb;
 498        entry->urb = urb;
 499        entry->dev = dev;
 500        entry->length = 0;
 501
 502        usb_fill_bulk_urb (urb, dev->udev, dev->in,
 503                skb->data, size, rx_complete, skb);
 504
 505        spin_lock_irqsave (&dev->rxq.lock, lockflags);
 506
 507        if (netif_running (dev->net) &&
 508            netif_device_present (dev->net) &&
 509            !test_bit (EVENT_RX_HALT, &dev->flags) &&
 510            !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
 511                switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
 512                case -EPIPE:
 513                        usbnet_defer_kevent (dev, EVENT_RX_HALT);
 514                        break;
 515                case -ENOMEM:
 516                        usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
 517                        break;
 518                case -ENODEV:
 519                        netif_dbg(dev, ifdown, dev->net, "device gone\n");
 520                        netif_device_detach (dev->net);
 521                        break;
 522                case -EHOSTUNREACH:
 523                        retval = -ENOLINK;
 524                        break;
 525                default:
 526                        netif_dbg(dev, rx_err, dev->net,
 527                                  "rx submit, %d\n", retval);
 528                        tasklet_schedule (&dev->bh);
 529                        break;
 530                case 0:
 531                        __usbnet_queue_skb(&dev->rxq, skb, rx_start);
 532                }
 533        } else {
 534                netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
 535                retval = -ENOLINK;
 536        }
 537        spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
 538        if (retval) {
 539                dev_kfree_skb_any (skb);
 540                usb_free_urb (urb);
 541        }
 542        return retval;
 543}
 544
 545
 546/*-------------------------------------------------------------------------*/
 547
 548static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
 549{
 550        if (dev->driver_info->rx_fixup &&
 551            !dev->driver_info->rx_fixup (dev, skb)) {
 552                /* With RX_ASSEMBLE, rx_fixup() must update counters */
 553                if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
 554                        dev->net->stats.rx_errors++;
 555                goto done;
 556        }
 557        // else network stack removes extra byte if we forced a short packet
 558
 559        /* all data was already cloned from skb inside the driver */
 560        if (dev->driver_info->flags & FLAG_MULTI_PACKET)
 561                goto done;
 562
 563        if (skb->len < ETH_HLEN) {
 564                dev->net->stats.rx_errors++;
 565                dev->net->stats.rx_length_errors++;
 566                netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
 567        } else {
 568                usbnet_skb_return(dev, skb);
 569                return;
 570        }
 571
 572done:
 573        skb_queue_tail(&dev->done, skb);
 574}
 575
 576/*-------------------------------------------------------------------------*/
 577
 578static void rx_complete (struct urb *urb)
 579{
 580        struct sk_buff          *skb = (struct sk_buff *) urb->context;
 581        struct skb_data         *entry = (struct skb_data *) skb->cb;
 582        struct usbnet           *dev = entry->dev;
 583        int                     urb_status = urb->status;
 584        enum skb_state          state;
 585
 586        skb_put (skb, urb->actual_length);
 587        state = rx_done;
 588        entry->urb = NULL;
 589
 590        switch (urb_status) {
 591        /* success */
 592        case 0:
 593                break;
 594
 595        /* stalls need manual reset. this is rare ... except that
 596         * when going through USB 2.0 TTs, unplug appears this way.
 597         * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
 598         * storm, recovering as needed.
 599         */
 600        case -EPIPE:
 601                dev->net->stats.rx_errors++;
 602                usbnet_defer_kevent (dev, EVENT_RX_HALT);
 603                // FALLTHROUGH
 604
 605        /* software-driven interface shutdown */
 606        case -ECONNRESET:               /* async unlink */
 607        case -ESHUTDOWN:                /* hardware gone */
 608                netif_dbg(dev, ifdown, dev->net,
 609                          "rx shutdown, code %d\n", urb_status);
 610                goto block;
 611
 612        /* we get controller i/o faults during hub_wq disconnect() delays.
 613         * throttle down resubmits, to avoid log floods; just temporarily,
 614         * so we still recover when the fault isn't a hub_wq delay.
 615         */
 616        case -EPROTO:
 617        case -ETIME:
 618        case -EILSEQ:
 619                dev->net->stats.rx_errors++;
 620                if (!timer_pending (&dev->delay)) {
 621                        mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
 622                        netif_dbg(dev, link, dev->net,
 623                                  "rx throttle %d\n", urb_status);
 624                }
 625block:
 626                state = rx_cleanup;
 627                entry->urb = urb;
 628                urb = NULL;
 629                break;
 630
 631        /* data overrun ... flush fifo? */
 632        case -EOVERFLOW:
 633                dev->net->stats.rx_over_errors++;
 634                // FALLTHROUGH
 635
 636        default:
 637                state = rx_cleanup;
 638                dev->net->stats.rx_errors++;
 639                netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
 640                break;
 641        }
 642
 643        /* stop rx if packet error rate is high */
 644        if (++dev->pkt_cnt > 30) {
 645                dev->pkt_cnt = 0;
 646                dev->pkt_err = 0;
 647        } else {
 648                if (state == rx_cleanup)
 649                        dev->pkt_err++;
 650                if (dev->pkt_err > 20)
 651                        set_bit(EVENT_RX_KILL, &dev->flags);
 652        }
 653
 654        state = defer_bh(dev, skb, &dev->rxq, state);
 655
 656        if (urb) {
 657                if (netif_running (dev->net) &&
 658                    !test_bit (EVENT_RX_HALT, &dev->flags) &&
 659                    state != unlink_start) {
 660                        rx_submit (dev, urb, GFP_ATOMIC);
 661                        usb_mark_last_busy(dev->udev);
 662                        return;
 663                }
 664                usb_free_urb (urb);
 665        }
 666        netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
 667}
 668
 669/*-------------------------------------------------------------------------*/
 670void usbnet_pause_rx(struct usbnet *dev)
 671{
 672        set_bit(EVENT_RX_PAUSED, &dev->flags);
 673
 674        netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
 675}
 676EXPORT_SYMBOL_GPL(usbnet_pause_rx);
 677
 678void usbnet_resume_rx(struct usbnet *dev)
 679{
 680        struct sk_buff *skb;
 681        int num = 0;
 682
 683        clear_bit(EVENT_RX_PAUSED, &dev->flags);
 684
 685        while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
 686                usbnet_skb_return(dev, skb);
 687                num++;
 688        }
 689
 690        tasklet_schedule(&dev->bh);
 691
 692        netif_dbg(dev, rx_status, dev->net,
 693                  "paused rx queue disabled, %d skbs requeued\n", num);
 694}
 695EXPORT_SYMBOL_GPL(usbnet_resume_rx);
 696
 697void usbnet_purge_paused_rxq(struct usbnet *dev)
 698{
 699        skb_queue_purge(&dev->rxq_pause);
 700}
 701EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
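
/* Usage note (hedged): a minidriver reconfiguring its receive path can
 * park packets instead of dropping them:
 *
 *	usbnet_pause_rx(dev);
 *	reconfigure_rx_filters(dev);	// hypothetical driver-specific step
 *	usbnet_resume_rx(dev);
 *
 * While EVENT_RX_PAUSED is set, usbnet_skb_return() queues to rxq_pause;
 * usbnet_resume_rx() then replays that queue in order.
 */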
 702
 703/*-------------------------------------------------------------------------*/
 704
 705// unlink pending rx/tx; completion handlers do all other cleanup
 706
 707static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 708{
 709        unsigned long           flags;
 710        struct sk_buff          *skb;
 711        int                     count = 0;
 712
 713        spin_lock_irqsave (&q->lock, flags);
 714        while (!skb_queue_empty(q)) {
 715                struct skb_data         *entry;
 716                struct urb              *urb;
 717                int                     retval;
 718
 719                skb_queue_walk(q, skb) {
 720                        entry = (struct skb_data *) skb->cb;
 721                        if (entry->state != unlink_start)
 722                                goto found;
 723                }
 724                break;
 725found:
 726                entry->state = unlink_start;
 727                urb = entry->urb;
 728
  729                /*
  730                 * Take a reference on the URB so it cannot be freed
  731                 * while usb_unlink_urb() runs: usb_unlink_urb() is
  732                 * always racing with the .complete handler (including
  733                 * defer_bh), and unlinking an already-freed URB would
  734                 * be a use-after-free.
  735                 */
 736                usb_get_urb(urb);
 737                spin_unlock_irqrestore(&q->lock, flags);
 738                // during some PM-driven resume scenarios,
 739                // these (async) unlinks complete immediately
 740                retval = usb_unlink_urb (urb);
 741                if (retval != -EINPROGRESS && retval != 0)
 742                        netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
 743                else
 744                        count++;
 745                usb_put_urb(urb);
 746                spin_lock_irqsave(&q->lock, flags);
 747        }
 748        spin_unlock_irqrestore (&q->lock, flags);
 749        return count;
 750}
 751
 752// Flush all pending rx urbs
 753// minidrivers may need to do this when the MTU changes
 754
 755void usbnet_unlink_rx_urbs(struct usbnet *dev)
 756{
 757        if (netif_running(dev->net)) {
 758                (void) unlink_urbs (dev, &dev->rxq);
 759                tasklet_schedule(&dev->bh);
 760        }
 761}
 762EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
 763
 764/*-------------------------------------------------------------------------*/
 765
 766static void wait_skb_queue_empty(struct sk_buff_head *q)
 767{
 768        unsigned long flags;
 769
 770        spin_lock_irqsave(&q->lock, flags);
 771        while (!skb_queue_empty(q)) {
 772                spin_unlock_irqrestore(&q->lock, flags);
 773                schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
 774                set_current_state(TASK_UNINTERRUPTIBLE);
 775                spin_lock_irqsave(&q->lock, flags);
 776        }
 777        spin_unlock_irqrestore(&q->lock, flags);
 778}
 779
 780// precondition: never called in_interrupt
 781static void usbnet_terminate_urbs(struct usbnet *dev)
 782{
 783        DECLARE_WAITQUEUE(wait, current);
 784        int temp;
 785
 786        /* ensure there are no more active urbs */
 787        add_wait_queue(&dev->wait, &wait);
 788        set_current_state(TASK_UNINTERRUPTIBLE);
 789        temp = unlink_urbs(dev, &dev->txq) +
 790                unlink_urbs(dev, &dev->rxq);
 791
 792        /* maybe wait for deletions to finish. */
 793        wait_skb_queue_empty(&dev->rxq);
 794        wait_skb_queue_empty(&dev->txq);
 795        wait_skb_queue_empty(&dev->done);
 796        netif_dbg(dev, ifdown, dev->net,
 797                  "waited for %d urb completions\n", temp);
 798        set_current_state(TASK_RUNNING);
 799        remove_wait_queue(&dev->wait, &wait);
 800}
 801
 802int usbnet_stop (struct net_device *net)
 803{
 804        struct usbnet           *dev = netdev_priv(net);
 805        struct driver_info      *info = dev->driver_info;
 806        int                     retval, pm, mpn;
 807
 808        clear_bit(EVENT_DEV_OPEN, &dev->flags);
 809        netif_stop_queue (net);
 810
 811        netif_info(dev, ifdown, dev->net,
 812                   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
 813                   net->stats.rx_packets, net->stats.tx_packets,
 814                   net->stats.rx_errors, net->stats.tx_errors);
 815
 816        /* to not race resume */
 817        pm = usb_autopm_get_interface(dev->intf);
 818        /* allow minidriver to stop correctly (wireless devices to turn off
 819         * radio etc) */
 820        if (info->stop) {
 821                retval = info->stop(dev);
 822                if (retval < 0)
 823                        netif_info(dev, ifdown, dev->net,
 824                                   "stop fail (%d) usbnet usb-%s-%s, %s\n",
 825                                   retval,
 826                                   dev->udev->bus->bus_name, dev->udev->devpath,
 827                                   info->description);
 828        }
 829
 830        if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
 831                usbnet_terminate_urbs(dev);
 832
 833        usbnet_status_stop(dev);
 834
 835        usbnet_purge_paused_rxq(dev);
 836
 837        mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
 838
 839        /* deferred work (task, timer, softirq) must also stop.
 840         * can't flush_scheduled_work() until we drop rtnl (later),
 841         * else workers could deadlock; so make workers a NOP.
 842         */
 843        dev->flags = 0;
 844        del_timer_sync (&dev->delay);
 845        tasklet_kill (&dev->bh);
 846        if (!pm)
 847                usb_autopm_put_interface(dev->intf);
 848
 849        if (info->manage_power && mpn)
 850                info->manage_power(dev, 0);
 851        else
 852                usb_autopm_put_interface(dev->intf);
 853
 854        return 0;
 855}
 856EXPORT_SYMBOL_GPL(usbnet_stop);
 857
 858/*-------------------------------------------------------------------------*/
 859
 860// posts reads, and enables write queuing
 861
 862// precondition: never called in_interrupt
 863
 864int usbnet_open (struct net_device *net)
 865{
 866        struct usbnet           *dev = netdev_priv(net);
 867        int                     retval;
 868        struct driver_info      *info = dev->driver_info;
 869
 870        if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
 871                netif_info(dev, ifup, dev->net,
 872                           "resumption fail (%d) usbnet usb-%s-%s, %s\n",
 873                           retval,
 874                           dev->udev->bus->bus_name,
 875                           dev->udev->devpath,
 876                           info->description);
 877                goto done_nopm;
 878        }
 879
 880        // put into "known safe" state
 881        if (info->reset && (retval = info->reset (dev)) < 0) {
 882                netif_info(dev, ifup, dev->net,
 883                           "open reset fail (%d) usbnet usb-%s-%s, %s\n",
 884                           retval,
 885                           dev->udev->bus->bus_name,
 886                           dev->udev->devpath,
 887                           info->description);
 888                goto done;
 889        }
 890
 891        /* hard_mtu or rx_urb_size may change in reset() */
 892        usbnet_update_max_qlen(dev);
 893
 894        // insist peer be connected
 895        if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
 896                netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
 897                goto done;
 898        }
 899
 900        /* start any status interrupt transfer */
 901        if (dev->interrupt) {
 902                retval = usbnet_status_start(dev, GFP_KERNEL);
 903                if (retval < 0) {
 904                        netif_err(dev, ifup, dev->net,
 905                                  "intr submit %d\n", retval);
 906                        goto done;
 907                }
 908        }
 909
 910        set_bit(EVENT_DEV_OPEN, &dev->flags);
 911        netif_start_queue (net);
 912        netif_info(dev, ifup, dev->net,
 913                   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
 914                   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
 915                   dev->net->mtu,
 916                   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
 917                   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
 918                   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
 919                   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
 920                   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
 921                   "simple");
 922
 923        /* reset rx error state */
 924        dev->pkt_cnt = 0;
 925        dev->pkt_err = 0;
 926        clear_bit(EVENT_RX_KILL, &dev->flags);
 927
 928        // delay posting reads until we're fully open
 929        tasklet_schedule (&dev->bh);
 930        if (info->manage_power) {
 931                retval = info->manage_power(dev, 1);
 932                if (retval < 0) {
 933                        retval = 0;
 934                        set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
 935                } else {
 936                        usb_autopm_put_interface(dev->intf);
 937                }
 938        }
 939        return retval;
 940done:
 941        usb_autopm_put_interface(dev->intf);
 942done_nopm:
 943        return retval;
 944}
 945EXPORT_SYMBOL_GPL(usbnet_open);
 946
 947/*-------------------------------------------------------------------------*/
 948
 949/* ethtool methods; minidrivers may need to add some more, but
 950 * they'll probably want to use this base set.
 951 */
 952
 953int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
 954{
 955        struct usbnet *dev = netdev_priv(net);
 956
 957        if (!dev->mii.mdio_read)
 958                return -EOPNOTSUPP;
 959
 960        return mii_ethtool_gset(&dev->mii, cmd);
 961}
 962EXPORT_SYMBOL_GPL(usbnet_get_settings);
 963
 964int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
 965{
 966        struct usbnet *dev = netdev_priv(net);
 967        int retval;
 968
 969        if (!dev->mii.mdio_write)
 970                return -EOPNOTSUPP;
 971
 972        retval = mii_ethtool_sset(&dev->mii, cmd);
 973
 974        /* link speed/duplex might have changed */
 975        if (dev->driver_info->link_reset)
 976                dev->driver_info->link_reset(dev);
 977
 978        /* hard_mtu or rx_urb_size may change in link_reset() */
 979        usbnet_update_max_qlen(dev);
 980
 981        return retval;
 982
 983}
 984EXPORT_SYMBOL_GPL(usbnet_set_settings);
 985
 986u32 usbnet_get_link (struct net_device *net)
 987{
 988        struct usbnet *dev = netdev_priv(net);
 989
 990        /* If a check_connect is defined, return its result */
 991        if (dev->driver_info->check_connect)
 992                return dev->driver_info->check_connect (dev) == 0;
 993
 994        /* if the device has mii operations, use those */
 995        if (dev->mii.mdio_read)
 996                return mii_link_ok(&dev->mii);
 997
 998        /* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
 999        return ethtool_op_get_link(net);
1000}
1001EXPORT_SYMBOL_GPL(usbnet_get_link);
1002
1003int usbnet_nway_reset(struct net_device *net)
1004{
1005        struct usbnet *dev = netdev_priv(net);
1006
1007        if (!dev->mii.mdio_write)
1008                return -EOPNOTSUPP;
1009
1010        return mii_nway_restart(&dev->mii);
1011}
1012EXPORT_SYMBOL_GPL(usbnet_nway_reset);
1013
1014void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
1015{
1016        struct usbnet *dev = netdev_priv(net);
1017
1018        strlcpy (info->driver, dev->driver_name, sizeof info->driver);
1019        strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
1020        strlcpy (info->fw_version, dev->driver_info->description,
1021                sizeof info->fw_version);
1022        usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
1023}
1024EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
1025
1026u32 usbnet_get_msglevel (struct net_device *net)
1027{
1028        struct usbnet *dev = netdev_priv(net);
1029
1030        return dev->msg_enable;
1031}
1032EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
1033
1034void usbnet_set_msglevel (struct net_device *net, u32 level)
1035{
1036        struct usbnet *dev = netdev_priv(net);
1037
1038        dev->msg_enable = level;
1039}
1040EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
1041
1042/* drivers may override default ethtool_ops in their bind() routine */
1043static const struct ethtool_ops usbnet_ethtool_ops = {
1044        .get_settings           = usbnet_get_settings,
1045        .set_settings           = usbnet_set_settings,
1046        .get_link               = usbnet_get_link,
1047        .nway_reset             = usbnet_nway_reset,
1048        .get_drvinfo            = usbnet_get_drvinfo,
1049        .get_msglevel           = usbnet_get_msglevel,
1050        .set_msglevel           = usbnet_set_msglevel,
1051        .get_ts_info            = ethtool_op_get_ts_info,
1052};
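
/* Hedged sketch of the override mentioned above ("example_*" names are
 * hypothetical): a minidriver wanting extra ethtool hooks wraps this base
 * set in its own table and installs it from bind():
 *
 *	static const struct ethtool_ops example_ethtool_ops = {
 *		.get_settings	= usbnet_get_settings,
 *		.set_settings	= usbnet_set_settings,
 *		.get_link	= usbnet_get_link,
 *		.nway_reset	= usbnet_nway_reset,
 *		.get_drvinfo	= usbnet_get_drvinfo,
 *		.get_msglevel	= usbnet_get_msglevel,
 *		.set_msglevel	= usbnet_set_msglevel,
 *		.get_eeprom	= example_get_eeprom,	// driver-specific extra
 *	};
 *
 * and in bind():  dev->net->ethtool_ops = &example_ethtool_ops;
 */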
1053
1054/*-------------------------------------------------------------------------*/
1055
1056static void __handle_link_change(struct usbnet *dev)
1057{
1058        if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
1059                return;
1060
1061        if (!netif_carrier_ok(dev->net)) {
1062                /* kill URBs for reading packets to save bus bandwidth */
1063                unlink_urbs(dev, &dev->rxq);
1064
 1065                /*
 1066                 * tx_timeout() will unlink pending tx URBs, and the net
 1067                 * core stops the tx queue once the link goes down
 1068                 */
1069        } else {
1070                /* submitting URBs for reading packets */
1071                tasklet_schedule(&dev->bh);
1072        }
1073
1074        /* hard_mtu or rx_urb_size may change during link change */
1075        usbnet_update_max_qlen(dev);
1076
1077        clear_bit(EVENT_LINK_CHANGE, &dev->flags);
1078}
1079
1080static void usbnet_set_rx_mode(struct net_device *net)
1081{
1082        struct usbnet           *dev = netdev_priv(net);
1083
1084        usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
1085}
1086
1087static void __handle_set_rx_mode(struct usbnet *dev)
1088{
1089        if (dev->driver_info->set_rx_mode)
1090                (dev->driver_info->set_rx_mode)(dev);
1091
1092        clear_bit(EVENT_SET_RX_MODE, &dev->flags);
1093}
1094
1095/* work that cannot be done in interrupt context uses keventd.
1096 *
1097 * NOTE:  with 2.5 we could do more of this using completion callbacks,
1098 * especially now that control transfers can be queued.
1099 */
1100static void
1101usbnet_deferred_kevent (struct work_struct *work)
1102{
1103        struct usbnet           *dev =
1104                container_of(work, struct usbnet, kevent);
1105        int                     status;
1106
1107        /* usb_clear_halt() needs a thread context */
1108        if (test_bit (EVENT_TX_HALT, &dev->flags)) {
1109                unlink_urbs (dev, &dev->txq);
1110                status = usb_autopm_get_interface(dev->intf);
1111                if (status < 0)
1112                        goto fail_pipe;
1113                status = usb_clear_halt (dev->udev, dev->out);
1114                usb_autopm_put_interface(dev->intf);
1115                if (status < 0 &&
1116                    status != -EPIPE &&
1117                    status != -ESHUTDOWN) {
1118                        if (netif_msg_tx_err (dev))
1119fail_pipe:
1120                                netdev_err(dev->net, "can't clear tx halt, status %d\n",
1121                                           status);
1122                } else {
1123                        clear_bit (EVENT_TX_HALT, &dev->flags);
1124                        if (status != -ESHUTDOWN)
1125                                netif_wake_queue (dev->net);
1126                }
1127        }
1128        if (test_bit (EVENT_RX_HALT, &dev->flags)) {
1129                unlink_urbs (dev, &dev->rxq);
1130                status = usb_autopm_get_interface(dev->intf);
1131                if (status < 0)
1132                        goto fail_halt;
1133                status = usb_clear_halt (dev->udev, dev->in);
1134                usb_autopm_put_interface(dev->intf);
1135                if (status < 0 &&
1136                    status != -EPIPE &&
1137                    status != -ESHUTDOWN) {
1138                        if (netif_msg_rx_err (dev))
1139fail_halt:
1140                                netdev_err(dev->net, "can't clear rx halt, status %d\n",
1141                                           status);
1142                } else {
1143                        clear_bit (EVENT_RX_HALT, &dev->flags);
1144                        tasklet_schedule (&dev->bh);
1145                }
1146        }
1147
1148        /* tasklet could resubmit itself forever if memory is tight */
1149        if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
1150                struct urb      *urb = NULL;
1151                int resched = 1;
1152
1153                if (netif_running (dev->net))
1154                        urb = usb_alloc_urb (0, GFP_KERNEL);
1155                else
1156                        clear_bit (EVENT_RX_MEMORY, &dev->flags);
1157                if (urb != NULL) {
1158                        clear_bit (EVENT_RX_MEMORY, &dev->flags);
1159                        status = usb_autopm_get_interface(dev->intf);
1160                        if (status < 0) {
1161                                usb_free_urb(urb);
1162                                goto fail_lowmem;
1163                        }
1164                        if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
1165                                resched = 0;
1166                        usb_autopm_put_interface(dev->intf);
1167fail_lowmem:
1168                        if (resched)
1169                                tasklet_schedule (&dev->bh);
1170                }
1171        }
1172
1173        if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
1174                struct driver_info      *info = dev->driver_info;
1175                int                     retval = 0;
1176
1177                clear_bit (EVENT_LINK_RESET, &dev->flags);
1178                status = usb_autopm_get_interface(dev->intf);
1179                if (status < 0)
1180                        goto skip_reset;
1181                if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
1182                        usb_autopm_put_interface(dev->intf);
1183skip_reset:
1184                        netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
1185                                    retval,
1186                                    dev->udev->bus->bus_name,
1187                                    dev->udev->devpath,
1188                                    info->description);
1189                } else {
1190                        usb_autopm_put_interface(dev->intf);
1191                }
1192
1193                /* handle link change from link resetting */
1194                __handle_link_change(dev);
1195        }
1196
1197        if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
1198                __handle_link_change(dev);
1199
1200        if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
1201                __handle_set_rx_mode(dev);
1202
1203
1204        if (dev->flags)
1205                netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
1206}
1207
1208/*-------------------------------------------------------------------------*/
1209
1210static void tx_complete (struct urb *urb)
1211{
1212        struct sk_buff          *skb = (struct sk_buff *) urb->context;
1213        struct skb_data         *entry = (struct skb_data *) skb->cb;
1214        struct usbnet           *dev = entry->dev;
1215
1216        if (urb->status == 0) {
1217                dev->net->stats.tx_packets += entry->packets;
1218                dev->net->stats.tx_bytes += entry->length;
1219        } else {
1220                dev->net->stats.tx_errors++;
1221
1222                switch (urb->status) {
1223                case -EPIPE:
1224                        usbnet_defer_kevent (dev, EVENT_TX_HALT);
1225                        break;
1226
1227                /* software-driven interface shutdown */
1228                case -ECONNRESET:               // async unlink
1229                case -ESHUTDOWN:                // hardware gone
1230                        break;
1231
1232                /* like rx, tx gets controller i/o faults during hub_wq
1233                 * delays and so it uses the same throttling mechanism.
1234                 */
1235                case -EPROTO:
1236                case -ETIME:
1237                case -EILSEQ:
1238                        usb_mark_last_busy(dev->udev);
1239                        if (!timer_pending (&dev->delay)) {
1240                                mod_timer (&dev->delay,
1241                                        jiffies + THROTTLE_JIFFIES);
1242                                netif_dbg(dev, link, dev->net,
1243                                          "tx throttle %d\n", urb->status);
1244                        }
1245                        netif_stop_queue (dev->net);
1246                        break;
1247                default:
1248                        netif_dbg(dev, tx_err, dev->net,
1249                                  "tx err %d\n", entry->urb->status);
1250                        break;
1251                }
1252        }
1253
1254        usb_autopm_put_interface_async(dev->intf);
1255        (void) defer_bh(dev, skb, &dev->txq, tx_done);
1256}
1257
1258/*-------------------------------------------------------------------------*/
1259
1260void usbnet_tx_timeout (struct net_device *net)
1261{
1262        struct usbnet           *dev = netdev_priv(net);
1263
1264        unlink_urbs (dev, &dev->txq);
1265        tasklet_schedule (&dev->bh);
1266        /* this needs to be handled individually because the generic layer
1267         * doesn't know what is sufficient and could not restore private
1268         * information if a remedy of an unconditional reset were used.
1269         */
1270        if (dev->driver_info->recover)
1271                (dev->driver_info->recover)(dev);
1272}
1273EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
1274
1275/*-------------------------------------------------------------------------*/
1276
1277static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1278{
1279        unsigned num_sgs, total_len = 0;
1280        int i, s = 0;
1281
1282        num_sgs = skb_shinfo(skb)->nr_frags + 1;
1283        if (num_sgs == 1)
1284                return 0;
1285
1286        /* reserve one for zero packet */
1287        urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
1288                          GFP_ATOMIC);
1289        if (!urb->sg)
1290                return -ENOMEM;
1291
1292        urb->num_sgs = num_sgs;
1293        sg_init_table(urb->sg, urb->num_sgs + 1);
1294
1295        sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
1296        total_len += skb_headlen(skb);
1297
1298        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1299                struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
1300
1301                total_len += skb_frag_size(f);
1302                sg_set_page(&urb->sg[i + s], f->page.p, f->size,
1303                                f->page_offset);
1304        }
1305        urb->transfer_buffer_length = total_len;
1306
1307        return 1;
1308}
1309
1310netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1311                                     struct net_device *net)
1312{
1313        struct usbnet           *dev = netdev_priv(net);
1314        unsigned int                    length;
1315        struct urb              *urb = NULL;
1316        struct skb_data         *entry;
1317        struct driver_info      *info = dev->driver_info;
1318        unsigned long           flags;
1319        int retval;
1320
1321        if (skb)
1322                skb_tx_timestamp(skb);
1323
1324        // some devices want funky USB-level framing, for
1325        // win32 driver (usually) and/or hardware quirks
1326        if (info->tx_fixup) {
1327                skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
1328                if (!skb) {
1329                        /* packet collected; minidriver waiting for more */
1330                        if (info->flags & FLAG_MULTI_PACKET)
1331                                goto not_drop;
1332                        netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
1333                        goto drop;
1334                }
1335        }
1336
1337        if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
1338                netif_dbg(dev, tx_err, dev->net, "no urb\n");
1339                goto drop;
1340        }
1341
1342        entry = (struct skb_data *) skb->cb;
1343        entry->urb = urb;
1344        entry->dev = dev;
1345
1346        usb_fill_bulk_urb (urb, dev->udev, dev->out,
1347                        skb->data, skb->len, tx_complete, skb);
1348        if (dev->can_dma_sg) {
1349                if (build_dma_sg(skb, urb) < 0)
1350                        goto drop;
1351        }
1352        length = urb->transfer_buffer_length;
1353
1354        /* don't assume the hardware handles USB_ZERO_PACKET
1355         * NOTE:  strictly conforming cdc-ether devices should expect
1356         * the ZLP here, but ignore the one-byte packet.
1357         * NOTE2: CDC NCM specification is different from CDC ECM when
1358         * handling ZLP/short packets, so cdc_ncm driver will make short
1359         * packet itself if needed.
1360         */
1361        if (length % dev->maxpacket == 0) {
1362                if (!(info->flags & FLAG_SEND_ZLP)) {
1363                        if (!(info->flags & FLAG_MULTI_PACKET)) {
1364                                length++;
1365                                if (skb_tailroom(skb) && !urb->num_sgs) {
1366                                        skb->data[skb->len] = 0;
1367                                        __skb_put(skb, 1);
1368                                } else if (urb->num_sgs)
1369                                        sg_set_buf(&urb->sg[urb->num_sgs++],
1370                                                        dev->padding_pkt, 1);
1371                        }
1372                } else
1373                        urb->transfer_flags |= URB_ZERO_PACKET;
1374        }
1375        urb->transfer_buffer_length = length;
1376
1377        if (info->flags & FLAG_MULTI_PACKET) {
1378                /* Driver has set number of packets and a length delta.
1379                 * Calculate the complete length and ensure that it's
1380                 * positive.
1381                 */
1382                entry->length += length;
1383                if (WARN_ON_ONCE(entry->length <= 0))
1384                        entry->length = length;
1385        } else {
1386                usbnet_set_skb_tx_stats(skb, 1, length);
1387        }
1388
1389        spin_lock_irqsave(&dev->txq.lock, flags);
1390        retval = usb_autopm_get_interface_async(dev->intf);
1391        if (retval < 0) {
1392                spin_unlock_irqrestore(&dev->txq.lock, flags);
1393                goto drop;
1394        }
1395
1396#ifdef CONFIG_PM
 1397        /* if this triggers, the device is still asleep */
1398        if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
1399                /* transmission will be done in resume */
1400                usb_anchor_urb(urb, &dev->deferred);
1401                /* no use to process more packets */
1402                netif_stop_queue(net);
1403                usb_put_urb(urb);
1404                spin_unlock_irqrestore(&dev->txq.lock, flags);
1405                netdev_dbg(dev->net, "Delaying transmission for resumption\n");
1406                goto deferred;
1407        }
1408#endif
1409
1410        switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
1411        case -EPIPE:
1412                netif_stop_queue (net);
1413                usbnet_defer_kevent (dev, EVENT_TX_HALT);
1414                usb_autopm_put_interface_async(dev->intf);
1415                break;
1416        default:
1417                usb_autopm_put_interface_async(dev->intf);
1418                netif_dbg(dev, tx_err, dev->net,
1419                          "tx: submit urb err %d\n", retval);
1420                break;
1421        case 0:
1422                netif_trans_update(net);
1423                __usbnet_queue_skb(&dev->txq, skb, tx_start);
1424                if (dev->txq.qlen >= TX_QLEN (dev))
1425                        netif_stop_queue (net);
1426        }
1427        spin_unlock_irqrestore (&dev->txq.lock, flags);
1428
1429        if (retval) {
1430                netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
1431drop:
1432                dev->net->stats.tx_dropped++;
1433not_drop:
1434                if (skb)
1435                        dev_kfree_skb_any (skb);
1436                if (urb) {
1437                        kfree(urb->sg);
1438                        usb_free_urb(urb);
1439                }
1440        } else
1441                netif_dbg(dev, tx_queued, dev->net,
1442                          "> tx, len %u, type 0x%x\n", length, skb->protocol);
1443#ifdef CONFIG_PM
1444deferred:
1445#endif
1446        return NETDEV_TX_OK;
1447}
1448EXPORT_SYMBOL_GPL(usbnet_start_xmit);
1449
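/* Refill the RX ring: allocate and submit at most ten URBs per call so each
 * refill pass stays bounded; stop at the first allocation or submit error.
 */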
1450static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
1451{
1452        struct urb      *urb;
1453        int             i;
1454        int             ret = 0;
1455
1456        /* don't refill the queue all at once */
1457        for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
1458                urb = usb_alloc_urb(0, flags);
1459                if (urb != NULL) {
1460                        ret = rx_submit(dev, urb, flags);
1461                        if (ret)
1462                                goto err;
1463                } else {
1464                        ret = -ENOMEM;
1465                        goto err;
1466                }
1467        }
1468err:
1469        return ret;
1470}
1471
1472/*-------------------------------------------------------------------------*/
1473
1474// tasklet (work deferred from completions, in_irq) or timer
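// usbnet_bh drains dev->done: rx_done skbs are handed to rx_process(), while
// tx_done and rx_cleanup entries have their URB and skb freed.  It then either
// wakes anyone waiting for the queues to empty, or tops up the rx ring and
// reopens the tx queue.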
1475
1476static void usbnet_bh (unsigned long param)
1477{
1478        struct usbnet           *dev = (struct usbnet *) param;
1479        struct sk_buff          *skb;
1480        struct skb_data         *entry;
1481
1482        while ((skb = skb_dequeue (&dev->done))) {
1483                entry = (struct skb_data *) skb->cb;
1484                switch (entry->state) {
1485                case rx_done:
1486                        entry->state = rx_cleanup;
1487                        rx_process (dev, skb);
1488                        continue;
1489                case tx_done:
1490                        kfree(entry->urb->sg);
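                        /* fall through: tx_done shares the URB/skb release below */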
1491                case rx_cleanup:
1492                        usb_free_urb (entry->urb);
1493                        dev_kfree_skb (skb);
1494                        continue;
1495                default:
1496                        netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
1497                }
1498        }
1499
1500        /* restart RX after it was disabled due to a high error rate */
1501        clear_bit(EVENT_RX_KILL, &dev->flags);
1502
1503        /* waiting for all pending urbs to complete?
1504         * only then can we forgo submitting anew
1505         */
1506        if (waitqueue_active(&dev->wait)) {
1507                if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
1508                        wake_up_all(&dev->wait);
1509
1510        // or are we maybe short a few urbs?
1511        } else if (netif_running (dev->net) &&
1512                   netif_device_present (dev->net) &&
1513                   netif_carrier_ok(dev->net) &&
1514                   !timer_pending(&dev->delay) &&
1515                   !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
1516                   !test_bit(EVENT_RX_HALT, &dev->flags)) {
1517                int     temp = dev->rxq.qlen;
1518
1519                if (temp < RX_QLEN(dev)) {
1520                        if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
1521                                return;
1522                        if (temp != dev->rxq.qlen)
1523                                netif_dbg(dev, link, dev->net,
1524                                          "rxqlen %d --> %d\n",
1525                                          temp, dev->rxq.qlen);
1526                        if (dev->rxq.qlen < RX_QLEN(dev))
1527                                tasklet_schedule (&dev->bh);
1528                }
1529                if (dev->txq.qlen < TX_QLEN (dev))
1530                        netif_wake_queue (dev->net);
1531        }
1532}
1533
1534
1535/*-------------------------------------------------------------------------
1536 *
1537 * USB Device Driver support
1538 *
1539 *-------------------------------------------------------------------------*/
1540
1541// precondition: never called in_interrupt
1542
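/* Tear-down mirrors probe: unregister the netdev, flush deferred work and
 * anchored tx URBs, give the minidriver its unbind() callback, then release
 * the status URB and the padding buffer before freeing the netdev.
 */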
1543void usbnet_disconnect (struct usb_interface *intf)
1544{
1545        struct usbnet           *dev;
1546        struct usb_device       *xdev;
1547        struct net_device       *net;
1548
1549        dev = usb_get_intfdata(intf);
1550        usb_set_intfdata(intf, NULL);
1551        if (!dev)
1552                return;
1553
1554        xdev = interface_to_usbdev (intf);
1555
1556        netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
1557                   intf->dev.driver->name,
1558                   xdev->bus->bus_name, xdev->devpath,
1559                   dev->driver_info->description);
1560
1561        net = dev->net;
1562        unregister_netdev (net);
1563
1564        cancel_work_sync(&dev->kevent);
1565
1566        usb_scuttle_anchored_urbs(&dev->deferred);
1567
1568        if (dev->driver_info->unbind)
1569                dev->driver_info->unbind (dev, intf);
1570
1571        usb_kill_urb(dev->interrupt);
1572        usb_free_urb(dev->interrupt);
1573        kfree(dev->padding_pkt);
1574
1575        free_netdev(net);
1576}
1577EXPORT_SYMBOL_GPL(usbnet_disconnect);
1578
1579static const struct net_device_ops usbnet_netdev_ops = {
1580        .ndo_open               = usbnet_open,
1581        .ndo_stop               = usbnet_stop,
1582        .ndo_start_xmit         = usbnet_start_xmit,
1583        .ndo_tx_timeout         = usbnet_tx_timeout,
1584        .ndo_set_rx_mode        = usbnet_set_rx_mode,
1585        .ndo_change_mtu         = usbnet_change_mtu,
1586        .ndo_set_mac_address    = eth_mac_addr,
1587        .ndo_validate_addr      = eth_validate_addr,
1588};
1589
1590/*-------------------------------------------------------------------------*/
1591
1592// precondition: never called in_interrupt
1593
1594static struct device_type wlan_type = {
1595        .name   = "wlan",
1596};
1597
1598static struct device_type wwan_type = {
1599        .name   = "wwan",
1600};
1601
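/* Generic probe: allocate the etherdev, set up queues, locks and timers, run
 * the minidriver's bind() (or fall back to the default endpoint lookup),
 * choose an interface name from the driver flags, and register the netdev.
 */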
1602int
1603usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1604{
1605        struct usbnet                   *dev;
1606        struct net_device               *net;
1607        struct usb_host_interface       *interface;
1608        struct driver_info              *info;
1609        struct usb_device               *xdev;
1610        int                             status;
1611        const char                      *name;
1612        struct usb_driver       *driver = to_usb_driver(udev->dev.driver);
1613
1614        /* usbnet already uses usb runtime pm, so we have to enable the feature
1615         * for the usb interface; otherwise usb_autopm_get_interface may return
1616         * failure if RUNTIME_PM is enabled.
1617         */
1618        if (!driver->supports_autosuspend) {
1619                driver->supports_autosuspend = 1;
1620                pm_runtime_enable(&udev->dev);
1621        }
1622
1623        name = udev->dev.driver->name;
1624        info = (struct driver_info *) prod->driver_info;
1625        if (!info) {
1626                dev_dbg (&udev->dev, "blacklisted by %s\n", name);
1627                return -ENODEV;
1628        }
1629        xdev = interface_to_usbdev (udev);
1630        interface = udev->cur_altsetting;
1631
1632        status = -ENOMEM;
1633
1634        // set up our own records
1635        net = alloc_etherdev(sizeof(*dev));
1636        if (!net)
1637                goto out;
1638
1639        /* netdev_printk() needs this so do it as early as possible */
1640        SET_NETDEV_DEV(net, &udev->dev);
1641
1642        dev = netdev_priv(net);
1643        dev->udev = xdev;
1644        dev->intf = udev;
1645        dev->driver_info = info;
1646        dev->driver_name = name;
1647        dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1648                                | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1649        init_waitqueue_head(&dev->wait);
1650        skb_queue_head_init (&dev->rxq);
1651        skb_queue_head_init (&dev->txq);
1652        skb_queue_head_init (&dev->done);
1653        skb_queue_head_init(&dev->rxq_pause);
1654        dev->bh.func = usbnet_bh;
1655        dev->bh.data = (unsigned long) dev;
1656        INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
1657        init_usb_anchor(&dev->deferred);
1658        dev->delay.function = usbnet_bh;
1659        dev->delay.data = (unsigned long) dev;
1660        init_timer (&dev->delay);
1661        mutex_init (&dev->phy_mutex);
1662        mutex_init(&dev->interrupt_mutex);
1663        dev->interrupt_count = 0;
1664
1665        dev->net = net;
1666        strcpy (net->name, "usb%d");
1667        memcpy (net->dev_addr, node_id, sizeof node_id);
1668
1669        /* rx and tx sides can use different message sizes;
1670         * bind() should set rx_urb_size in that case.
1671         */
1672        dev->hard_mtu = net->mtu + net->hard_header_len;
1673
1674        net->netdev_ops = &usbnet_netdev_ops;
1675        net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1676        net->ethtool_ops = &usbnet_ethtool_ops;
1677
1678        // allow device-specific bind/init procedures
1679        // NOTE net->name still not usable ...
1680        if (info->bind) {
1681                status = info->bind (dev, udev);
1682                if (status < 0)
1683                        goto out1;
1684
1685                // heuristic:  "usb%d" for links we know are two-host,
1686                // else "eth%d" when there's reasonable doubt.  userspace
1687                // can rename the link if it knows better.
1688                if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
1689                    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
1690                     (net->dev_addr [0] & 0x02) == 0))
1691                        strcpy (net->name, "eth%d");
1692                /* WLAN devices should always be named "wlan%d" */
1693                if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1694                        strcpy(net->name, "wlan%d");
1695                /* WWAN devices should always be named "wwan%d" */
1696                if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1697                        strcpy(net->name, "wwan%d");
1698
1699                /* devices that cannot do ARP */
1700                if ((dev->driver_info->flags & FLAG_NOARP) != 0)
1701                        net->flags |= IFF_NOARP;
1702
1703                /* maybe the remote can't receive an Ethernet MTU */
1704                if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1705                        net->mtu = dev->hard_mtu - net->hard_header_len;
1706        } else if (!info->in || !info->out)
1707                status = usbnet_get_endpoints (dev, udev);
1708        else {
1709                dev->in = usb_rcvbulkpipe (xdev, info->in);
1710                dev->out = usb_sndbulkpipe (xdev, info->out);
1711                if (!(info->flags & FLAG_NO_SETINT))
1712                        status = usb_set_interface (xdev,
1713                                interface->desc.bInterfaceNumber,
1714                                interface->desc.bAlternateSetting);
1715                else
1716                        status = 0;
1717
1718        }
1719        if (status >= 0 && dev->status)
1720                status = init_status (dev, udev);
1721        if (status < 0)
1722                goto out3;
1723
1724        if (!dev->rx_urb_size)
1725                dev->rx_urb_size = dev->hard_mtu;
1726        dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1727
1728        /* let userspace know we have a random address */
1729        if (ether_addr_equal(net->dev_addr, node_id))
1730                net->addr_assign_type = NET_ADDR_RANDOM;
1731
1732        if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1733                SET_NETDEV_DEVTYPE(net, &wlan_type);
1734        if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1735                SET_NETDEV_DEVTYPE(net, &wwan_type);
1736
1737        /* initialize max rx_qlen and tx_qlen */
1738        usbnet_update_max_qlen(dev);
1739
1740        if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
1741                !(info->flags & FLAG_MULTI_PACKET)) {
1742                dev->padding_pkt = kzalloc(1, GFP_KERNEL);
1743                if (!dev->padding_pkt) {
1744                        status = -ENOMEM;
1745                        goto out4;
1746                }
1747        }
1748
1749        status = register_netdev (net);
1750        if (status)
1751                goto out5;
1752        netif_info(dev, probe, dev->net,
1753                   "register '%s' at usb-%s-%s, %s, %pM\n",
1754                   udev->dev.driver->name,
1755                   xdev->bus->bus_name, xdev->devpath,
1756                   dev->driver_info->description,
1757                   net->dev_addr);
1758
1759        // ok, it's ready to go.
1760        usb_set_intfdata (udev, dev);
1761
1762        netif_device_attach (net);
1763
1764        if (dev->driver_info->flags & FLAG_LINK_INTR)
1765                usbnet_link_change(dev, 0, 0);
1766
1767        return 0;
1768
1769out5:
1770        kfree(dev->padding_pkt);
1771out4:
1772        usb_free_urb(dev->interrupt);
1773out3:
1774        if (info->unbind)
1775                info->unbind (dev, udev);
1776out1:
1777        /* subdrivers must undo all they did in bind() if it
1778         * fails, but we may fail later; a deferred kevent
1779         * may trigger an error resubmitting itself and, worse,
1780         * schedule a timer. So we kill it all just in case.
1781         */
1782        cancel_work_sync(&dev->kevent);
1783        del_timer_sync(&dev->delay);
1784        free_netdev(net);
1785out:
1786        return status;
1787}
1788EXPORT_SYMBOL_GPL(usbnet_probe);
1789
1790/*-------------------------------------------------------------------------*/
1791
1792/*
1793 * suspend the whole driver as soon as the first interface is suspended;
1794 * resume only when the last interface is resumed
1795 */
1796
1797int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1798{
1799        struct usbnet           *dev = usb_get_intfdata(intf);
1800
1801        if (!dev->suspend_count++) {
1802                spin_lock_irq(&dev->txq.lock);
1803                /* don't autosuspend while transmitting */
1804                if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
1805                        dev->suspend_count--;
1806                        spin_unlock_irq(&dev->txq.lock);
1807                        return -EBUSY;
1808                } else {
1809                        set_bit(EVENT_DEV_ASLEEP, &dev->flags);
1810                        spin_unlock_irq(&dev->txq.lock);
1811                }
1812                /*
1813                 * accelerate emptying of the rx and tx queues, to avoid
1814                 * having everything error out.
1815                 */
1816                netif_device_detach (dev->net);
1817                usbnet_terminate_urbs(dev);
1818                __usbnet_status_stop_force(dev);
1819
1820                /*
1821                 * reattach so runtime management can use and
1822                 * wake the device
1823                 */
1824                netif_device_attach (dev->net);
1825        }
1826        return 0;
1827}
1828EXPORT_SYMBOL_GPL(usbnet_suspend);
1829
1830int usbnet_resume (struct usb_interface *intf)
1831{
1832        struct usbnet           *dev = usb_get_intfdata(intf);
1833        struct sk_buff          *skb;
1834        struct urb              *res;
1835        int                     retval;
1836
1837        if (!--dev->suspend_count) {
1838                /* resume interrupt URB if it was previously submitted */
1839                __usbnet_status_start_force(dev, GFP_NOIO);
1840
1841                spin_lock_irq(&dev->txq.lock);
1842                while ((res = usb_get_from_anchor(&dev->deferred))) {
1843
1844                        skb = (struct sk_buff *)res->context;
1845                        retval = usb_submit_urb(res, GFP_ATOMIC);
1846                        if (retval < 0) {
1847                                dev_kfree_skb_any(skb);
1848                                kfree(res->sg);
1849                                usb_free_urb(res);
1850                                usb_autopm_put_interface_async(dev->intf);
1851                        } else {
1852                                netif_trans_update(dev->net);
1853                                __skb_queue_tail(&dev->txq, skb);
1854                        }
1855                }
1856
1857                smp_mb();
1858                clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
1859                spin_unlock_irq(&dev->txq.lock);
1860
1861                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1862                        /* handle remote wakeup ASAP
1863                         * we cannot race against stop
1864                         */
1865                        if (netif_device_present(dev->net) &&
1866                                !timer_pending(&dev->delay) &&
1867                                !test_bit(EVENT_RX_HALT, &dev->flags))
1868                                        rx_alloc_submit(dev, GFP_NOIO);
1869
1870                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
1871                                netif_tx_wake_all_queues(dev->net);
1872                        tasklet_schedule (&dev->bh);
1873                }
1874        }
1875
1876        if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
1877                usb_autopm_get_interface_no_resume(intf);
1878
1879        return 0;
1880}
1881EXPORT_SYMBOL_GPL(usbnet_resume);
1882
1883/*
1884 * Either a subdriver implements manage_power, in which case the device is
1885 * assumed to always be ready to be suspended, or it reports its readiness
1886 * to be suspended explicitly
1887 */
1888void usbnet_device_suggests_idle(struct usbnet *dev)
1889{
1890        if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
1891                dev->intf->needs_remote_wakeup = 1;
1892                usb_autopm_put_interface_async(dev->intf);
1893        }
1894}
1895EXPORT_SYMBOL(usbnet_device_suggests_idle);
1896
1897/*
1898 * For devices that can do without special commands
1899 */
1900int usbnet_manage_power(struct usbnet *dev, int on)
1901{
1902        dev->intf->needs_remote_wakeup = on;
1903        return 0;
1904}
1905EXPORT_SYMBOL(usbnet_manage_power);
1906
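/* Minidrivers report carrier changes here, typically from their status URB
 * completion; e.g. a hypothetical handler might call
 * usbnet_link_change(dev, true, false) when the device signals link-up.
 */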
1907void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
1908{
1909        /* update link after link is reset */
1910        if (link && !need_reset)
1911                netif_carrier_on(dev->net);
1912        else
1913                netif_carrier_off(dev->net);
1914
1915        if (need_reset && link)
1916                usbnet_defer_kevent(dev, EVENT_LINK_RESET);
1917        else
1918                usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
1919}
1920EXPORT_SYMBOL(usbnet_link_change);
1921
1922/*-------------------------------------------------------------------------*/
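/* Synchronous control-IN helper: data is bounced through a kmalloc'd buffer
 * because usb_control_msg() needs a DMA-able transfer buffer.
 */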
1923static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1924                             u16 value, u16 index, void *data, u16 size)
1925{
1926        void *buf = NULL;
1927        int err = -ENOMEM;
1928
1929        netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
1930                   " value=0x%04x index=0x%04x size=%d\n",
1931                   cmd, reqtype, value, index, size);
1932
1933        if (data) {
1934                buf = kmalloc(size, GFP_KERNEL);
1935                if (!buf)
1936                        goto out;
1937        }
1938
1939        err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1940                              cmd, reqtype, value, index, buf, size,
1941                              USB_CTRL_GET_TIMEOUT);
1942        if (err > 0 && err <= size)
1943                memcpy(data, buf, err);
1944        kfree(buf);
1945out:
1946        return err;
1947}
1948
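/* Synchronous control-OUT helper: the caller's payload is duplicated into a
 * DMA-able buffer before submission.
 */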
1949static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1950                              u16 value, u16 index, const void *data,
1951                              u16 size)
1952{
1953        void *buf = NULL;
1954        int err = -ENOMEM;
1955
1956        netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
1957                   " value=0x%04x index=0x%04x size=%d\n",
1958                   cmd, reqtype, value, index, size);
1959
1960        if (data) {
1961                buf = kmemdup(data, size, GFP_KERNEL);
1962                if (!buf)
1963                        goto out;
1964        }
1965
1966        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1967                              cmd, reqtype, value, index, buf, size,
1968                              USB_CTRL_SET_TIMEOUT);
1969        kfree(buf);
1970
1971out:
1972        return err;
1973}
1974
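/* Walk the class-specific (CS_INTERFACE) descriptors in 'buffer', recording
 * each recognized CDC functional descriptor in 'hdr'.  Returns the number of
 * descriptors parsed, or -EINVAL when a descriptor that must be unique is
 * duplicated.
 */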
1975int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
1976                                struct usb_interface *intf,
1977                                u8 *buffer,
1978                                int buflen)
1979{
1980        /* duplicates are ignored */
1981        struct usb_cdc_union_desc *union_header = NULL;
1982
1983        /* duplicates are not tolerated */
1984        struct usb_cdc_header_desc *header = NULL;
1985        struct usb_cdc_ether_desc *ether = NULL;
1986        struct usb_cdc_mdlm_detail_desc *detail = NULL;
1987        struct usb_cdc_mdlm_desc *desc = NULL;
1988
1989        unsigned int elength;
1990        int cnt = 0;
1991
1992        memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
1993        hdr->phonet_magic_present = false;
1994        while (buflen > 0) {
1995                elength = buffer[0];
1996                if (!elength) {
1997                        dev_err(&intf->dev, "skipping garbage byte\n");
1998                        elength = 1;
1999                        goto next_desc;
2000                }
2001                if (buffer[1] != USB_DT_CS_INTERFACE) {
2002                        dev_err(&intf->dev, "skipping garbage\n");
2003                        goto next_desc;
2004                }
2005
2006                switch (buffer[2]) {
2007                case USB_CDC_UNION_TYPE: /* we've found it */
2008                        if (elength < sizeof(struct usb_cdc_union_desc))
2009                                goto next_desc;
2010                        if (union_header) {
2011                                dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
2012                                goto next_desc;
2013                        }
2014                        union_header = (struct usb_cdc_union_desc *)buffer;
2015                        break;
2016                case USB_CDC_COUNTRY_TYPE:
2017                        if (elength < sizeof(struct usb_cdc_country_functional_desc))
2018                                goto next_desc;
2019                        hdr->usb_cdc_country_functional_desc =
2020                                (struct usb_cdc_country_functional_desc *)buffer;
2021                        break;
2022                case USB_CDC_HEADER_TYPE:
2023                        if (elength != sizeof(struct usb_cdc_header_desc))
2024                                goto next_desc;
2025                        if (header)
2026                                return -EINVAL;
2027                        header = (struct usb_cdc_header_desc *)buffer;
2028                        break;
2029                case USB_CDC_ACM_TYPE:
2030                        if (elength < sizeof(struct usb_cdc_acm_descriptor))
2031                                goto next_desc;
2032                        hdr->usb_cdc_acm_descriptor =
2033                                (struct usb_cdc_acm_descriptor *)buffer;
2034                        break;
2035                case USB_CDC_ETHERNET_TYPE:
2036                        if (elength != sizeof(struct usb_cdc_ether_desc))
2037                                goto next_desc;
2038                        if (ether)
2039                                return -EINVAL;
2040                        ether = (struct usb_cdc_ether_desc *)buffer;
2041                        break;
2042                case USB_CDC_CALL_MANAGEMENT_TYPE:
2043                        if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
2044                                goto next_desc;
2045                        hdr->usb_cdc_call_mgmt_descriptor =
2046                                (struct usb_cdc_call_mgmt_descriptor *)buffer;
2047                        break;
2048                case USB_CDC_DMM_TYPE:
2049                        if (elength < sizeof(struct usb_cdc_dmm_desc))
2050                                goto next_desc;
2051                        hdr->usb_cdc_dmm_desc =
2052                                (struct usb_cdc_dmm_desc *)buffer;
2053                        break;
2054                case USB_CDC_MDLM_TYPE:
2055                        if (elength < sizeof(struct usb_cdc_mdlm_desc))
2056                                goto next_desc;
2057                        if (desc)
2058                                return -EINVAL;
2059                        desc = (struct usb_cdc_mdlm_desc *)buffer;
2060                        break;
2061                case USB_CDC_MDLM_DETAIL_TYPE:
2062                        if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
2063                                goto next_desc;
2064                        if (detail)
2065                                return -EINVAL;
2066                        detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
2067                        break;
2068                case USB_CDC_NCM_TYPE:
2069                        if (elength < sizeof(struct usb_cdc_ncm_desc))
2070                                goto next_desc;
2071                        hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
2072                        break;
2073                case USB_CDC_MBIM_TYPE:
2074                        if (elength < sizeof(struct usb_cdc_mbim_desc))
2075                                goto next_desc;
2076
2077                        hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
2078                        break;
2079                case USB_CDC_MBIM_EXTENDED_TYPE:
2080                        if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
2081                                break;
2082                        hdr->usb_cdc_mbim_extended_desc =
2083                                (struct usb_cdc_mbim_extended_desc *)buffer;
2084                        break;
2085                case CDC_PHONET_MAGIC_NUMBER:
2086                        hdr->phonet_magic_present = true;
2087                        break;
2088                default:
2089                        /*
2090                         * there are LOTS more CDC descriptors that
2091                         * could legitimately be found here.
2092                         */
2093                        dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %u\n",
2094                                        buffer[2], elength);
2095                        goto next_desc;
2096                }
2097                cnt++;
2098next_desc:
2099                buflen -= elength;
2100                buffer += elength;
2101        }
2102        hdr->usb_cdc_union_desc = union_header;
2103        hdr->usb_cdc_header_desc = header;
2104        hdr->usb_cdc_mdlm_detail_desc = detail;
2105        hdr->usb_cdc_mdlm_desc = desc;
2106        hdr->usb_cdc_ether_desc = ether;
2107        return cnt;
2108}
2109
2110EXPORT_SYMBOL(cdc_parse_cdc_header);
2111
2112/*
2113 * This function must not be called from inside a suspend/resume callback,
2114 * otherwise it will deadlock.
2115 */
2116int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
2117                    u16 value, u16 index, void *data, u16 size)
2118{
2119        int ret;
2120
2121        if (usb_autopm_get_interface(dev->intf) < 0)
2122                return -ENODEV;
2123        ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
2124                                data, size);
2125        usb_autopm_put_interface(dev->intf);
2126        return ret;
2127}
2128EXPORT_SYMBOL_GPL(usbnet_read_cmd);
2129
2130/*
2131 * This function must not be called from inside a suspend/resume callback,
2132 * otherwise it will deadlock.
2133 */
2134int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
2135                     u16 value, u16 index, const void *data, u16 size)
2136{
2137        int ret;
2138
2139        if (usb_autopm_get_interface(dev->intf) < 0)
2140                return -ENODEV;
2141        ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
2142                                 data, size);
2143        usb_autopm_put_interface(dev->intf);
2144        return ret;
2145}
2146EXPORT_SYMBOL_GPL(usbnet_write_cmd);
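/* Usage sketch (names are illustrative, not a real device protocol): a
 * minidriver could write a vendor register with something like
 *
 *      u8 val = 1;
 *      usbnet_write_cmd(dev, VENDOR_WRITE_REGISTER,
 *                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 *                       reg, 0, &val, sizeof(val));
 *
 * provided it is not running in a suspend/resume path (see above).
 */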
2147
2148/*
2149 * This function can safely be called from inside a suspend/resume callback,
2150 * and generally should only be called from one.
2151 */
2152int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
2153                          u16 value, u16 index, void *data, u16 size)
2154{
2155        return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
2156                                 data, size);
2157}
2158EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
2159
2160/*
2161 * This function can safely be called from inside a suspend/resume callback,
2162 * and generally should only be called from one.
2163 */
2164int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
2165                          u16 value, u16 index, const void *data,
2166                          u16 size)
2167{
2168        return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
2169                                  data, size);
2170}
2171EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
2172
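/* Completion handler for usbnet_write_cmd_async(): releases the control
 * request and the URB; the transfer buffer itself is freed by the USB core
 * because the URB was submitted with URB_FREE_BUFFER.
 */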
2173static void usbnet_async_cmd_cb(struct urb *urb)
2174{
2175        struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
2176        int status = urb->status;
2177
2178        if (status < 0)
2179                dev_dbg(&urb->dev->dev, "%s failed with %d\n",
2180                        __func__, status);
2181
2182        kfree(req);
2183        usb_free_urb(urb);
2184}
2185
2186/*
2187 * The caller must make sure the device cannot be put into a suspended
2188 * state until the control URB completes.
2189 */
2190int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
2191                           u16 value, u16 index, const void *data, u16 size)
2192{
2193        struct usb_ctrlrequest *req = NULL;
2194        struct urb *urb;
2195        int err = -ENOMEM;
2196        void *buf = NULL;
2197
2198        netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
2199                   " value=0x%04x index=0x%04x size=%d\n",
2200                   cmd, reqtype, value, index, size);
2201
2202        urb = usb_alloc_urb(0, GFP_ATOMIC);
2203        if (!urb) {
2204                netdev_err(dev->net, "Error allocating URB in"
2205                           " %s!\n", __func__);
2206                goto fail;
2207        }
2208
2209        if (data) {
2210                buf = kmemdup(data, size, GFP_ATOMIC);
2211                if (!buf) {
2212                        netdev_err(dev->net, "Error allocating buffer"
2213                                   " in %s!\n", __func__);
2214                        goto fail_free;
2215                }
2216        }
2217
2218        req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
2219        if (!req)
2220                goto fail_free_buf;
2221
2222        req->bRequestType = reqtype;
2223        req->bRequest = cmd;
2224        req->wValue = cpu_to_le16(value);
2225        req->wIndex = cpu_to_le16(index);
2226        req->wLength = cpu_to_le16(size);
2227
2228        usb_fill_control_urb(urb, dev->udev,
2229                             usb_sndctrlpipe(dev->udev, 0),
2230                             (void *)req, buf, size,
2231                             usbnet_async_cmd_cb, req);
2232        urb->transfer_flags |= URB_FREE_BUFFER;
2233
2234        err = usb_submit_urb(urb, GFP_ATOMIC);
2235        if (err < 0) {
2236                netdev_err(dev->net, "Error submitting the control"
2237                           " message: status=%d\n", err);
2238                goto fail_free;
2239        }
2240        return 0;
2241
2242fail_free_buf:
2243        kfree(buf);
2244fail_free:
2245        kfree(req);
2246        usb_free_urb(urb);
2247fail:
2248        return err;
2249
2250}
2251EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
2252/*-------------------------------------------------------------------------*/
2253
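/* Module init: a compile-time check that skb->cb is large enough to hold
 * struct skb_data, then seed the shared random MAC (node_id) used as the
 * default address for devices whose bind() does not provide one.
 */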
2254static int __init usbnet_init(void)
2255{
2256        /* Compiler should optimize this out. */
2257        BUILD_BUG_ON(
2258                FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
2259
2260        eth_random_addr(node_id);
2261        return 0;
2262}
2263module_init(usbnet_init);
2264
2265static void __exit usbnet_exit(void)
2266{
2267}
2268module_exit(usbnet_exit);
2269
2270MODULE_AUTHOR("David Brownell");
2271MODULE_DESCRIPTION("USB network driver framework");
2272MODULE_LICENSE("GPL");
2273