linux/drivers/usb/gadget/u_ether.c
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION   "29-May-2008"

struct eth_dev {
        /* lock is held while accessing port_usb
         */
        spinlock_t              lock;
        struct gether           *port_usb;

        struct net_device       *net;
        struct usb_gadget       *gadget;

        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
        atomic_t                tx_qlen;

        struct sk_buff_head     rx_frames;

        unsigned                header_len;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);

        struct work_struct      work;

        unsigned long           todo;
#define WORK_RX_MEMORY          0

        bool                    zlp;
        u8                      host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA        20      /* bytes guarding against rx overflows */

#define DEFAULT_QLEN    2       /* double buffering by default */

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
        if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
                                            gadget->speed == USB_SPEED_SUPER))
                return qmult * DEFAULT_QLEN;
        else
                return DEFAULT_QLEN;
}
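
/* Example (informal): with the default qmult = 5 and DEFAULT_QLEN = 2,
 * a high- or super-speed link keeps up to 10 requests queued per
 * direction, while full speed sticks with plain double buffering.
 */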

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
        printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
        xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
        do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG    DBG
#else
#define VDBG(dev, fmt, args...) \
        do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
        xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
        xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;
        int             status = 0;

        /* don't change MTU on "live" link (peer won't know) */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                status = -EBUSY;
        else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
                status = -ERANGE;
        else
                net->mtu = new_mtu;
        spin_unlock_irqrestore(&dev->lock, flags);

        return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
        struct eth_dev *dev = netdev_priv(net);

        strlcpy(p->driver, "g_ether", sizeof(p->driver));
        strlcpy(p->version, UETH__VERSION, sizeof(p->version));
        strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
        strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
        .get_drvinfo = eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
        if (test_and_set_bit(flag, &dev->todo))
                return;
        if (!schedule_work(&dev->work))
                ERROR(dev, "kevent %d may have been dropped\n", flag);
        else
                DBG(dev, "kevent %d scheduled\n", flag);
}
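
/* Note: defer_kevent() may be called from atomic completion paths; the
 * bit set here is cleared by eth_work(), which then refills the rx
 * queue in process context where GFP_KERNEL allocations are safe.
 */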

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
        struct sk_buff  *skb;
        int             retval = -ENOMEM;
        size_t          size = 0;
        struct usb_ep   *out;
        unsigned long   flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                out = dev->port_usb->out_ep;
        else
                out = NULL;
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!out)
                return -ENOTCONN;


        /* Padding up to RX_EXTRA handles minor disagreements with host.
         * Normally we use the USB "terminate on short read" convention;
         * so allow up to (N*maxpacket), since that memory is normally
         * already allocated.  Some hardware doesn't deal well with short
         * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
         * byte off the end (to force hardware errors on overflow).
         *
         * RNDIS uses internal framing, and explicitly allows senders to
         * pad to end-of-packet.  That's potentially nice for speed, but
         * means receivers can't recover lost synch on their own (because
         * new packets don't only start after a short RX).
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
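
        /* Worked example (a sketch; actual numbers depend on the UDC and
         * function): with mtu 1500, no function header, and a 512-byte
         * bulk maxpacket, size = 14 + 1500 + 20 = 1534, which the two
         * lines above round up to 1536, i.e. exactly three maxpackets.
         */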

        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);

        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
                goto enomem;
        }

        /* Some platforms perform better when IP packets are aligned,
         * but on at least one, checksumming fails otherwise.  Note:
         * RNDIS headers involve variable numbers of LE32 values.
         */
        skb_reserve(skb, NET_IP_ALIGN);

        req->buf = skb->data;
        req->length = size;
        req->complete = rx_complete;
        req->context = skb;

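        /* Note: the enomem label below sits inside the body of the
         * following "if"; both an alloc_skb() failure above and an
         * -ENOMEM from usb_ep_queue() land there, so either way the
         * refill attempt is deferred to the keventd work item.
         */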
        retval = usb_ep_queue(out, req, gfp_flags);
        if (retval == -ENOMEM)
enomem:
                defer_kevent(dev, WORK_RX_MEMORY);
        if (retval) {
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
                spin_lock_irqsave(&dev->req_lock, flags);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context, *skb2;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;

        switch (status) {

        /* normal completion */
        case 0:
                skb_put(skb, req->actual);

                if (dev->unwrap) {
                        unsigned long   flags;

                        spin_lock_irqsave(&dev->lock, flags);
                        if (dev->port_usb) {
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
                        }
                        spin_unlock_irqrestore(&dev->lock, flags);
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
                skb = NULL;

                skb2 = skb_dequeue(&dev->rx_frames);
                while (skb2) {
                        if (status < 0
                                        || ETH_HLEN > skb2->len
                                        || skb2->len > VLAN_ETH_FRAME_LEN) {
                                dev->net->stats.rx_errors++;
                                dev->net->stats.rx_length_errors++;
                                DBG(dev, "rx length %d\n", skb2->len);
                                dev_kfree_skb_any(skb2);
                                goto next_frame;
                        }
                        skb2->protocol = eth_type_trans(skb2, dev->net);
                        dev->net->stats.rx_packets++;
                        dev->net->stats.rx_bytes += skb2->len;

                        /* no buffer copies needed, unless hardware can't
                         * use skb buffers.
                         */
                        status = netif_rx(skb2);
next_frame:
                        skb2 = skb_dequeue(&dev->rx_frames);
                }
                break;

        /* software-driven interface shutdown */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                VDBG(dev, "rx shutdown, code %d\n", status);
                goto quiesce;

        /* for hardware automagic (such as pxa) */
        case -ECONNABORTED:             /* endpoint reset */
                DBG(dev, "rx %s reset\n", ep->name);
                defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
                dev_kfree_skb_any(skb);
                goto clean;

        /* data overrun */
        case -EOVERFLOW:
                dev->net->stats.rx_over_errors++;
                /* FALLTHROUGH */

        default:
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }

        if (skb)
                dev_kfree_skb_any(skb);
        if (!netif_running(dev->net)) {
clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
                req = NULL;
        }
        if (req)
                rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
        unsigned                i;
        struct usb_request      *req;

        if (!n)
                return -ENOMEM;

        /* queue/recycle up to N requests */
        i = n;
        list_for_each_entry(req, list, list) {
                if (i-- == 0)
                        goto extra;
        }
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
                list_add(&req->list, list);
        }
        return 0;

extra:
        /* free extras */
        for (;;) {
                struct list_head        *next;

                next = req->list.next;
                list_del(&req->list);
                usb_ep_free_request(ep, req);

                if (next == list)
                        break;

                req = container_of(next, struct usb_request, list);
        }
        return 0;
}
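
/* Note: prealloc() tolerates partial allocation; it only reports
 * -ENOMEM when it cannot leave a single request on the list, so a
 * memory-tight system still ends up with a usable, if shallow, queue.
 */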

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
        int     status;

        spin_lock(&dev->req_lock);
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
fail:
        DBG(dev, "can't alloc requests\n");
done:
        spin_unlock(&dev->req_lock);
        return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
        struct usb_request      *req;
        unsigned long           flags;

        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);

                if (rx_submit(dev, req, gfp_flags) < 0) {
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }

                spin_lock_irqsave(&dev->req_lock, flags);
        }
        spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);

        if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running(dev->net))
                        rx_fill(dev, GFP_KERNEL);
        }

        if (dev->todo)
                DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;

        switch (req->status) {
        default:
                dev->net->stats.tx_errors++;
                VDBG(dev, "tx err %d\n", req->status);
                /* FALLTHROUGH */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
                dev->net->stats.tx_bytes += skb->len;
        }
        dev->net->stats.tx_packets++;

        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock(&dev->req_lock);
        dev_kfree_skb_any(skb);

        atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
{
        struct eth_dev          *dev = netdev_priv(net);
        int                     length = skb->len;
        int                     retval;
        struct usb_request      *req = NULL;
        unsigned long           flags;
        struct usb_ep           *in;
        u16                     cdc_filter;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
        } else {
                in = NULL;
                cdc_filter = 0;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!in) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* apply outgoing CDC or RNDIS filters */
        if (!is_promisc(cdc_filter)) {
                u8              *dest = skb->data;

                if (is_multicast_ether_addr(dest)) {
                        u16     type;

                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
                        if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
                }
                /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
        }

        spin_lock_irqsave(&dev->req_lock, flags);
        /*
         * this freelist can be empty if an interrupt triggered disconnect()
         * and reconfigured the gadget (shutting down this queue) after the
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
                spin_unlock_irqrestore(&dev->req_lock, flags);
                return NETDEV_TX_BUSY;
        }

        req = container_of(dev->tx_reqs.next, struct usb_request, list);
        list_del(&req->list);

        /* temporarily stop TX queue when the freelist empties */
        if (list_empty(&dev->tx_reqs))
                netif_stop_queue(net);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        /* no buffer copies needed, unless the network stack did it,
         * the hardware can't use skb buffers, or there's not enough
         * space for the extra headers we need
         */
        if (dev->wrap) {
                unsigned long   flags;

                spin_lock_irqsave(&dev->lock, flags);
                if (dev->port_usb)
                        skb = dev->wrap(dev->port_usb, skb);
                spin_unlock_irqrestore(&dev->lock, flags);
                if (!skb)
                        goto drop;

                length = skb->len;
        }
        req->buf = skb->data;
        req->context = skb;
        req->complete = tx_complete;

        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
        if (dev->port_usb->is_fixed &&
            length == dev->port_usb->fixed_in_len &&
            (length % in->maxpacket) == 0)
                req->zero = 0;
        else
                req->zero = 1;

        /* use zlp framing on tx for strict CDC-Ether conformance,
         * though any robust network rx path ignores extra padding,
         * and some hardware doesn't like to write zlps.
         */
        if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
                length++;
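
        /* Example: on !zlp hardware with a 512-byte maxpacket, a
         * 1024-byte transfer goes out as 1025 bytes and ends in a
         * one-byte short packet rather than a hard-to-generate ZLP;
         * robust rx paths ignore the extra pad byte.
         */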

        req->length = length;

        /* throttle high/super speed IRQ rate back slightly */
        if (gadget_is_dualspeed(dev->gadget))
                req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
                                     dev->gadget->speed == USB_SPEED_SUPER)
                        ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
                        : 0;
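
        /* In effect, req->no_interrupt is set on all but every qmult-th
         * request, so at high/super speed roughly one tx completion IRQ
         * fires per qmult packets instead of one per packet.
         */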

        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
                net->trans_start = jiffies;
                atomic_inc(&dev->tx_qlen);
        }

        if (retval) {
                dev_kfree_skb_any(skb);
drop:
                dev->net->stats.tx_dropped++;
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
        DBG(dev, "%s\n", __func__);

        /* fill the rx queue */
        rx_fill(dev, gfp_flags);

        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        struct gether   *link;

        DBG(dev, "%s\n", __func__);
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);

        spin_lock_irq(&dev->lock);
        link = dev->port_usb;
        if (link && link->open)
                link->open(link);
        spin_unlock_irq(&dev->lock);

        return 0;
}

static int eth_stop(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;

        VDBG(dev, "%s\n", __func__);
        netif_stop_queue(net);

        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
                dev->net->stats.rx_packets, dev->net->stats.tx_packets,
                dev->net->stats.rx_errors, dev->net->stats.tx_errors
                );

        /* ensure there are no more active requests */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether   *link = dev->port_usb;
                const struct usb_endpoint_descriptor *in;
                const struct usb_endpoint_descriptor *out;

                if (link->close)
                        link->close(link);

                /* NOTE:  we have no abort-queue primitive we could use
                 * to cancel all pending I/O.  Instead, we disable then
                 * reenable the endpoints ... this idiom may leave toggle
                 * wrong, but that's a self-correcting error.
                 *
                 * REVISIT:  we *COULD* just let the transfers complete at
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
                in = link->in_ep->desc;
                out = link->out_ep->desc;
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
                        link->in_ep->desc = in;
                        link->out_ep->desc = out;
                        usb_ep_enable(link->in_ep);
                        usb_ep_enable(link->out_ep);
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

static int get_ether_addr(const char *str, u8 *dev_addr)
{
        if (str) {
                unsigned        i;

                for (i = 0; i < 6; i++) {
                        unsigned char num;

                        if ((*str == '.') || (*str == ':'))
                                str++;
                        num = hex_to_bin(*str++) << 4;
                        num |= hex_to_bin(*str++);
                        dev_addr[i] = num;
                }
                if (is_valid_ether_addr(dev_addr))
                        return 0;
        }
        eth_random_addr(dev_addr);
        return 1;
}
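
/* Example: "dev_addr=aa:bb:cc:dd:ee:01" and "aa.bb.cc.dd.ee.01" parse to
 * the same six bytes; a missing or invalid string falls back to a random
 * locally-administered address from eth_random_addr(), and the nonzero
 * return tells the caller that happened.
 */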

static const struct net_device_ops eth_netdev_ops = {
        .ndo_open               = eth_open,
        .ndo_stop               = eth_stop,
        .ndo_start_xmit         = eth_start_xmit,
        .ndo_change_mtu         = ueth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static struct device_type gadget_type = {
        .name   = "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *      host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
                const char *netname)
{
        struct eth_dev          *dev;
        struct net_device       *net;
        int                     status;

        net = alloc_etherdev(sizeof *dev);
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        if (get_ether_addr(dev_addr, net->dev_addr))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "host");

        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);

        net->netdev_ops = &eth_netdev_ops;

        SET_ETHTOOL_OPS(net, &ops);

        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                free_netdev(net);
                dev = ERR_PTR(status);
        } else {
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /* two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }

        return dev;
}

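/* A bind-time sketch (hypothetical caller; "cdev" and "edev" are
 * illustrative names, not from this file):
 *
 *      u8 host_mac[ETH_ALEN];
 *      struct eth_dev *edev;
 *
 *      edev = gether_setup_name(cdev->gadget, host_mac, "usb");
 *      if (IS_ERR(edev))
 *              return PTR_ERR(edev);
 */
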
/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: the device being cleaned up
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
        if (!dev)
                return;

        unregister_netdev(dev->net);
        flush_work(&dev->work);
        free_netdev(dev->net);
}

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *      current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  If it does
 * not hold an error code (negative errno), the endpoints' driver_data
 * values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        int                     result = 0;

        if (!dev)
                return ERR_PTR(-EINVAL);

        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->in_ep->name, result);
                goto fail0;
        }

        link->out_ep->driver_data = dev;
        result = usb_ep_enable(link->out_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->out_ep->name, result);
                goto fail1;
        }

        if (result == 0)
                result = alloc_requests(dev, link, qlen(dev->gadget));

        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
                DBG(dev, "qlen %d\n", qlen(dev->gadget));

                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;

                spin_lock(&dev->lock);
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
                                link->open(link);
                } else {
                        if (link->close)
                                link->close(link);
                }
                spin_unlock(&dev->lock);

                netif_carrier_on(dev->net);
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);

        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
fail1:
                (void) usb_ep_disable(link->in_ep);
        }
fail0:
        /* caller is responsible for cleanup on error */
        if (result < 0)
                return ERR_PTR(result);
        return dev->net;
}
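
/* A connect-time sketch (hypothetical set_alt() fragment; "geth" and
 * "edev" are illustrative, while ioport/in_ep/out_ep follow struct
 * gether in u_ether.h):
 *
 *      geth->port.ioport = edev;
 *      net = gether_connect(&geth->port);
 *      if (IS_ERR(net))
 *              return PTR_ERR(net);
 */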

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;

        WARN_ON(!dev);
        if (!dev)
                return;

        DBG(dev, "%s\n", __func__);

        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);

        /* disable endpoints, forcing (synchronous) completion
         * of all pending i/o.  then free the request objects
         * and forget about the endpoints.
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->tx_reqs)) {
                req = container_of(dev->tx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->in_ep->driver_data = NULL;
        link->in_ep->desc = NULL;

        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->out_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->out_ep->driver_data = NULL;
        link->out_ep->desc = NULL;

        /* finish forgetting about this USB link episode */
        dev->header_len = 0;
        dev->unwrap = NULL;
        dev->wrap = NULL;

        spin_lock(&dev->lock);
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
}