linux/drivers/usb/gadget/u_ether.c
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION   "29-May-2008"

struct eth_dev {
        /* lock is held while accessing port_usb
         * or updating its backlink port_usb->ioport
         */
        spinlock_t              lock;
        struct gether           *port_usb;

        struct net_device       *net;
        struct usb_gadget       *gadget;

        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
        atomic_t                tx_qlen;

        struct sk_buff_head     rx_frames;

        unsigned                header_len;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);

        struct work_struct      work;

        unsigned long           todo;
#define WORK_RX_MEMORY          0

        bool                    zlp;
        u8                      host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA        20      /* bytes guarding against rx overflows */

#define DEFAULT_QLEN    2       /* double buffering by default */


#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high speed");

#else   /* full speed (low speed doesn't do bulk) */
#define qmult           1
#endif

/* for dual-speed hardware, use deeper queues at highspeed */
static inline int qlen(struct usb_gadget *gadget)
{
        if (gadget_is_dualspeed(gadget) && gadget->speed == USB_SPEED_HIGH)
                return qmult * DEFAULT_QLEN;
        else
                return DEFAULT_QLEN;
}
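
/* Worked example (using the defaults above): with qmult at 5 and
 * DEFAULT_QLEN at 2, a dual-speed controller enumerated at high speed
 * pre-allocates 5 * 2 = 10 requests per direction, while the same link
 * at full speed keeps just the double-buffered depth of 2.
 */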

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
        printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
        xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
        do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG    DBG
#else
#define VDBG(dev, fmt, args...) \
        do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
        xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
        xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;
        int             status = 0;

        /* don't change MTU on "live" link (peer won't know) */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                status = -EBUSY;
        else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
                status = -ERANGE;
        else
                net->mtu = new_mtu;
        spin_unlock_irqrestore(&dev->lock, flags);

        return status;
}
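
/* Example: with ETH_HLEN at 14 and ETH_FRAME_LEN at 1514, "ifconfig usb0
 * mtu 1500" succeeds while the link is down; mtu 14 or mtu 1515 fails
 * with -ERANGE, and any MTU change on a connected link fails with -EBUSY.
 */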

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
        struct eth_dev  *dev = netdev_priv(net);

        strlcpy(p->driver, "g_ether", sizeof p->driver);
        strlcpy(p->version, UETH__VERSION, sizeof p->version);
        strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
        strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
        .get_drvinfo = eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
        if (test_and_set_bit(flag, &dev->todo))
                return;
        if (!schedule_work(&dev->work))
                ERROR(dev, "kevent %d may have been dropped\n", flag);
        else
                DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
        struct sk_buff  *skb;
        int             retval = -ENOMEM;
        size_t          size = 0;
        struct usb_ep   *out;
        unsigned long   flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                out = dev->port_usb->out_ep;
        else
                out = NULL;
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!out)
                return -ENOTCONN;


        /* Padding up to RX_EXTRA handles minor disagreements with host.
         * Normally we use the USB "terminate on short read" convention;
         * so allow up to (N*maxpacket), since that memory is normally
         * already allocated.  Some hardware doesn't deal well with short
         * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
         * byte off the end (to force hardware errors on overflow).
         *
         * RNDIS uses internal framing, and explicitly allows senders to
         * pad to end-of-packet.  That's potentially nice for speed, but
         * means receivers can't recover lost synch on their own (because
         * new packets don't only start after a short RX).
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
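
        /* Worked example (assuming the default 1500 byte MTU, no framing
         * header so header_len is 0 as with CDC Ethernet, and 512 byte
         * bulk maxpacket at high speed): 14 + 1500 + 20 = 1534 bytes are
         * needed, which rounds up to 1536 -- exactly three bulk packets.
         */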

        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
                goto enomem;
        }

        /* Some platforms perform better when IP packets are aligned,
         * but on at least one, checksumming fails otherwise.  Note:
         * RNDIS headers involve variable numbers of LE32 values.
         */
        skb_reserve(skb, NET_IP_ALIGN);

        req->buf = skb->data;
        req->length = size;
        req->complete = rx_complete;
        req->context = skb;

        retval = usb_ep_queue(out, req, gfp_flags);
        if (retval == -ENOMEM)
enomem:
                defer_kevent(dev, WORK_RX_MEMORY);
        if (retval) {
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
                spin_lock_irqsave(&dev->req_lock, flags);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context, *skb2;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;

        switch (status) {

        /* normal completion */
        case 0:
                skb_put(skb, req->actual);

                if (dev->unwrap) {
                        unsigned long   flags;

                        spin_lock_irqsave(&dev->lock, flags);
                        if (dev->port_usb) {
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
                        }
                        spin_unlock_irqrestore(&dev->lock, flags);
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
                skb = NULL;

                skb2 = skb_dequeue(&dev->rx_frames);
                while (skb2) {
                        if (status < 0
                                        || ETH_HLEN > skb2->len
                                        || skb2->len > ETH_FRAME_LEN) {
                                dev->net->stats.rx_errors++;
                                dev->net->stats.rx_length_errors++;
                                DBG(dev, "rx length %d\n", skb2->len);
                                dev_kfree_skb_any(skb2);
                                goto next_frame;
                        }
                        skb2->protocol = eth_type_trans(skb2, dev->net);
                        dev->net->stats.rx_packets++;
                        dev->net->stats.rx_bytes += skb2->len;

                        /* no buffer copies needed, unless hardware can't
                         * use skb buffers.
                         */
                        status = netif_rx(skb2);
next_frame:
                        skb2 = skb_dequeue(&dev->rx_frames);
                }
                break;

        /* software-driven interface shutdown */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                VDBG(dev, "rx shutdown, code %d\n", status);
                goto quiesce;

        /* for hardware automagic (such as pxa) */
        case -ECONNABORTED:             /* endpoint reset */
                DBG(dev, "rx %s reset\n", ep->name);
                defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
                dev_kfree_skb_any(skb);
                goto clean;

        /* data overrun */
        case -EOVERFLOW:
                dev->net->stats.rx_over_errors++;
                /* FALLTHROUGH */

        default:
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }

        if (skb)
                dev_kfree_skb_any(skb);
        if (!netif_running(dev->net)) {
clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
                req = NULL;
        }
        if (req)
                rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
        unsigned                i;
        struct usb_request      *req;

        if (!n)
                return -ENOMEM;

        /* queue/recycle up to N requests */
        i = n;
        list_for_each_entry(req, list, list) {
                if (i-- == 0)
                        goto extra;
        }
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
                list_add(&req->list, list);
        }
        return 0;

extra:
        /* free extras */
        for (;;) {
                struct list_head        *next;

                next = req->list.next;
                list_del(&req->list);
                usb_ep_free_request(ep, req);

                if (next == list)
                        break;

                req = container_of(next, struct usb_request, list);
        }
        return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
        int     status;

        spin_lock(&dev->req_lock);
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
fail:
        DBG(dev, "can't alloc requests\n");
done:
        spin_unlock(&dev->req_lock);
        return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
        struct usb_request      *req;
        unsigned long           flags;

        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);

                if (rx_submit(dev, req, gfp_flags) < 0) {
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }

                spin_lock_irqsave(&dev->req_lock, flags);
        }
        spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);

        if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running(dev->net))
                        rx_fill(dev, GFP_KERNEL);
        }

        if (dev->todo)
                DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;

        switch (req->status) {
        default:
                dev->net->stats.tx_errors++;
                VDBG(dev, "tx err %d\n", req->status);
                /* FALLTHROUGH */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
                dev->net->stats.tx_bytes += skb->len;
        }
        dev->net->stats.tx_packets++;

        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock(&dev->req_lock);
        dev_kfree_skb_any(skb);

        atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
{
        struct eth_dev          *dev = netdev_priv(net);
        int                     length = skb->len;
        int                     retval;
        struct usb_request      *req = NULL;
        unsigned long           flags;
        struct usb_ep           *in;
        u16                     cdc_filter;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
        } else {
                in = NULL;
                cdc_filter = 0;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!in) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* apply outgoing CDC or RNDIS filters */
        if (!is_promisc(cdc_filter)) {
                u8              *dest = skb->data;

                if (is_multicast_ether_addr(dest)) {
                        u16     type;

                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
                        if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
                }
                /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
        }

        spin_lock_irqsave(&dev->req_lock, flags);
        /*
         * this freelist can be empty if an interrupt triggered disconnect()
         * and reconfigured the gadget (shutting down this queue) after the
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
                spin_unlock_irqrestore(&dev->req_lock, flags);
                return NETDEV_TX_BUSY;
        }

        req = container_of(dev->tx_reqs.next, struct usb_request, list);
        list_del(&req->list);

        /* temporarily stop TX queue when the freelist empties */
        if (list_empty(&dev->tx_reqs))
                netif_stop_queue(net);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        /* no buffer copies needed, unless the network stack did it
         * or the hardware can't use skb buffers.
         * or there's not enough space for extra headers we need
         */
        if (dev->wrap) {
                unsigned long   flags;

                spin_lock_irqsave(&dev->lock, flags);
                if (dev->port_usb)
                        skb = dev->wrap(dev->port_usb, skb);
                spin_unlock_irqrestore(&dev->lock, flags);
                if (!skb)
                        goto drop;

                length = skb->len;
        }
        req->buf = skb->data;
        req->context = skb;
        req->complete = tx_complete;

        /* use zlp framing on tx for strict CDC-Ether conformance,
         * though any robust network rx path ignores extra padding.
         * and some hardware doesn't like to write zlps.
         */
        req->zero = 1;
        if (!dev->zlp && (length % in->maxpacket) == 0)
                length++;

        req->length = length;
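
        /* Worked example of the zlp logic above (assuming 512 byte bulk
         * maxpacket and a 1024 byte frame): a controller that handles zlps
         * sends 512 + 512 + a zero length packet; one that doesn't gets
         * length bumped to 1025, so the final 1 byte packet still tells
         * the host's "terminate on short read" logic where the frame ends.
         */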

        /* throttle highspeed IRQ rate back slightly */
        if (gadget_is_dualspeed(dev->gadget))
                req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
                        ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
                        : 0;

        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
                net->trans_start = jiffies;
                atomic_inc(&dev->tx_qlen);
        }

        if (retval) {
                dev_kfree_skb_any(skb);
drop:
                dev->net->stats.tx_dropped++;
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
        DBG(dev, "%s\n", __func__);

        /* fill the rx queue */
        rx_fill(dev, gfp_flags);

        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        struct gether   *link;

        DBG(dev, "%s\n", __func__);
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);

        spin_lock_irq(&dev->lock);
        link = dev->port_usb;
        if (link && link->open)
                link->open(link);
        spin_unlock_irq(&dev->lock);

        return 0;
}

static int eth_stop(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;

        VDBG(dev, "%s\n", __func__);
        netif_stop_queue(net);

        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
                dev->net->stats.rx_packets, dev->net->stats.tx_packets,
                dev->net->stats.rx_errors, dev->net->stats.tx_errors
                );

        /* ensure there are no more active requests */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether   *link = dev->port_usb;

                if (link->close)
                        link->close(link);

                /* NOTE:  we have no abort-queue primitive we could use
                 * to cancel all pending I/O.  Instead, we disable then
                 * reenable the endpoints ... this idiom may leave toggle
                 * wrong, but that's a self-correcting error.
                 *
                 * REVISIT:  we *COULD* just let the transfers complete at
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
                        usb_ep_enable(link->in_ep, link->in);
                        usb_ep_enable(link->out_ep, link->out);
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");


static u8 __init nibble(unsigned char c)
{
        if (isdigit(c))
                return c - '0';
        c = toupper(c);
        if (isxdigit(c))
                return 10 + c - 'A';
        return 0;
}

static int __init get_ether_addr(const char *str, u8 *dev_addr)
{
        if (str) {
                unsigned        i;

                for (i = 0; i < 6; i++) {
                        unsigned char num;

                        if ((*str == '.') || (*str == ':'))
                                str++;
                        num = nibble(*str++) << 4;
                        num |= (nibble(*str++));
                        dev_addr [i] = num;
                }
                if (is_valid_ether_addr(dev_addr))
                        return 0;
        }
        random_ether_addr(dev_addr);
        return 1;
}
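
/* Example (assuming the g_ether gadget module built around this file):
 *
 *      modprobe g_ether dev_addr=8a:bc:de:f0:12:34 host_addr=8a:bc:de:f0:12:35
 *
 * Bytes may be separated by ':' or '.'.  An address that fails
 * is_valid_ether_addr() -- multicast bit set, or all zeroes -- is replaced
 * by a random one, and gether_setup() logs a warning to that effect.
 */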

static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
        .ndo_open               = eth_open,
        .ndo_stop               = eth_stop,
        .ndo_start_xmit         = eth_start_xmit,
        .ndo_change_mtu         = ueth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to be associated with this link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *      host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
        struct eth_dev          *dev;
        struct net_device       *net;
        int                     status;

        if (the_dev)
                return -EBUSY;

        net = alloc_etherdev(sizeof *dev);
        if (!net)
                return -ENOMEM;

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        strcpy(net->name, "usb%d");

        if (get_ether_addr(dev_addr, net->dev_addr))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "host");

        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);

        net->netdev_ops = &eth_netdev_ops;

        SET_ETHTOOL_OPS(net, &ops);

        /* two kinds of host-initiated state changes:
         *  - iff DATA transfer is active, carrier is "on"
         *  - tx queueing enabled if open *and* carrier is "on"
         */
        netif_stop_queue(net);
        netif_carrier_off(net);

        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                free_netdev(net);
        } else {
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                the_dev = dev;
        }

        return status;
}
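
/* A minimal sketch of a caller (hypothetical; in-tree gadget drivers such
 * as ether.c do this from their composite bind, pairing it with
 * gether_cleanup() in unbind):
 *
 *      static u8 hostaddr[ETH_ALEN];
 *
 *      static int __init eth_bind(struct usb_composite_dev *cdev)
 *      {
 *              int status;
 *
 *              status = gether_setup(cdev->gadget, hostaddr);
 *              if (status < 0)
 *                      return status;
 *              ... add a configuration holding an ECM/RNDIS function,
 *                  which passes hostaddr on to that function driver ...
 *              return 0;
 *      }
 */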

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
        if (!the_dev)
                return;

        unregister_netdev(the_dev->net);
        free_netdev(the_dev->net);

        /* assuming we used keventd, it must quiesce too */
        flush_scheduled_work();

        the_dev = NULL;
}


/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *      current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
        struct eth_dev          *dev = the_dev;
        int                     result = 0;

        if (!dev)
                return ERR_PTR(-EINVAL);

        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep, link->in);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->in_ep->name, result);
                goto fail0;
        }

        link->out_ep->driver_data = dev;
        result = usb_ep_enable(link->out_ep, link->out);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->out_ep->name, result);
                goto fail1;
        }

        if (result == 0)
                result = alloc_requests(dev, link, qlen(dev->gadget));

        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
                DBG(dev, "qlen %d\n", qlen(dev->gadget));

                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;

                spin_lock(&dev->lock);
                dev->port_usb = link;
                link->ioport = dev;
                if (netif_running(dev->net)) {
                        if (link->open)
                                link->open(link);
                } else {
                        if (link->close)
                                link->close(link);
                }
                spin_unlock(&dev->lock);

                netif_carrier_on(dev->net);
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);

        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
fail1:
                (void) usb_ep_disable(link->in_ep);
        }
fail0:
        /* caller is responsible for cleanup on error */
        if (result < 0)
                return ERR_PTR(result);
        return dev->net;
}
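
/* A minimal sketch of the activation path (hypothetical; in-tree function
 * drivers such as f_ecm.c do this from set_alt() once the host selects
 * the data interface's active altsetting):
 *
 *      struct net_device *net;
 *
 *      ... fill port.in_ep/port.out_ep and port.in/port.out with the
 *          endpoints and descriptors for the current speed ...
 *      net = gether_connect(&port);
 *      if (IS_ERR(net))
 *              return PTR_ERR(net);
 *      ... packets flow until gether_disconnect(&port) ...
 */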

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;

        WARN_ON(!dev);
        if (!dev)
                return;

        DBG(dev, "%s\n", __func__);

        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);

        /* disable endpoints, forcing (synchronous) completion
         * of all pending i/o.  then free the request objects
         * and forget about the endpoints.
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->tx_reqs)) {
                req = container_of(dev->tx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->in_ep->driver_data = NULL;
        link->in = NULL;

        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->out_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->out_ep->driver_data = NULL;
        link->out = NULL;

        /* finish forgetting about this USB link episode */
        dev->header_len = 0;
        dev->unwrap = NULL;
        dev->wrap = NULL;

        spin_lock(&dev->lock);
        dev->port_usb = NULL;
        link->ioport = NULL;
        spin_unlock(&dev->lock);
}