linux/drivers/usb/gadget/function/u_ether.c
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
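
/* A minimal usage sketch, assuming a function driver with one data
 * interface and with error handling elided; names like "port" and
 * "host_mac" are illustrative, not part of this API:
 *
 *	struct gether port;		// endpoints and framing ops are
 *					// filled in by the function driver
 *	struct eth_dev *ioport;
 *	struct net_device *net;
 *	u8 host_mac[ETH_ALEN];
 *
 *	// bind: create the "usb0" net_device, record the host MAC
 *	ioport = gether_setup_name(gadget, NULL, NULL, host_mac,
 *			QMULT_DEFAULT, "usb");
 *	port.ioport = ioport;
 *
 *	// set_alt: endpoints are configured, start carrying packets
 *	net = gether_connect(&port);
 *
 *	// disable and unbind: drop carrier, then free everything
 *	gether_disconnect(&port);
 *	gether_cleanup(ioport);
 */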

#define UETH__VERSION   "29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes.  Set the max size to 15k+52 (15 * 1024 + 52 = 15412) to
 * prevent allocating 32k blocks and still have efficient handling.
 */
#define GETHER_MAX_ETH_FRAME_LEN 15412

struct eth_dev {
        /* lock is held while accessing port_usb */
        spinlock_t              lock;
        struct gether           *port_usb;

        struct net_device       *net;
        struct usb_gadget       *gadget;

        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
        atomic_t                tx_qlen;

        struct sk_buff_head     rx_frames;

        unsigned                qmult;

        unsigned                header_len;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);

        struct work_struct      work;

        unsigned long           todo;
#define WORK_RX_MEMORY          0

        bool                    zlp;
        u8                      host_mac[ETH_ALEN];
        u8                      dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA        20      /* bytes guarding against rx overflows */

#define DEFAULT_QLEN    2       /* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
        if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
                                            gadget->speed == USB_SPEED_SUPER))
                return qmult * DEFAULT_QLEN;
        else
                return DEFAULT_QLEN;
}
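
/* Example, assuming the default qmult of 5 (QMULT_DEFAULT): at high or
 * super speed each direction gets 5 * DEFAULT_QLEN = 10 requests, while
 * a full speed link keeps the plain double buffering of 2.
 */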

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
        printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
        xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
        do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG    DBG
#else
#define VDBG(dev, fmt, args...) \
        do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
        xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
        xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
        if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN)
                return -ERANGE;
        net->mtu = new_mtu;

        return 0;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
        struct eth_dev *dev = netdev_priv(net);

        strlcpy(p->driver, "g_ether", sizeof(p->driver));
        strlcpy(p->version, UETH__VERSION, sizeof(p->version));
        strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
        strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
        .get_drvinfo = eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
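        /* test_and_set_bit() returns the bit's previous value, so if the
         * flag was already set the work is still pending and we back off
         */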
        if (test_and_set_bit(flag, &dev->todo))
                return;
        if (!schedule_work(&dev->work))
                ERROR(dev, "kevent %d may have been dropped\n", flag);
        else
                DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
        struct sk_buff  *skb;
        int             retval = -ENOMEM;
        size_t          size = 0;
        struct usb_ep   *out;
        unsigned long   flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                out = dev->port_usb->out_ep;
        else
                out = NULL;
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!out)
                return -ENOTCONN;

        /* Padding up to RX_EXTRA handles minor disagreements with host.
         * Normally we use the USB "terminate on short read" convention;
         * so allow up to (N*maxpacket), since that memory is normally
         * already allocated.  Some hardware doesn't deal well with short
         * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
         * byte off the end (to force hardware errors on overflow).
         *
         * RNDIS uses internal framing, and explicitly allows senders to
         * pad to end-of-packet.  That's potentially nice for speed, but
         * means receivers can't recover lost synch on their own (because
         * new packets don't only start after a short RX).
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;

        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
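
        /* worked example, assuming a 1500 byte MTU, no function header,
         * and a 512 byte bulk maxpacket: 14 + 1500 + 20 = 1534 bytes,
         * rounded up to 3 * 512 = 1536 so a full final packet still fits
         */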

        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
                goto enomem;
        }

        /* Some platforms perform better when IP packets are aligned,
         * but on at least one, checksumming fails otherwise.  Note:
         * RNDIS headers involve variable numbers of LE32 values.
         */
        skb_reserve(skb, NET_IP_ALIGN);

        req->buf = skb->data;
        req->length = size;
        req->complete = rx_complete;
        req->context = skb;

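        /* note the label below: a failed skb allocation above jumps into
         * the body of this "if", sharing the ENOMEM deferral with a
         * failed usb_ep_queue()
         */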
        retval = usb_ep_queue(out, req, gfp_flags);
        if (retval == -ENOMEM)
enomem:
                defer_kevent(dev, WORK_RX_MEMORY);
        if (retval) {
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
                spin_lock_irqsave(&dev->req_lock, flags);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context, *skb2;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;

        switch (status) {

        /* normal completion */
        case 0:
                skb_put(skb, req->actual);

                if (dev->unwrap) {
                        unsigned long   flags;

                        spin_lock_irqsave(&dev->lock, flags);
                        if (dev->port_usb) {
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
                        }
                        spin_unlock_irqrestore(&dev->lock, flags);
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
                skb = NULL;

                skb2 = skb_dequeue(&dev->rx_frames);
                while (skb2) {
                        if (status < 0
                                        || ETH_HLEN > skb2->len
                                        || skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
                                dev->net->stats.rx_errors++;
                                dev->net->stats.rx_length_errors++;
                                DBG(dev, "rx length %d\n", skb2->len);
                                dev_kfree_skb_any(skb2);
                                goto next_frame;
                        }
                        skb2->protocol = eth_type_trans(skb2, dev->net);
                        dev->net->stats.rx_packets++;
                        dev->net->stats.rx_bytes += skb2->len;

                        /* no buffer copies needed, unless hardware can't
                         * use skb buffers.
                         */
                        status = netif_rx(skb2);
next_frame:
                        skb2 = skb_dequeue(&dev->rx_frames);
                }
                break;

        /* software-driven interface shutdown */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                VDBG(dev, "rx shutdown, code %d\n", status);
                goto quiesce;

        /* for hardware automagic (such as pxa) */
        case -ECONNABORTED:             /* endpoint reset */
                DBG(dev, "rx %s reset\n", ep->name);
                defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
                dev_kfree_skb_any(skb);
                goto clean;

        /* data overrun */
        case -EOVERFLOW:
                dev->net->stats.rx_over_errors++;
                /* FALLTHROUGH */

        default:
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }

        if (skb)
                dev_kfree_skb_any(skb);
        if (!netif_running(dev->net)) {
clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
                req = NULL;
        }
        if (req)
                rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
        unsigned                i;
        struct usb_request      *req;

        if (!n)
                return -ENOMEM;

        /* queue/recycle up to N requests */
        i = n;
        list_for_each_entry(req, list, list) {
                if (i-- == 0)
                        goto extra;
        }
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
                list_add(&req->list, list);
        }
        return 0;

extra:
        /* free extras */
        for (;;) {
                struct list_head        *next;

                next = req->list.next;
                list_del(&req->list);
                usb_ep_free_request(ep, req);

                if (next == list)
                        break;

                req = container_of(next, struct usb_request, list);
        }
        return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
        int     status;

        spin_lock(&dev->req_lock);
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
fail:
        DBG(dev, "can't alloc requests\n");
done:
        spin_unlock(&dev->req_lock);
        return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
        struct usb_request      *req;
        unsigned long           flags;

        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);

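                /* rx_submit() runs without req_lock held; when a queued
                 * submission fails it re-adds the request to rx_reqs
                 * under that same lock
                 */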
                if (rx_submit(dev, req, gfp_flags) < 0) {
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }

                spin_lock_irqsave(&dev->req_lock, flags);
        }
        spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);

        if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running(dev->net))
                        rx_fill(dev, GFP_KERNEL);
        }

        if (dev->todo)
                DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;

        switch (req->status) {
        default:
                dev->net->stats.tx_errors++;
                VDBG(dev, "tx err %d\n", req->status);
                /* FALLTHROUGH */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
                dev->net->stats.tx_bytes += skb->len;
        }
        dev->net->stats.tx_packets++;

        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock(&dev->req_lock);
        dev_kfree_skb_any(skb);

        atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
{
        struct eth_dev          *dev = netdev_priv(net);
        int                     length = 0;
        int                     retval;
        struct usb_request      *req = NULL;
        unsigned long           flags;
        struct usb_ep           *in;
        u16                     cdc_filter;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
        } else {
                in = NULL;
                cdc_filter = 0;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (skb && !in) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* apply outgoing CDC or RNDIS filters */
        if (skb && !is_promisc(cdc_filter)) {
                u8              *dest = skb->data;

                if (is_multicast_ether_addr(dest)) {
                        u16     type;

                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
                        if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
                }
                /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
        }

        spin_lock_irqsave(&dev->req_lock, flags);
        /*
         * this freelist can be empty if an interrupt triggered disconnect()
         * and reconfigured the gadget (shutting down this queue) after the
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
                spin_unlock_irqrestore(&dev->req_lock, flags);
                return NETDEV_TX_BUSY;
        }

        req = container_of(dev->tx_reqs.next, struct usb_request, list);
        list_del(&req->list);

        /* temporarily stop TX queue when the freelist empties */
        if (list_empty(&dev->tx_reqs))
                netif_stop_queue(net);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        /* no buffer copies needed, unless the network stack did it,
         * the hardware can't use skb buffers, or there's not enough
         * space for the extra headers we need
         */
        if (dev->wrap) {
                unsigned long   flags;

                spin_lock_irqsave(&dev->lock, flags);
                if (dev->port_usb)
                        skb = dev->wrap(dev->port_usb, skb);
                spin_unlock_irqrestore(&dev->lock, flags);
                if (!skb) {
                        /* Multi frame CDC protocols may store the frame for
                         * later which is not a dropped frame.
                         */
                        if (dev->port_usb->supports_multi_frame)
                                goto multiframe;
                        goto drop;
                }
        }

        length = skb->len;
        req->buf = skb->data;
        req->context = skb;
        req->complete = tx_complete;

        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
        if (dev->port_usb->is_fixed &&
            length == dev->port_usb->fixed_in_len &&
            (length % in->maxpacket) == 0)
                req->zero = 0;
        else
                req->zero = 1;

        /* use zlp framing on tx for strict CDC-Ether conformance,
         * though any robust network rx path ignores extra padding,
         * and some hardware doesn't like to write zlps.
         */
        if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
                length++;
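
        /* e.g. with a 512 byte maxpacket and a 1024 byte frame, the one
         * pad byte makes the transfer 1025 bytes, so it ends in a short
         * packet that the host treats as end-of-transfer
         */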

        req->length = length;

        /* throttle high/super speed IRQ rate back slightly */
        if (gadget_is_dualspeed(dev->gadget))
                req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
                                     dev->gadget->speed == USB_SPEED_SUPER)
                        ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
                        : 0;

        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
                net->trans_start = jiffies;
                atomic_inc(&dev->tx_qlen);
        }

        if (retval) {
                dev_kfree_skb_any(skb);
drop:
                dev->net->stats.tx_dropped++;
multiframe:
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
        DBG(dev, "%s\n", __func__);

        /* fill the rx queue */
        rx_fill(dev, gfp_flags);

        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        struct gether   *link;

        DBG(dev, "%s\n", __func__);
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);

        spin_lock_irq(&dev->lock);
        link = dev->port_usb;
        if (link && link->open)
                link->open(link);
        spin_unlock_irq(&dev->lock);

        return 0;
}

static int eth_stop(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;

        VDBG(dev, "%s\n", __func__);
        netif_stop_queue(net);

        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
                dev->net->stats.rx_packets, dev->net->stats.tx_packets,
                dev->net->stats.rx_errors, dev->net->stats.tx_errors
                );

        /* ensure there are no more active requests */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether   *link = dev->port_usb;
                const struct usb_endpoint_descriptor *in;
                const struct usb_endpoint_descriptor *out;

                if (link->close)
                        link->close(link);

                /* NOTE:  we have no abort-queue primitive we could use
                 * to cancel all pending I/O.  Instead, we disable then
                 * reenable the endpoints ... this idiom may leave toggle
                 * wrong, but that's a self-correcting error.
                 *
                 * REVISIT:  we *COULD* just let the transfers complete at
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
                in = link->in_ep->desc;
                out = link->out_ep->desc;
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
                        link->in_ep->desc = in;
                        link->out_ep->desc = out;
                        usb_ep_enable(link->in_ep);
                        usb_ep_enable(link->out_ep);
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*-------------------------------------------------------------------------*/

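/* Parse a MAC address given as six pairs of hex digits, optionally
 * separated by ':' or '.' (e.g. "01:23:45:67:89:ab"); on a parse failure
 * or an invalid address, fall back to a random address and return
 * nonzero so the caller can warn about it.
 */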
static int get_ether_addr(const char *str, u8 *dev_addr)
{
        if (str) {
                unsigned        i;

                for (i = 0; i < 6; i++) {
                        unsigned char num;

                        if ((*str == '.') || (*str == ':'))
                                str++;
                        num = hex_to_bin(*str++) << 4;
                        num |= hex_to_bin(*str++);
                        dev_addr[i] = num;
                }
                if (is_valid_ether_addr(dev_addr))
                        return 0;
        }
        eth_random_addr(dev_addr);
        return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
        if (len < 18)
                return -EINVAL;

        snprintf(str, len, "%pM", dev_addr);
        return 18;
}

static const struct net_device_ops eth_netdev_ops = {
        .ndo_open               = eth_open,
        .ndo_stop               = eth_stop,
        .ndo_start_xmit         = eth_start_xmit,
        .ndo_change_mtu         = ueth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static struct device_type gadget_type = {
        .name   = "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @dev_addr: NULL, or a string giving the device-side (self) address
 * @host_addr: NULL, or a string giving the host-side address
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *      host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
                const char *dev_addr, const char *host_addr,
                u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
        struct eth_dev          *dev;
        struct net_device       *net;
        int                     status;

        net = alloc_etherdev(sizeof(*dev));
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = qmult;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        if (get_ether_addr(dev_addr, net->dev_addr))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "host");

        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);

        net->netdev_ops = &eth_netdev_ops;

        net->ethtool_ops = &ops;

        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                free_netdev(net);
                dev = ERR_PTR(status);
        } else {
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /*
                 * two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }

        return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
        struct net_device       *net;
        struct eth_dev          *dev;

        net = alloc_etherdev(sizeof(*dev));
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = QMULT_DEFAULT;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        eth_random_addr(dev->dev_mac);
        pr_warn("using random %s ethernet address\n", "self");
        eth_random_addr(dev->host_mac);
        pr_warn("using random %s ethernet address\n", "host");

        net->netdev_ops = &eth_netdev_ops;

        net->ethtool_ops = &ops;
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
        struct eth_dev *dev;
        struct usb_gadget *g;
        struct sockaddr sa;
        int status;

        if (!net->dev.parent)
                return -EINVAL;
        dev = netdev_priv(net);
        g = dev->gadget;
        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                return status;
        } else {
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /* two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }
        sa.sa_family = net->type;
        memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
        rtnl_lock();
        status = dev_set_mac_address(net, &sa);
        rtnl_unlock();
        if (status)
                pr_warn("cannot set self ethernet address: %d\n", status);
        else
                INFO(dev, "MAC %pM\n", dev->dev_mac);

        return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);
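
/* A minimal sketch of the two-phase bring-up used by configfs-style
 * function drivers (a valid gadget pointer is assumed, and error
 * handling is elided):
 *
 *	struct net_device *net;
 *
 *	net = gether_setup_name_default("usb");	// allocated, not registered
 *	gether_set_gadget(net, gadget);		// sets the parent device
 *	gether_register_netdev(net);		// registers and sets self MAC
 */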

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(dev_addr, new_addr))
                return -EINVAL;
        memcpy(dev->dev_mac, new_addr, ETH_ALEN);
        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(host_addr, new_addr))
                return -EINVAL;
        memcpy(dev->host_mac, new_addr, ETH_ALEN);
        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return get_ether_addr_str(dev->host_mac, host_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;

        if (len < 13)
                return -EINVAL;

        dev = netdev_priv(net);
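        /* "%pm" (lower case) prints the twelve hex digits with no
         * separators, the form CDC's iMACAddress string expects;
         * "%pM" would insert colons
         */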
        snprintf(host_addr, len, "%pm", dev->host_mac);

        return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
        rtnl_lock();
        strlcpy(name, netdev_name(net), len);
        rtnl_unlock();
        return strlen(name);
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: the eth_dev to remove, as returned by gether_setup_name()
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
        if (!dev)
                return;

        unregister_netdev(dev->net);
        flush_work(&dev->work);
        free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *      current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer with IS_ERR().  Unless it holds
 * a negative errno, the endpoints' driver_data values have been
 * overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        int                     result = 0;

        if (!dev)
                return ERR_PTR(-EINVAL);

        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->in_ep->name, result);
                goto fail0;
        }

        link->out_ep->driver_data = dev;
        result = usb_ep_enable(link->out_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->out_ep->name, result);
                goto fail1;
        }

        if (result == 0)
                result = alloc_requests(dev, link, qlen(dev->gadget,
                                        dev->qmult));

        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
                DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;

                spin_lock(&dev->lock);
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
                                link->open(link);
                } else {
                        if (link->close)
                                link->close(link);
                }
                spin_unlock(&dev->lock);

                netif_carrier_on(dev->net);
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);

        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
fail1:
                (void) usb_ep_disable(link->in_ep);
        }
fail0:
        /* caller is responsible for cleanup on error */
        if (result < 0)
                return ERR_PTR(result);
        return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;

        WARN_ON(!dev);
        if (!dev)
                return;

        DBG(dev, "%s\n", __func__);

        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);

        /* disable endpoints, forcing (synchronous) completion
         * of all pending i/o.  then free the request objects
         * and forget about the endpoints.
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->tx_reqs)) {
                req = container_of(dev->tx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->in_ep->desc = NULL;

        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->out_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->out_ep->desc = NULL;

        /* finish forgetting about this USB link episode */
        dev->header_len = 0;
        dev->unwrap = NULL;
        dev->wrap = NULL;

        spin_lock(&dev->lock);
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");