linux/drivers/usb/gadget/function/u_ether.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
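
/* Illustrative hookup sketch (editorial example, not compiled code; the
 * endpoint values are placeholders): before gether_connect() is called,
 * the function driver fills in the struct gether fields that this file
 * dereferences, roughly:
 *
 *      link->in_ep = ...;         bulk IN endpoint (device to host)
 *      link->out_ep = ...;        bulk OUT endpoint (host to device)
 *      link->header_len = ...;    bytes prepended by wrap(), if any
 *      link->wrap = ...;          optional TX framing hook
 *      link->unwrap = ...;        optional RX de-framing hook
 *      link->is_zlp_ok = ...;     controller can write zero length packets
 *
 * See u_ether.h for the full structure definition.
 */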

#define UETH__VERSION   "29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)

struct eth_dev {
        /* lock is held while accessing port_usb
         */
        spinlock_t              lock;
        struct gether           *port_usb;

        struct net_device       *net;
        struct usb_gadget       *gadget;

        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
        atomic_t                tx_qlen;

        struct sk_buff_head     rx_frames;

        unsigned                qmult;

        unsigned                header_len;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);

        struct work_struct      work;

        unsigned long           todo;
#define WORK_RX_MEMORY          0

        bool                    zlp;
        bool                    no_skb_reserve;
        bool                    ifname_set;
        u8                      host_mac[ETH_ALEN];
        u8                      dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA        20      /* bytes guarding against rx overflows */

#define DEFAULT_QLEN    2       /* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
        if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
                                            gadget->speed >= USB_SPEED_SUPER))
                return qmult * DEFAULT_QLEN;
        else
                return DEFAULT_QLEN;
}
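
/* For example, assuming the usual QMULT_DEFAULT of 5 from u_ether.h, a
 * high- or super-speed link gets a queue depth of 5 * DEFAULT_QLEN = 10
 * requests per direction, while a full-speed link stays at DEFAULT_QLEN.
 */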

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
        printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
        xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
        do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG    DBG
#else
#define VDBG(dev, fmt, args...) \
        do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
        xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
        xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
        struct eth_dev *dev = netdev_priv(net);

        strlcpy(p->driver, "g_ether", sizeof(p->driver));
        strlcpy(p->version, UETH__VERSION, sizeof(p->version));
        strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
        strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
        .get_drvinfo = eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
        if (test_and_set_bit(flag, &dev->todo))
                return;
        if (!schedule_work(&dev->work))
                ERROR(dev, "kevent %d may have been dropped\n", flag);
        else
                DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
        struct usb_gadget *g = dev->gadget;
        struct sk_buff  *skb;
        int             retval = -ENOMEM;
        size_t          size = 0;
        struct usb_ep   *out;
        unsigned long   flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                out = dev->port_usb->out_ep;
        else
                out = NULL;

        if (!out) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return -ENOTCONN;
        }

        /* Padding up to RX_EXTRA handles minor disagreements with host.
         * Normally we use the USB "terminate on short read" convention;
         * so allow up to (N*maxpacket), since that memory is normally
         * already allocated.  Some hardware doesn't deal well with short
         * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
         * byte off the end (to force hardware errors on overflow).
         *
         * RNDIS uses internal framing, and explicitly allows senders to
         * pad to end-of-packet.  That's potentially nice for speed, but
         * means receivers can't recover lost synch on their own (because
         * new packets don't only start after a short RX).
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;

        if (g->quirk_ep_out_aligned_size) {
                size += out->maxpacket - 1;
                size -= size % out->maxpacket;
        }

        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
        spin_unlock_irqrestore(&dev->lock, flags);
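
        /* Example sizing (editorial note), assuming a standard 1500 byte
         * MTU, a 512 byte bulk maxpacket, and no extra framing
         * (header_len == 0): size starts at 14 + 1500 + 20 = 1534 bytes,
         * and quirk_ep_out_aligned_size rounds that up to 1536.  Even at
         * the maximum MTU of 15412, the buffer (15446 bytes plus
         * NET_IP_ALIGN) stays below 16k, per the GETHER_MAX_MTU_SIZE
         * comment above.
         */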

        skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
                goto enomem;
        }

        /* Some platforms perform better when IP packets are aligned,
         * but on at least one, checksumming fails otherwise.  Note:
         * RNDIS headers involve variable numbers of LE32 values.
         */
        if (likely(!dev->no_skb_reserve))
                skb_reserve(skb, NET_IP_ALIGN);

        req->buf = skb->data;
        req->length = size;
        req->complete = rx_complete;
        req->context = skb;

        retval = usb_ep_queue(out, req, gfp_flags);
        if (retval == -ENOMEM)
enomem:
                defer_kevent(dev, WORK_RX_MEMORY);
        if (retval) {
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
                spin_lock_irqsave(&dev->req_lock, flags);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context, *skb2;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;

        switch (status) {

        /* normal completion */
        case 0:
                skb_put(skb, req->actual);

                if (dev->unwrap) {
                        unsigned long   flags;

                        spin_lock_irqsave(&dev->lock, flags);
                        if (dev->port_usb) {
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
                        }
                        spin_unlock_irqrestore(&dev->lock, flags);
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
                skb = NULL;

                skb2 = skb_dequeue(&dev->rx_frames);
                while (skb2) {
                        if (status < 0
                                        || ETH_HLEN > skb2->len
                                        || skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
                                dev->net->stats.rx_errors++;
                                dev->net->stats.rx_length_errors++;
                                DBG(dev, "rx length %d\n", skb2->len);
                                dev_kfree_skb_any(skb2);
                                goto next_frame;
                        }
                        skb2->protocol = eth_type_trans(skb2, dev->net);
                        dev->net->stats.rx_packets++;
                        dev->net->stats.rx_bytes += skb2->len;

                        /* no buffer copies needed, unless hardware can't
                         * use skb buffers.
                         */
                        status = netif_rx(skb2);
next_frame:
                        skb2 = skb_dequeue(&dev->rx_frames);
                }
                break;

        /* software-driven interface shutdown */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                VDBG(dev, "rx shutdown, code %d\n", status);
                goto quiesce;

        /* for hardware automagic (such as pxa) */
        case -ECONNABORTED:             /* endpoint reset */
                DBG(dev, "rx %s reset\n", ep->name);
                defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
                dev_kfree_skb_any(skb);
                goto clean;

        /* data overrun */
        case -EOVERFLOW:
                dev->net->stats.rx_over_errors++;
                fallthrough;

        default:
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }

        if (skb)
                dev_kfree_skb_any(skb);
        if (!netif_running(dev->net)) {
clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
                req = NULL;
        }
        if (req)
                rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
        unsigned                i;
        struct usb_request      *req;

        if (!n)
                return -ENOMEM;

        /* queue/recycle up to N requests */
        i = n;
        list_for_each_entry(req, list, list) {
                if (i-- == 0)
                        goto extra;
        }
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
                list_add(&req->list, list);
        }
        return 0;

extra:
        /* free extras */
        for (;;) {
                struct list_head        *next;

                next = req->list.next;
                list_del(&req->list);
                usb_ep_free_request(ep, req);

                if (next == list)
                        break;

                req = container_of(next, struct usb_request, list);
        }
        return 0;
}
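
/* Usage note (editorial): prealloc() converges a request list toward
 * exactly @n entries.  Growing from 2 to 10 allocates 8 more requests,
 * while a later call with a smaller @n frees the surplus via the
 * "extra" path.  Returning 0 when allocation fails but the list is
 * non-empty lets an already-running queue keep going with fewer buffers.
 */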

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
        int     status;

        spin_lock(&dev->req_lock);
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
fail:
        DBG(dev, "can't alloc requests\n");
done:
        spin_unlock(&dev->req_lock);
        return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
        struct usb_request      *req;
        unsigned long           flags;

        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
                req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);

                if (rx_submit(dev, req, gfp_flags) < 0) {
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }

                spin_lock_irqsave(&dev->req_lock, flags);
        }
        spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);

        if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running(dev->net))
                        rx_fill(dev, GFP_KERNEL);
        }

        if (dev->todo)
                DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;

        switch (req->status) {
        default:
                dev->net->stats.tx_errors++;
                VDBG(dev, "tx err %d\n", req->status);
                fallthrough;
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                dev_kfree_skb_any(skb);
                break;
        case 0:
                dev->net->stats.tx_bytes += skb->len;
                dev_consume_skb_any(skb);
        }
        dev->net->stats.tx_packets++;

        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock(&dev->req_lock);

        atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
{
        struct eth_dev          *dev = netdev_priv(net);
        int                     length = 0;
        int                     retval;
        struct usb_request      *req = NULL;
        unsigned long           flags;
        struct usb_ep           *in;
        u16                     cdc_filter;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
        } else {
                in = NULL;
                cdc_filter = 0;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (skb && !in) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* apply outgoing CDC or RNDIS filters */
        if (skb && !is_promisc(cdc_filter)) {
                u8              *dest = skb->data;

                if (is_multicast_ether_addr(dest)) {
                        u16     type;

                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
                        if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
                }
                /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
        }

        spin_lock_irqsave(&dev->req_lock, flags);
        /*
         * this freelist can be empty if an interrupt triggered disconnect()
         * and reconfigured the gadget (shutting down this queue) after the
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
                spin_unlock_irqrestore(&dev->req_lock, flags);
                return NETDEV_TX_BUSY;
        }

        req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
        list_del(&req->list);

        /* temporarily stop TX queue when the freelist empties */
        if (list_empty(&dev->tx_reqs))
                netif_stop_queue(net);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        /* no buffer copies needed, unless the network stack did it
         * or the hardware can't use skb buffers.
         * or there's not enough space for extra headers we need
         */
        if (dev->wrap) {
                unsigned long   flags;

                spin_lock_irqsave(&dev->lock, flags);
                if (dev->port_usb)
                        skb = dev->wrap(dev->port_usb, skb);
                spin_unlock_irqrestore(&dev->lock, flags);
                if (!skb) {
                        /* Multi frame CDC protocols may store the frame for
                         * later which is not a dropped frame.
                         */
                        if (dev->port_usb &&
                                        dev->port_usb->supports_multi_frame)
                                goto multiframe;
                        goto drop;
                }
        }

        length = skb->len;
        req->buf = skb->data;
        req->context = skb;
        req->complete = tx_complete;

        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
        if (dev->port_usb &&
            dev->port_usb->is_fixed &&
            length == dev->port_usb->fixed_in_len &&
            (length % in->maxpacket) == 0)
                req->zero = 0;
        else
                req->zero = 1;

        /* use zlp framing on tx for strict CDC-Ether conformance,
         * though any robust network rx path ignores extra padding.
         * and some hardware doesn't like to write zlps.
         */
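        /* For example, a 512 byte frame on an endpoint with a 512 byte
         * maxpacket, on a controller where dev->zlp is false, has its
         * length bumped to 513 below; a robust host side rx path treats
         * the extra byte as ignorable padding.
         */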
        if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
                length++;

        req->length = length;

        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
                netif_trans_update(net);
                atomic_inc(&dev->tx_qlen);
        }

        if (retval) {
                dev_kfree_skb_any(skb);
drop:
                dev->net->stats.tx_dropped++;
multiframe:
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
        DBG(dev, "%s\n", __func__);

        /* fill the rx queue */
        rx_fill(dev, gfp_flags);

        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        struct gether   *link;

        DBG(dev, "%s\n", __func__);
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);

        spin_lock_irq(&dev->lock);
        link = dev->port_usb;
        if (link && link->open)
                link->open(link);
        spin_unlock_irq(&dev->lock);

        return 0;
}

static int eth_stop(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;

        VDBG(dev, "%s\n", __func__);
        netif_stop_queue(net);

        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
                dev->net->stats.rx_packets, dev->net->stats.tx_packets,
                dev->net->stats.rx_errors, dev->net->stats.tx_errors
                );

        /* ensure there are no more active requests */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether   *link = dev->port_usb;
                const struct usb_endpoint_descriptor *in;
                const struct usb_endpoint_descriptor *out;

                if (link->close)
                        link->close(link);

                /* NOTE:  we have no abort-queue primitive we could use
                 * to cancel all pending I/O.  Instead, we disable then
                 * reenable the endpoints ... this idiom may leave toggle
                 * wrong, but that's a self-correcting error.
                 *
                 * REVISIT:  we *COULD* just let the transfers complete at
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
                in = link->in_ep->desc;
                out = link->out_ep->desc;
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
                        link->in_ep->desc = in;
                        link->out_ep->desc = out;
                        usb_ep_enable(link->in_ep);
                        usb_ep_enable(link->out_ep);
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
        if (str) {
                unsigned        i;

                for (i = 0; i < 6; i++) {
                        unsigned char num;

                        if ((*str == '.') || (*str == ':'))
                                str++;
                        num = hex_to_bin(*str++) << 4;
                        num |= hex_to_bin(*str++);
                        dev_addr[i] = num;
                }
                if (is_valid_ether_addr(dev_addr))
                        return 0;
        }
        eth_random_addr(dev_addr);
        return 1;
}
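
/* Accepted strings look like "01:23:45:67:89:ab" or "01.23.45.67.89.ab"
 * (the separators are optional).  For instance (editorial example):
 *
 *      u8 mac[ETH_ALEN];
 *
 *      if (get_ether_addr("02:15:03:12:34:56", mac))
 *              pr_info("fell back to a random address\n");
 *
 * A nonzero return means the string was missing or invalid and a random
 * address was generated instead.
 */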

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
        if (len < 18)
                return -EINVAL;

        snprintf(str, len, "%pM", dev_addr);
        return 18;
}

static const struct net_device_ops eth_netdev_ops = {
        .ndo_open               = eth_open,
        .ndo_stop               = eth_stop,
        .ndo_start_xmit         = eth_start_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static struct device_type gadget_type = {
        .name   = "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @dev_addr: NULL, or a string with the Ethernet address of the device
 *      side of the link, in "aa:bb:cc:dd:ee:ff" notation
 * @host_addr: NULL, or a string with the Ethernet address of the host
 *      side of the link, in the same notation
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *      host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
                const char *dev_addr, const char *host_addr,
                u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
        struct eth_dev          *dev;
        struct net_device       *net;
        int                     status;

        net = alloc_etherdev(sizeof(*dev));
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = qmult;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        if (get_ether_addr(dev_addr, net->dev_addr))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "host");

        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);

        net->netdev_ops = &eth_netdev_ops;

        net->ethtool_ops = &ops;

        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
        net->max_mtu = GETHER_MAX_MTU_SIZE;

        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                free_netdev(net);
                dev = ERR_PTR(status);
        } else {
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /*
                 * two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }

        return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);
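
/* Illustrative call sequence (an editorial sketch, not from a real
 * function driver; "gadget", "link", "host_mac" and the error handling
 * are placeholders, while the gether_* calls are this file's exports):
 *
 *      ethdev = gether_setup_name(gadget, NULL, NULL, host_mac, 5, "usb");
 *      if (IS_ERR(ethdev))
 *              return PTR_ERR(ethdev);
 *
 *      net = gether_connect(&link);    (from set_alt(), endpoints ready)
 *      if (IS_ERR(net))
 *              ...
 *
 *      gether_disconnect(&link);       (on disable)
 *      gether_cleanup(ethdev);         (on unbind)
 */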

struct net_device *gether_setup_name_default(const char *netname)
{
        struct net_device       *net;
        struct eth_dev          *dev;

        net = alloc_etherdev(sizeof(*dev));
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = QMULT_DEFAULT;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        eth_random_addr(dev->dev_mac);
        pr_warn("using random %s ethernet address\n", "self");
        eth_random_addr(dev->host_mac);
        pr_warn("using random %s ethernet address\n", "host");

        net->netdev_ops = &eth_netdev_ops;

        net->ethtool_ops = &ops;
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
        net->max_mtu = GETHER_MAX_MTU_SIZE;

        return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
        struct eth_dev *dev;
        struct usb_gadget *g;
        struct sockaddr sa;
        int status;

        if (!net->dev.parent)
                return -EINVAL;
        dev = netdev_priv(net);
        g = dev->gadget;
        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                return status;
        } else {
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /* two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }
        sa.sa_family = net->type;
        memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
        rtnl_lock();
        status = dev_set_mac_address(net, &sa, NULL);
        rtnl_unlock();
        if (status)
                pr_warn("cannot set self ethernet address: %d\n", status);
        else
                INFO(dev, "MAC %pM\n", dev->dev_mac);

        return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(dev_addr, new_addr))
                return -EINVAL;
        memcpy(dev->dev_mac, new_addr, ETH_ALEN);
        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
        struct eth_dev *dev;
        int ret;

        dev = netdev_priv(net);
        ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
        if (ret + 1 < len) {
                dev_addr[ret++] = '\n';
                dev_addr[ret] = '\0';
        }

        return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(host_addr, new_addr))
                return -EINVAL;
        memcpy(dev->host_mac, new_addr, ETH_ALEN);
        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;
        int ret;

        dev = netdev_priv(net);
        ret = get_ether_addr_str(dev->host_mac, host_addr, len);
        if (ret + 1 < len) {
                host_addr[ret++] = '\n';
                host_addr[ret] = '\0';
        }

        return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;

        if (len < 13)
                return -EINVAL;

        dev = netdev_priv(net);
        snprintf(host_addr, len, "%pm", dev->host_mac);

        return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
        struct eth_dev *dev = netdev_priv(net);
        int ret;

        rtnl_lock();
        ret = scnprintf(name, len, "%s\n",
                        dev->ifname_set ? net->name : netdev_name(net));
        rtnl_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

int gether_set_ifname(struct net_device *net, const char *name, int len)
{
        struct eth_dev *dev = netdev_priv(net);
        char tmp[IFNAMSIZ];
        const char *p;

        if (name[len - 1] == '\n')
                len--;

        if (len >= sizeof(tmp))
                return -E2BIG;

        strscpy(tmp, name, len + 1);
        if (!dev_valid_name(tmp))
                return -EINVAL;

        /* Require exactly one %d, so binding will not fail with EEXIST. */
        p = strchr(name, '%');
        if (!p || p[1] != 'd' || strchr(p + 2, '%'))
                return -EINVAL;

        strncpy(net->name, tmp, sizeof(net->name));
        dev->ifname_set = true;

        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);
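
/* Example (editorial note): assuming the function exposes this through
 * its configfs "ifname" attribute, writing "myusb%d\n" succeeds and the
 * device is named e.g. "myusb0" when registered, while "myusb0" (no %d)
 * and "my%dusb%d" (more than one) are rejected with -EINVAL.
 */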

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
        if (!dev)
                return;

        unregister_netdev(dev->net);
        flush_work(&dev->work);
        free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *      current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        int                     result = 0;

        if (!dev)
                return ERR_PTR(-EINVAL);

        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->in_ep->name, result);
                goto fail0;
        }

        link->out_ep->driver_data = dev;
        result = usb_ep_enable(link->out_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->out_ep->name, result);
                goto fail1;
        }

        if (result == 0)
                result = alloc_requests(dev, link, qlen(dev->gadget,
                                        dev->qmult));

        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
                dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
                DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;

                spin_lock(&dev->lock);
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
                                link->open(link);
                } else {
                        if (link->close)
                                link->close(link);
                }
                spin_unlock(&dev->lock);

                netif_carrier_on(dev->net);
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);

        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
fail1:
                (void) usb_ep_disable(link->in_ep);
        }
fail0:
        /* caller is responsible for cleanup on error */
        if (result < 0)
                return ERR_PTR(result);
        return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;

        WARN_ON(!dev);
        if (!dev)
                return;

        DBG(dev, "%s\n", __func__);

        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);

        /* disable endpoints, forcing (synchronous) completion
         * of all pending i/o.  then free the request objects
         * and forget about the endpoints.
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->tx_reqs)) {
                req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->in_ep->desc = NULL;

        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->out_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->out_ep->desc = NULL;

        /* finish forgetting about this USB link episode */
        dev->header_len = 0;
        dev->unwrap = NULL;
        dev->wrap = NULL;

        spin_lock(&dev->lock);
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");