linux/drivers/usb/gadget/function/u_ether.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
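
/*
 * A hedged sketch of the typical lifecycle, as a function driver might
 * use this layer.  Compiled out; illustration only.  The example_*
 * names are placeholders, not symbols defined anywhere in the tree.
 */
#if 0
static struct eth_dev *example_dev;

static int example_bind(struct usb_gadget *g)
{
        u8 host_mac[ETH_ALEN];

        /* allocates and registers "usb0" (or "usb1", ...); may sleep */
        example_dev = gether_setup_name(g, NULL, NULL, host_mac,
                                        QMULT_DEFAULT, "usb");
        return PTR_ERR_OR_ZERO(example_dev);
}

static void example_activate(struct gether *link)
{
        /* endpoints and speed-matched descriptors already chosen, and
         * link->ioport already points at the eth_dev; called with irqs
         * blocked, e.g. from a set_alt() handler
         */
        struct net_device *net = gether_connect(link);

        if (IS_ERR(net))
                pr_err("example: can't activate link\n");
}

static void example_unbind(void)
{
        gether_cleanup(example_dev);    /* may sleep */
}
#endif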

#define UETH__VERSION   "29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes.  Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling.
 */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
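
/* Sizing sanity check (illustrative arithmetic, not from the original
 * comment): 15 * 1024 + 52 = 15412 bytes of MTU, so a maximal frame is
 * 15412 + ETH_HLEN (14) = 15426 bytes, and an rx buffer still fits a
 * 16 KiB allocation instead of spilling into a 32 KiB one.
 */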

struct eth_dev {
        /* lock is held while accessing port_usb */
        spinlock_t              lock;
        struct gether           *port_usb;

        struct net_device       *net;
        struct usb_gadget       *gadget;

        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
        atomic_t                tx_qlen;

        struct sk_buff_head     rx_frames;

        unsigned                qmult;

        unsigned                header_len;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);

        struct work_struct      work;

        unsigned long           todo;
#define WORK_RX_MEMORY          0

        bool                    zlp;
        bool                    no_skb_reserve;
        bool                    ifname_set;
        u8                      host_mac[ETH_ALEN];
        u8                      dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA        20      /* bytes guarding against rx overflows */

#define DEFAULT_QLEN    2       /* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
        if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
                                            gadget->speed >= USB_SPEED_SUPER))
                return qmult * DEFAULT_QLEN;
        else
                return DEFAULT_QLEN;
}
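
/* Illustrative depths (assuming QMULT_DEFAULT's value of 5 from
 * u_ether.h): full/low speed keeps DEFAULT_QLEN = 2 requests per
 * direction, while high/super speed gets 5 * 2 = 10.
 */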

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
        printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
        xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
        do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG    DBG
#else
#define VDBG(dev, fmt, args...) \
        do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
        xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
        xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
        struct eth_dev *dev = netdev_priv(net);

        strscpy(p->driver, "g_ether", sizeof(p->driver));
        strscpy(p->version, UETH__VERSION, sizeof(p->version));
        strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
        strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
        .get_drvinfo = eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
        if (test_and_set_bit(flag, &dev->todo))
                return;
        if (!schedule_work(&dev->work))
                ERROR(dev, "kevent %d may have been dropped\n", flag);
        else
                DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
        struct usb_gadget *g = dev->gadget;
        struct sk_buff  *skb;
        int             retval = -ENOMEM;
        size_t          size = 0;
        struct usb_ep   *out;
        unsigned long   flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
                out = dev->port_usb->out_ep;
        else
                out = NULL;

        if (!out) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return -ENOTCONN;
        }

        /* Padding up to RX_EXTRA handles minor disagreements with host.
         * Normally we use the USB "terminate on short read" convention;
         * so allow up to (N*maxpacket), since that memory is normally
         * already allocated.  Some hardware doesn't deal well with short
         * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
         * byte off the end (to force hardware errors on overflow).
         *
         * RNDIS uses internal framing, and explicitly allows senders to
         * pad to end-of-packet.  That's potentially nice for speed, but
         * means receivers can't recover lost synch on their own (because
         * new packets don't only start after a short RX).
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;

        if (g->quirk_ep_out_aligned_size) {
                size += out->maxpacket - 1;
                size -= size % out->maxpacket;
        }

        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
        spin_unlock_irqrestore(&dev->lock, flags);

        skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
                goto enomem;
        }

        /* Some platforms perform better when IP packets are aligned,
         * but on at least one, checksumming fails otherwise.  Note:
         * RNDIS headers involve variable numbers of LE32 values.
         */
        if (likely(!dev->no_skb_reserve))
                skb_reserve(skb, NET_IP_ALIGN);

        req->buf = skb->data;
        req->length = size;
        req->complete = rx_complete;
        req->context = skb;

        retval = usb_ep_queue(out, req, gfp_flags);
        if (retval == -ENOMEM)
enomem:
                defer_kevent(dev, WORK_RX_MEMORY);
        if (retval) {
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
                spin_lock_irqsave(&dev->req_lock, flags);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
}
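
/* Illustrative rx buffer sizing (assumed numbers, following the comment
 * in rx_submit() above): with a 1500 byte MTU, no function-specific
 * header, and a 512 byte bulk maxpacket on a UDC that sets
 * quirk_ep_out_aligned_size, size = 14 + 1500 + 20 = 1534, rounded up
 * to 3 * 512 = 1536 so every OUT transfer is a maxpacket multiple.
 */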

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context, *skb2;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;

        switch (status) {

        /* normal completion */
        case 0:
                skb_put(skb, req->actual);

                if (dev->unwrap) {
                        unsigned long   flags;

                        spin_lock_irqsave(&dev->lock, flags);
                        if (dev->port_usb) {
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
                        }
                        spin_unlock_irqrestore(&dev->lock, flags);
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
                skb = NULL;

                skb2 = skb_dequeue(&dev->rx_frames);
                while (skb2) {
                        if (status < 0
                                        || ETH_HLEN > skb2->len
                                        || skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
                                dev->net->stats.rx_errors++;
                                dev->net->stats.rx_length_errors++;
                                DBG(dev, "rx length %d\n", skb2->len);
                                dev_kfree_skb_any(skb2);
                                goto next_frame;
                        }
                        skb2->protocol = eth_type_trans(skb2, dev->net);
                        dev->net->stats.rx_packets++;
                        dev->net->stats.rx_bytes += skb2->len;

                        /* no buffer copies needed, unless hardware can't
                         * use skb buffers.
                         */
                        status = netif_rx(skb2);
next_frame:
                        skb2 = skb_dequeue(&dev->rx_frames);
                }
                break;

        /* software-driven interface shutdown */
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                VDBG(dev, "rx shutdown, code %d\n", status);
                goto quiesce;

        /* for hardware automagic (such as pxa) */
        case -ECONNABORTED:             /* endpoint reset */
                DBG(dev, "rx %s reset\n", ep->name);
                defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
                dev_kfree_skb_any(skb);
                goto clean;

        /* data overrun */
        case -EOVERFLOW:
                dev->net->stats.rx_over_errors++;
                fallthrough;

        default:
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }

        if (skb)
                dev_kfree_skb_any(skb);
        if (!netif_running(dev->net)) {
clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
                req = NULL;
        }
        if (req)
                rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
        unsigned                i;
        struct usb_request      *req;

        if (!n)
                return -ENOMEM;

        /* queue/recycle up to N requests */
        i = n;
        list_for_each_entry(req, list, list) {
                if (i-- == 0)
                        goto extra;
        }
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
                list_add(&req->list, list);
        }
        return 0;

extra:
        /* free extras */
        for (;;) {
                struct list_head        *next;

                next = req->list.next;
                list_del(&req->list);
                usb_ep_free_request(ep, req);

                if (next == list)
                        break;

                req = container_of(next, struct usb_request, list);
        }
        return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
        int     status;

        spin_lock(&dev->req_lock);
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
fail:
        DBG(dev, "can't alloc requests\n");
done:
        spin_unlock(&dev->req_lock);
        return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
        struct usb_request      *req;
        unsigned long           flags;

        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
                req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);

                if (rx_submit(dev, req, gfp_flags) < 0) {
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }

                spin_lock_irqsave(&dev->req_lock, flags);
        }
        spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);

        if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running(dev->net))
                        rx_fill(dev, GFP_KERNEL);
        }

        if (dev->todo)
                DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;

        switch (req->status) {
        default:
                dev->net->stats.tx_errors++;
                VDBG(dev, "tx err %d\n", req->status);
                fallthrough;
        case -ECONNRESET:               /* unlink */
        case -ESHUTDOWN:                /* disconnect etc */
                dev_kfree_skb_any(skb);
                break;
        case 0:
                dev->net->stats.tx_bytes += skb->len;
                dev_consume_skb_any(skb);
        }
        dev->net->stats.tx_packets++;

        spin_lock(&dev->req_lock);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock(&dev->req_lock);

        atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
{
        struct eth_dev          *dev = netdev_priv(net);
        int                     length = 0;
        int                     retval;
        struct usb_request      *req = NULL;
        unsigned long           flags;
        struct usb_ep           *in;
        u16                     cdc_filter;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
        } else {
                in = NULL;
                cdc_filter = 0;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (!in) {
                if (skb)
                        dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* apply outgoing CDC or RNDIS filters */
        if (skb && !is_promisc(cdc_filter)) {
                u8              *dest = skb->data;

                if (is_multicast_ether_addr(dest)) {
                        u16     type;

                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
                        if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
                }
                /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
        }

        spin_lock_irqsave(&dev->req_lock, flags);
        /*
         * this freelist can be empty if an interrupt triggered disconnect()
         * and reconfigured the gadget (shutting down this queue) after the
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
                spin_unlock_irqrestore(&dev->req_lock, flags);
                return NETDEV_TX_BUSY;
        }

        req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
        list_del(&req->list);

        /* temporarily stop TX queue when the freelist empties */
        if (list_empty(&dev->tx_reqs))
                netif_stop_queue(net);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        /* no buffer copies needed, unless the network stack did it,
         * the hardware can't use skb buffers, or there's not enough
         * space for the extra headers we need.
         */
        if (dev->wrap) {
                unsigned long   flags;

                spin_lock_irqsave(&dev->lock, flags);
                if (dev->port_usb)
                        skb = dev->wrap(dev->port_usb, skb);
                spin_unlock_irqrestore(&dev->lock, flags);
                if (!skb) {
                        /* Multi frame CDC protocols may store the frame for
                         * later which is not a dropped frame.
                         */
                        if (dev->port_usb &&
                                        dev->port_usb->supports_multi_frame)
                                goto multiframe;
                        goto drop;
                }
        }

        length = skb->len;
        req->buf = skb->data;
        req->context = skb;
        req->complete = tx_complete;

        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
        if (dev->port_usb &&
            dev->port_usb->is_fixed &&
            length == dev->port_usb->fixed_in_len &&
            (length % in->maxpacket) == 0)
                req->zero = 0;
        else
                req->zero = 1;

        /* use zlp framing on tx for strict CDC-Ether conformance,
         * though any robust network rx path ignores extra padding.
         * And some hardware doesn't like to write zlps.
         */
        if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
                length++;

        req->length = length;

        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
                break;
        case 0:
                netif_trans_update(net);
                atomic_inc(&dev->tx_qlen);
        }

        if (retval) {
                dev_kfree_skb_any(skb);
drop:
                dev->net->stats.tx_dropped++;
multiframe:
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return NETDEV_TX_OK;
}
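
/* Framing example for the zlp logic above (illustrative numbers): a
 * 1024 byte wrapped frame on a 512 byte bulk-in endpoint normally ends
 * with a zero length packet (req->zero = 1); if the UDC can't write
 * zlps (!dev->zlp), one pad byte is appended instead (length = 1025)
 * so the final packet is short and still terminates the transfer.
 */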

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
        DBG(dev, "%s\n", __func__);

        /* fill the rx queue */
        rx_fill(dev, gfp_flags);

        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        struct gether   *link;

        DBG(dev, "%s\n", __func__);
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);

        spin_lock_irq(&dev->lock);
        link = dev->port_usb;
        if (link && link->open)
                link->open(link);
        spin_unlock_irq(&dev->lock);

        return 0;
}

static int eth_stop(struct net_device *net)
{
        struct eth_dev  *dev = netdev_priv(net);
        unsigned long   flags;

        VDBG(dev, "%s\n", __func__);
        netif_stop_queue(net);

        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
                dev->net->stats.rx_packets, dev->net->stats.tx_packets,
                dev->net->stats.rx_errors, dev->net->stats.tx_errors
                );

        /* ensure there are no more active requests */
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether   *link = dev->port_usb;
                const struct usb_endpoint_descriptor *in;
                const struct usb_endpoint_descriptor *out;

                if (link->close)
                        link->close(link);

                /* NOTE:  we have no abort-queue primitive we could use
                 * to cancel all pending I/O.  Instead, we disable then
                 * reenable the endpoints ... this idiom may leave toggle
                 * wrong, but that's a self-correcting error.
                 *
                 * REVISIT:  we *COULD* just let the transfers complete at
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
                in = link->in_ep->desc;
                out = link->out_ep->desc;
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
                        link->in_ep->desc = in;
                        link->out_ep->desc = out;
                        usb_ep_enable(link->in_ep);
                        usb_ep_enable(link->out_ep);
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
        if (str) {
                unsigned        i;

                for (i = 0; i < 6; i++) {
                        unsigned char num;

                        if ((*str == '.') || (*str == ':'))
                                str++;
                        num = hex_to_bin(*str++) << 4;
                        num |= hex_to_bin(*str++);
                        dev_addr[i] = num;
                }
                if (is_valid_ether_addr(dev_addr))
                        return 0;
        }
        eth_random_addr(dev_addr);
        return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
        if (len < 18)
                return -EINVAL;

        snprintf(str, len, "%pM", dev_addr);
        return 18;
}
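
/* Address parsing example (illustrative): get_ether_addr() accepts six
 * hex byte values with optional '.' or ':' separators, so
 * "aa:bb:cc:dd:ee:f2" and "aabbccddeef2" both parse to the same MAC.
 * get_ether_addr_str() needs len >= 18 because "%pM" emits 17
 * characters ("aa:bb:cc:dd:ee:f2") plus a trailing NUL.
 */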

static const struct net_device_ops eth_netdev_ops = {
        .ndo_open               = eth_open,
        .ndo_stop               = eth_stop,
        .ndo_start_xmit         = eth_start_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static struct device_type gadget_type = {
        .name   = "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @dev_addr: NULL, or a "xx:xx:xx:xx:xx:xx" style address for the
 *      device end of the link
 * @host_addr: NULL, or a similar address for the host end of the link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *      host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
                const char *dev_addr, const char *host_addr,
                u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
        struct eth_dev          *dev;
        struct net_device       *net;
        int                     status;
        u8                      addr[ETH_ALEN];

        net = alloc_etherdev(sizeof *dev);
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = qmult;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        if (get_ether_addr(dev_addr, addr)) {
                net->addr_assign_type = NET_ADDR_RANDOM;
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        } else {
                net->addr_assign_type = NET_ADDR_SET;
        }
        eth_hw_addr_set(net, addr);
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "host");

        if (ethaddr)
                memcpy(ethaddr, dev->host_mac, ETH_ALEN);

        net->netdev_ops = &eth_netdev_ops;

        net->ethtool_ops = &ops;

        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
        net->max_mtu = GETHER_MAX_MTU_SIZE;

        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                free_netdev(net);
                dev = ERR_PTR(status);
        } else {
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);

                /*
                 * two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }

        return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
        struct net_device       *net;
        struct eth_dev          *dev;

        net = alloc_etherdev(sizeof(*dev));
        if (!net)
                return ERR_PTR(-ENOMEM);

        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);

        skb_queue_head_init(&dev->rx_frames);

        /* network device setup */
        dev->net = net;
        dev->qmult = QMULT_DEFAULT;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);

        eth_random_addr(dev->dev_mac);
        pr_warn("using random %s ethernet address\n", "self");

        /* by default we always have a random MAC address */
        net->addr_assign_type = NET_ADDR_RANDOM;

        eth_random_addr(dev->host_mac);
        pr_warn("using random %s ethernet address\n", "host");

        net->netdev_ops = &eth_netdev_ops;

        net->ethtool_ops = &ops;
        SET_NETDEV_DEVTYPE(net, &gadget_type);

        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
        net->max_mtu = GETHER_MAX_MTU_SIZE;

        return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
        struct eth_dev *dev;
        struct usb_gadget *g;
        int status;

        if (!net->dev.parent)
                return -EINVAL;
        dev = netdev_priv(net);
        g = dev->gadget;

        eth_hw_addr_set(net, dev->dev_mac);

        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                return status;
        } else {
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);
                INFO(dev, "MAC %pM\n", dev->dev_mac);

                /* two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
                netif_carrier_off(net);
        }

        return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(dev_addr, new_addr))
                return -EINVAL;
        memcpy(dev->dev_mac, new_addr, ETH_ALEN);
        net->addr_assign_type = NET_ADDR_SET;
        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
        struct eth_dev *dev;
        int ret;

        dev = netdev_priv(net);
        ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
        if (ret + 1 < len) {
                dev_addr[ret++] = '\n';
                dev_addr[ret] = '\0';
        }

        return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
        struct eth_dev *dev;
        u8 new_addr[ETH_ALEN];

        dev = netdev_priv(net);
        if (get_ether_addr(host_addr, new_addr))
                return -EINVAL;
        memcpy(dev->host_mac, new_addr, ETH_ALEN);
        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;
        int ret;

        dev = netdev_priv(net);
        ret = get_ether_addr_str(dev->host_mac, host_addr, len);
        if (ret + 1 < len) {
                host_addr[ret++] = '\n';
                host_addr[ret] = '\0';
        }

        return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
        struct eth_dev *dev;

        if (len < 13)
                return -EINVAL;

        dev = netdev_priv(net);
        snprintf(host_addr, len, "%pm", dev->host_mac);

        return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
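
/* Note on the "%pm" above (as opposed to "%pM"): it prints the MAC as
 * twelve bare hex digits, e.g. "aabbccddeef2", which is the form the
 * CDC iMACAddress string descriptor wants; hence the len < 13 check
 * (12 digits plus the trailing NUL).
 */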

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
        struct eth_dev *dev;

        dev = netdev_priv(net);
        return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
        struct eth_dev *dev = netdev_priv(net);
        int ret;

        rtnl_lock();
        ret = scnprintf(name, len, "%s\n",
                        dev->ifname_set ? net->name : netdev_name(net));
        rtnl_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

int gether_set_ifname(struct net_device *net, const char *name, int len)
{
        struct eth_dev *dev = netdev_priv(net);
        char tmp[IFNAMSIZ];
        const char *p;

        if (name[len - 1] == '\n')
                len--;

        if (len >= sizeof(tmp))
                return -E2BIG;

        strscpy(tmp, name, len + 1);
        if (!dev_valid_name(tmp))
                return -EINVAL;

        /* Require exactly one %d, so binding will not fail with EEXIST. */
        p = strchr(name, '%');
        if (!p || p[1] != 'd' || strchr(p + 2, '%'))
                return -EINVAL;

        strscpy(net->name, tmp, sizeof(net->name));
        dev->ifname_set = true;

        return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);
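
/* Illustrative usage of the rule above: writing "eth%d\n" through
 * configfs is accepted, and the networking core expands the template
 * (eth0, eth1, ...) at register time; "eth0" (no "%d") and "eth%d%d"
 * (a second '%') are both rejected with -EINVAL.
 */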

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
        if (!dev)
                return;

        unregister_netdev(dev->net);
        flush_work(&dev->work);
        free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *      current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        int                     result = 0;

        if (!dev)
                return ERR_PTR(-EINVAL);

        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->in_ep->name, result);
                goto fail0;
        }

        link->out_ep->driver_data = dev;
        result = usb_ep_enable(link->out_ep);
        if (result != 0) {
                DBG(dev, "enable %s --> %d\n",
                        link->out_ep->name, result);
                goto fail1;
        }

        if (result == 0)
                result = alloc_requests(dev, link, qlen(dev->gadget,
                                        dev->qmult));

        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
                dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
                DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;

                spin_lock(&dev->lock);
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
                                link->open(link);
                } else {
                        if (link->close)
                                link->close(link);
                }
                spin_unlock(&dev->lock);

                netif_carrier_on(dev->net);
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);

        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
fail1:
                (void) usb_ep_disable(link->in_ep);
        }
fail0:
        /* caller is responsible for cleanup on error */
        if (result < 0)
                return ERR_PTR(result);
        return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;

        WARN_ON(!dev);
        if (!dev)
                return;

        DBG(dev, "%s\n", __func__);

        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);

        /* disable endpoints, forcing (synchronous) completion
         * of all pending i/o.  then free the request objects
         * and forget about the endpoints.
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->tx_reqs)) {
                req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->in_ep->desc = NULL;

        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
                list_del(&req->list);

                spin_unlock(&dev->req_lock);
                usb_ep_free_request(link->out_ep, req);
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
        link->out_ep->desc = NULL;

        /* finish forgetting about this USB link episode */
        dev->header_len = 0;
        dev->unwrap = NULL;
        dev->wrap = NULL;

        spin_lock(&dev->lock);
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");