/* linux/drivers/net/wimax/i2400m/usb-rx.c */
   1/*
   2 * Intel Wireless WiMAX Connection 2400m
   3 * USB RX handling
   4 *
   5 *
   6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions
  10 * are met:
  11 *
  12 *   * Redistributions of source code must retain the above copyright
  13 *     notice, this list of conditions and the following disclaimer.
  14 *   * Redistributions in binary form must reproduce the above copyright
  15 *     notice, this list of conditions and the following disclaimer in
  16 *     the documentation and/or other materials provided with the
  17 *     distribution.
  18 *   * Neither the name of Intel Corporation nor the names of its
  19 *     contributors may be used to endorse or promote products derived
  20 *     from this software without specific prior written permission.
  21 *
  22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33 *
  34 *
  35 * Intel Corporation <linux-wimax@intel.com>
  36 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
  37 *  - Initial implementation
  38 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
  39 *  - Use skb_clone(), break up processing in chunks
  40 *  - Split transport/device specific
  41 *  - Make buffer size dynamic to exert less memory pressure
  42 *
  43 *
  44 * This handles the RX path on USB.
  45 *
  46 * When a notification is received that says 'there is RX data ready',
  47 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
  48 * reads a buffer from USB and passes it to i2400m_rx() in the generic
   49 * handling code. The RX buffer has a specific format that is
  50 * described in rx.c.
  51 *
  52 * We use a kernel thread in a loop because:
  53 *
  54 *  - we want to be able to call the USB power management get/put
  55 *    functions (blocking) before each transaction.
  56 *
  57 *  - We might get a lot of notifications and we don't want to submit
  58 *    a zillion reads; by serializing, we are throttling.
  59 *
  60 *  - RX data processing can get heavy enough so that it is not
  61 *    appropriate for doing it in the USB callback; thus we run it in a
  62 *    process context.
  63 *
  64 * We provide a read buffer of an arbitrary size (short of a page); if
  65 * the callback reports -EOVERFLOW, it means it was too small, so we
  66 * just double the size and retry (being careful to append, as
  67 * sometimes the device provided some data). Every now and then we
  68 * check if the average packet size is smaller than the current packet
  69 * size and if so, we halve it. At the end, the size of the
  70 * preallocated buffer should be following the average received
  71 * transaction size, adapting dynamically to it.
  72 *
  73 * ROADMAP
  74 *
  75 * i2400mu_rx_kick()               Called from notif.c when we get a
  76 *                                 'data ready' notification
  77 * i2400mu_rxd()                   Kernel RX daemon
  78 *   i2400mu_rx()                  Receive USB data
  79 *   i2400m_rx()                   Send data to generic i2400m RX handling
  80 *
  81 * i2400mu_rx_setup()              called from i2400mu_bus_dev_start()
  82 *
  83 * i2400mu_rx_release()            called from i2400mu_bus_dev_stop()
  84 */
  85#include <linux/workqueue.h>
  86#include <linux/slab.h>
  87#include <linux/usb.h>
  88#include "i2400m-usb.h"
  89
  90
  91#define D_SUBMODULE rx
  92#include "usb-debug-levels.h"
  93
  94/*
  95 * Dynamic RX size
  96 *
  97 * We can't let the rx_size be a multiple of 512 bytes (the RX
  98 * endpoint's max packet size). On some USB host controllers (we
  99 * haven't been able to fully characterize which), if the device is
 100 * about to send (for example) X bytes and we only post a buffer to
 101 * receive n*512, it will fail to mark that as babble (so that
 102 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
 103 * rest).
 104 *
 105 * So on growing or shrinking, if it is a multiple of the
  106 * maxpacketsize, we remove some (instead of increasing some, so in a
 107 * buddy allocator we try to waste less space).
 108 *
 109 * Note we also need a hook for this on i2400mu_rx() -- when we do the
 110 * first read, we are sure we won't hit this spot because
  111 * i2400mu->rx_size has been set properly. However, if we have to
 112 * double because of -EOVERFLOW, when we launch the read to get the
 113 * rest of the data, we *have* to make sure that also is not a
 114 * multiple of the max_pkt_size.
 115 */
 116
 117static
 118size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
 119{
 120        struct device *dev = &i2400mu->usb_iface->dev;
 121        size_t rx_size;
 122        const size_t max_pkt_size = 512;
 123
 124        rx_size = 2 * i2400mu->rx_size;
 125        if (rx_size % max_pkt_size == 0) {
 126                rx_size -= 8;
 127                d_printf(1, dev,
 128                         "RX: expected size grew to %zu [adjusted -8] "
 129                         "from %zu\n",
 130                         rx_size, i2400mu->rx_size);
 131        } else
 132                d_printf(1, dev,
 133                         "RX: expected size grew to %zu from %zu\n",
 134                         rx_size, i2400mu->rx_size);
 135        return rx_size;
 136}
 137
 138
 139static
 140void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
 141{
 142        const size_t max_pkt_size = 512;
 143        struct device *dev = &i2400mu->usb_iface->dev;
 144
 145        if (unlikely(i2400mu->rx_size_cnt >= 100
 146                     && i2400mu->rx_size_auto_shrink)) {
 147                size_t avg_rx_size =
 148                        i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
 149                size_t new_rx_size = i2400mu->rx_size / 2;
 150                if (avg_rx_size < new_rx_size) {
 151                        if (new_rx_size % max_pkt_size == 0) {
 152                                new_rx_size -= 8;
 153                                d_printf(1, dev,
 154                                         "RX: expected size shrank to %zu "
 155                                         "[adjusted -8] from %zu\n",
 156                                         new_rx_size, i2400mu->rx_size);
 157                        } else
 158                                d_printf(1, dev,
 159                                         "RX: expected size shrank to %zu "
 160                                         "from %zu\n",
 161                                         new_rx_size, i2400mu->rx_size);
 162                        i2400mu->rx_size = new_rx_size;
 163                        i2400mu->rx_size_cnt = 0;
 164                        i2400mu->rx_size_acc = i2400mu->rx_size;
 165                }
 166        }
 167}
 168
 169/*
 170 * Receive a message with payloads from the USB bus into an skb
 171 *
 172 * @i2400mu: USB device descriptor
 173 * @rx_skb: skb where to place the received message
 174 *
 175 * Deals with all the USB-specifics of receiving, dynamically
 176 * increasing the buffer size if so needed. Returns the payload in the
 177 * skb, ready to process. On a zero-length packet, we retry.
 178 *
 179 * On soft USB errors, we retry (until they become too frequent and
 180 * then are promoted to hard); on hard USB errors, we reset the
  181 * device. On other errors (skb reallocation), we just drop it and
  182 * hope for the next invocation to solve it.
 183 *
 184 * Returns: pointer to the skb if ok, ERR_PTR on error.
 185 *   NOTE: this function might realloc the skb (if it is too small),
 186 *   so always update with the one returned.
 187 *   ERR_PTR() is < 0 on error.
 188 *   Will return NULL if it cannot reallocate -- this can be
 189 *   considered a transient retryable error.
 190 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	/* Best-effort autopm: if we can't resume the interface, log it
	 * and proceed without the matching put at the end. */
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	/* Read into the skb's remaining tailroom; never post a buffer
	 * that is a multiple of the max packet size (see the dynamic
	 * RX size note at the top of the file). */
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		/* The device may have delivered partial data before
		 * babbling; keep it, then copy into a bigger skb and
		 * retry the read for the rest. */
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		/* Reset the shrink statistics after a resize. */
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) skb_end_offset(new_skb));
		goto retry;
	}
		/* In most cases, it happens due to the hardware scheduling a
		 * read when there was no data - unfortunately, we have no way
		 * to tell this timeout from a USB timeout. So we just ignore
		 * it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		/* Soft errors retry until they become too frequent,
		 * then we reset the device. */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}
 312
 313
 314/*
 315 * Kernel thread for USB reception of data
 316 *
 317 * This thread waits for a kick; once kicked, it will allocate an skb
 318 * and receive a single message to it from USB (using
 319 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 320 * code for processing.
 321 *
 322 * When done processing, it runs some dirty statistics to verify if
 323 * the last 100 messages received were smaller than half of the
 324 * current RX buffer size. In that case, the RX buffer size is
  325 * halved. This helps lower the pressure on the memory
 326 * allocator.
 327 *
 328 * Hard errors force the thread to exit.
 329 */
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	/* Publish ourselves so i2400mu_rx_release() can find and stop
	 * us; rx_lock protects the rx_kthread pointer. */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		/* Sleep until i2400mu_rx_kick() signals pending data or
		 * someone asks the thread to stop. */
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;	/* spurious/interrupted wakeup */
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads; note
		 * i2400mu_rx() may reallocate and return a new skb, or
		 * NULL/ERR_PTR on failure. */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;	/* hard error: exit the thread */
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	/* De-register before exiting so i2400mu_rx_release() knows the
	 * thread is gone. */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}
 409
 410
 411/*
 412 * Start reading from the device
 413 *
  414 * @i2400mu: USB device instance
 415 *
 416 * Notify the RX thread that there is data pending.
 417 */
 418void i2400mu_rx_kick(struct i2400mu *i2400mu)
 419{
 420        struct i2400m *i2400m = &i2400mu->i2400m;
 421        struct device *dev = &i2400mu->usb_iface->dev;
 422
 423        d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
 424        atomic_inc(&i2400mu->rx_pending_count);
 425        wake_up_all(&i2400mu->rx_wq);
 426        d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 427}
 428
 429
 430int i2400mu_rx_setup(struct i2400mu *i2400mu)
 431{
 432        int result = 0;
 433        struct i2400m *i2400m = &i2400mu->i2400m;
 434        struct device *dev = &i2400mu->usb_iface->dev;
 435        struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
 436        struct task_struct *kthread;
 437
 438        kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
 439                              wimax_dev->name);
 440        /* the kthread function sets i2400mu->rx_thread */
 441        if (IS_ERR(kthread)) {
 442                result = PTR_ERR(kthread);
 443                dev_err(dev, "RX: cannot start thread: %d\n", result);
 444        }
 445        return result;
 446}
 447
 448
 449void i2400mu_rx_release(struct i2400mu *i2400mu)
 450{
 451        unsigned long flags;
 452        struct i2400m *i2400m = &i2400mu->i2400m;
 453        struct device *dev = i2400m_dev(i2400m);
 454        struct task_struct *kthread;
 455
 456        spin_lock_irqsave(&i2400m->rx_lock, flags);
 457        kthread = i2400mu->rx_kthread;
 458        i2400mu->rx_kthread = NULL;
 459        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 460        if (kthread)
 461                kthread_stop(kthread);
 462        else
 463                d_printf(1, dev, "RX: kthread had already exited\n");
 464}
 465
 466