linux/drivers/uwb/i1480/i1480u-wlp/tx.c
/*
 * WUSB Wire Adapter: WLP interface
 * Deal with TX (massaging data to transmit, handling it)
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * Transmission engine. Get an skb, create from that a WLP transmit
 * context, add a WLP TX header (which we keep prefilled in the
 * device's instance), fill out the target-specific fields and
 * fire it.
 *
 * ROADMAP:
 *
 *   Entry points:
 *
 *     i1480u_tx_release(): called by i1480u_disconnect() to release
 *                          pending tx contexts.
 *
 *     i1480u_tx_cb(): callback for TX contexts (USB URBs)
 *       i1480u_tx_destroy(): frees a tx context and removes it from
 *                            the tx list.
 *
 *     i1480u_tx_timeout(): called for timeout handling from the
 *                          network stack.
 *
 *     i1480u_hard_start_xmit(): called for transmitting an skb from
 *                               the network stack. Will interact with WLP
 *                               substack to verify and prepare frame.
 *       i1480u_xmit_frame(): actual transmission on hardware
 *
 *         i1480u_tx_create()       Creates TX context
 *            i1480u_tx_create_1()    For packets in 1 fragment
 *            i1480u_tx_create_n()    For packets in >1 fragments
 *
 * TODO:
 *
 * - FIXME: rewrite using usb_sg_*(), add async support to
 *          usb_sg_*(). It might not make much sense, as most of
 *          the time the MTU will be smaller than one page...
 */

#include "i1480u-wlp.h"

enum {
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
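
/*
 * Worked example, with made-up sizes (the real values come from
 * i1480u-wlp.h): if i1480u_MAX_FRG_SIZE were 512 bytes and
 * sizeof(struct untd_hdr_rst) were 4, each Next/Last fragment could
 * carry up to 508 bytes of payload.
 */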

/* Free resources allocated to an i1480u tx context. */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);
	if (wtx->skb)
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}

static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}

static
void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;

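	/*
	 * Note: usb_unlink_urb() is asynchronous; each unlinked URB
	 * still completes through i1480u_tx_cb(), which is what
	 * actually destroys the tx context and removes it from
	 * tx_list.
	 */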
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}


/*
 * Callback for a completed tx USB URB.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB), so nothing to report */
		dev_dbg(dev, "TX endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "TX endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
					EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded. "
					"Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
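	/*
	 * Simple flow control: the queue is stopped (by
	 * i1480u_xmit_frame() or by the error paths above) when too
	 * many URBs are in flight; restart it once the in-flight
	 * count drains to the threshold. A threshold of zero disables
	 * this restart logic.
	 */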
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
}


/*
 * Given a buffer that doesn't fit in a single fragment, create a
 * contiguous buffer, with per-fragment headers, for delivery to the
 * USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx:        tx descriptor
 * @skb:        skb to send
 * @gfp_mask:   gfp allocation mask
 * @returns:    0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer into chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment N
 *   last fragment data   /
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
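
/*
 * Worked example with made-up sizes (the real ones come from
 * i1480u-wlp.h): say i1480u_MAX_FRG_SIZE is 512, untd_hdr_1st is 8
 * bytes, untd_hdr_rst is 4 bytes and wlp_tx_hdr is 8 bytes. A
 * 1500-byte payload then puts 512 - 8 - 8 = 496 bytes in the first
 * fragment, leaving 1004 bytes to be split into ceil(1004 / 508) = 2
 * more fragments (508 + 496 bytes), each prefixed by its own
 * untd_hdr_rst.
 */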
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have a smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
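	/* i.e. frgs = DIV_ROUND_UP(pl_size_left, i1480u_MAX_PL_SIZE);
	 * each of these fragments is prefixed by an untd_hdr_rst below. */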
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc..
	 */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;

	buf_itr = wtx->buf;		/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
	}
	dev_kfree_skb_irq(skb);
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zu i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %td buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);

	kfree(wtx->buf);
error_buf_alloc:
	return result;
}


/*
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx:        tx descriptor
 * @skb:        skb holding the payload to send
 *
 * This function does not consume the @skb.
 */
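
/*
 * Resulting layout at skb->data after the two __skb_push() calls
 * below (assuming the headroom checks hold):
 *
 *   | untd_hdr_cmp | wlp_tx_hdr | original payload |
 */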
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;
	wtx->skb = skb;
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}


/*
 * Given an skb to transmit, massage it to become palatable for the TX pipe
 *
 * This will break the buffer into chunks smaller than
 * i1480u_MAX_FRG_SIZE and add proper headers to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment N
 *   last fragment data   /
 *
 * Each fragment will always be smaller than or equal to
 * i1480u_MAX_FRG_SIZE.
 *
 * If the whole payload fits in a single fragment, the following is
 * composed instead:
 *
 *   complete header      \
 *   i1480 tx header      | single fragment
 *   packet data          /
 *
 * We considered using scatter/gather support, but since the interface
 * is synchronous and carries plenty of overhead anyway, it didn't seem
 * worth it for data that will mostly be smaller than one page.
 */
static
struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
				   struct sk_buff *skb, gfp_t gfp_mask)
{
	int result;
	struct usb_endpoint_descriptor *epd;
	int usb_pipe;
	unsigned long flags;

	struct i1480u_tx *wtx;
	const size_t pl_max_size =
		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
		- sizeof(struct wlp_tx_hdr);

	wtx = kmalloc(sizeof(*wtx), gfp_mask);
	if (wtx == NULL)
		goto error_wtx_alloc;
	wtx->urb = usb_alloc_urb(0, gfp_mask);
	if (wtx->urb == NULL)
		goto error_urb_alloc;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
	/* Fits in a single complete packet or need to split? */
	if (skb->len > pl_max_size) {
		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
	} else {
		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  skb->data, skb->len, i1480u_tx_cb, wtx);
	}
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_add(&wtx->list_node, &i1480u->tx_list);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	return wtx;

error_create:
	usb_free_urb(wtx->urb);	/* URBs are released with usb_free_urb(), not kfree() */
error_urb_alloc:
	kfree(wtx);
error_wtx_alloc:
	return NULL;
}

/*
 * Actual fragmentation and transmission of frame
 *
 * @wlp:  WLP substack data structure
 * @skb:  To be transmitted
 * @dst:  Device address of destination
 * @returns: 0 on success, <0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here that the interface is up
 * before sending anything.
 */
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };

	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	result = -EBUSY;
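	/*
	 * Throttle: stop the network queue once too many URBs are in
	 * flight; i1480u_tx_cb() restarts it when the in-flight count
	 * drains below tx_inflight.threshold.
	 */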
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB, "
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr is read without
	 * locking because its fields are orthogonal to each other (and
	 * thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
	}

	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);		/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of the skb to the calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	return result;
}


/*
 * Transmit an skb.  Called when an skbuff has to be transmitted.
 *
 * The skb is first passed to the WLP substack to ensure this is a valid
 * frame. If valid, the device address of the destination will be filled in
 * and the WLP header prepended to the skb. If this step fails we fake
 * sending the frame; if we returned an error the network stack would just
 * keep trying.
 *
 * Broadcast frames inside a WSS need special treatment as multicast is
 * not supported. A broadcast frame is sent as unicast to each member of the
 * WSS - this is done by the WLP substack when it finds a broadcast frame.
 * So we test whether the WLP substack took over the skb and only transmit
 * it if it did not.
 *
 * @net_dev->xmit_lock is held
 */
netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	int result;
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev_addr dst;

	if ((net_dev->flags & IFF_UP) == 0)
		goto error;
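	/*
	 * wlp_prepare_tx_frame() return convention, as handled below:
	 * < 0 means an invalid frame (drop it), 1 means the WLP
	 * substack consumed the skb (e.g. a broadcast sent as unicast
	 * to each WSS member), and 0 means we transmit it ourselves.
	 */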
	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "WLP verification of TX frame failed (%d). "
			"Dropping packet.\n", result);
		goto error;
	} else if (result == 1) {
		/* trans_start time will be set when WLP actually transmits
		 * the frame */
		goto out;
	}
	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "Frame TX failed (%d).\n", result);
		goto error;
	}
	return NETDEV_TX_OK;
error:
	dev_kfree_skb_any(skb);
	net_dev->stats.tx_dropped++;
out:
	return NETDEV_TX_OK;
}


/*
 * Called when a pkt transmission doesn't complete in a reasonable period
 * Device reset may sleep - do it outside of interrupt context (delayed)
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}


void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count*10;	/* wait up to 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list, so we
	 * can wait for it to become empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		BUG_ON(count == 0);
		msleep(20);
	}
}