linux/drivers/usb/wusbcore/wa-xfer.c
   1/*
   2 * WUSB Wire Adapter
   3 * Data transfer and URB enqueueing
   4 *
   5 * Copyright (C) 2005-2006 Intel Corporation
   6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License version
  10 * 2 as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  20 * 02110-1301, USA.
  21 *
  22 *
  23 * How transfers work: get a buffer, break it up in segments (segment
  24 * size is a multiple of the maxpacket size). For each segment issue a
  25 * segment request (struct wa_xfer_*), then send the data buffer if
  26 * out or nothing if in (all over the DTO endpoint).
  27 *
  28 * For each submitted segment request, a notification will come over
  29 * the NEP endpoint and a transfer result (struct xfer_result) will
  30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
  31 * data coming (inbound transfer), schedule a read and handle it.
  32 *
  33 * Sounds simple, it is a pain to implement.
  34 *
  35 *
  36 * ENTRY POINTS
  37 *
  38 *   FIXME
  39 *
  40 * LIFE CYCLE / STATE DIAGRAM
  41 *
  42 *   FIXME
  43 *
  44 * THIS CODE IS DISGUSTING
  45 *
  46 *   Warned you are; it's my second try and still not happy with it.
  47 *
  48 * NOTES:
  49 *
  50 *   - No iso
  51 *
  52 *   - Supports DMA xfers, control, bulk and maybe interrupt
  53 *
  54 *   - Does not recycle unused rpipes
  55 *
  56 *     An rpipe is assigned to an endpoint the first time it is used,
  57 *     and then it's there, assigned, until the endpoint is disabled
   58 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
  59 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
  60 *     (should be a mutex).
  61 *
   62 *     Two ways this could be done:
  63 *
   64 *     (a) set up a timer every time an rpipe's use count drops to 1
   65 *         (which means unused) or when a transfer ends. Reset the
   66 *         timer when an xfer is queued. If the timer expires, release
  67 *         the rpipe [see rpipe_ep_disable()].
  68 *
   69 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
   70 *         and none are found, go over the list, check each rpipe's
   71 *         endpoint and activity record; if there has been no completed
   72 *         xfer on it in the last x seconds, take it
  73 *
   74 *     However, because we have a set of limited
   75 *     resources (max-segments-at-the-same-time per xfer,
   76 *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
   77 *     we are going to have to rebuild all this around a scheduler:
   78 *     keep a list of transactions to do and, based on the
   79 *     availability of the different required components (blocks,
   80 *     rpipes, segment slots, etc), schedule them. Painful.
  81 */
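     /*
      * Illustrative example of the segmentation described above (the
      * numbers are made up, not taken from any real rpipe): an outbound
      * URB of 12000 bytes on an rpipe whose computed seg_size is 3072
      * bytes is split into DIV_ROUND_UP(12000, 3072) = 4 segments;
      * segments 0-2 carry 3072 bytes each and segment 3 the remaining
      * 2784.  Each segment is sent as a wa_xfer_* request header on the
      * DTO endpoint followed by its slice of the buffer, the HWA later
      * reports one transfer result per segment on the DTI endpoint, and
      * __wa_xfer_is_done() completes the URB once every submitted
      * segment has been accounted for.
      */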
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/slab.h>
  85#include <linux/hash.h>
  86
  87#include "wa-hc.h"
  88#include "wusbhc.h"
  89
  90enum {
  91        WA_SEGS_MAX = 255,
  92};
  93
  94enum wa_seg_status {
  95        WA_SEG_NOTREADY,
  96        WA_SEG_READY,
  97        WA_SEG_DELAYED,
  98        WA_SEG_SUBMITTED,
  99        WA_SEG_PENDING,
 100        WA_SEG_DTI_PENDING,
 101        WA_SEG_DONE,
 102        WA_SEG_ERROR,
 103        WA_SEG_ABORTED,
 104};
 105
 106static void wa_xfer_delayed_run(struct wa_rpipe *);
 107
 108/*
 109 * Life cycle governed by 'struct urb' (the refcount of the struct is
 110 * that of the 'struct urb' and usb_free_urb() would free the whole
 111 * struct).
 112 */
 113struct wa_seg {
 114        struct urb urb;
 115        struct urb *dto_urb;            /* for data output? */
 116        struct list_head list_node;     /* for rpipe->req_list */
 117        struct wa_xfer *xfer;           /* out xfer */
 118        u8 index;                       /* which segment we are */
 119        enum wa_seg_status status;
 120        ssize_t result;                 /* bytes xfered or error */
 121        struct wa_xfer_hdr xfer_hdr;
 122        u8 xfer_extra[];                /* xtra space for xfer_hdr_ctl */
 123};
 124
 125static void wa_seg_init(struct wa_seg *seg)
 126{
 127        /* usb_init_urb() repeats a lot of work, so we do it here */
 128        kref_init(&seg->urb.kref);
 129}
 130
 131/*
 132 * Protected by xfer->lock
 133 *
 134 */
 135struct wa_xfer {
 136        struct kref refcnt;
 137        struct list_head list_node;
 138        spinlock_t lock;
 139        u32 id;
 140
 141        struct wahc *wa;                /* Wire adapter we are plugged to */
 142        struct usb_host_endpoint *ep;
  143        struct urb *urb;                /* URB we are transferring for */
 144        struct wa_seg **seg;            /* transfer segments */
 145        u8 segs, segs_submitted, segs_done;
 146        unsigned is_inbound:1;
 147        unsigned is_dma:1;
 148        size_t seg_size;
 149        int result;
 150
 151        gfp_t gfp;                      /* allocation mask */
 152
 153        struct wusb_dev *wusb_dev;      /* for activity timestamps */
 154};
 155
 156static inline void wa_xfer_init(struct wa_xfer *xfer)
 157{
 158        kref_init(&xfer->refcnt);
 159        INIT_LIST_HEAD(&xfer->list_node);
 160        spin_lock_init(&xfer->lock);
 161}
 162
 163/*
  164 * Destroy a transfer structure
 165 *
 166 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 167 * so we need to put them, not free them.
 168 */
 169static void wa_xfer_destroy(struct kref *_xfer)
 170{
 171        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
 172        if (xfer->seg) {
 173                unsigned cnt;
 174                for (cnt = 0; cnt < xfer->segs; cnt++) {
  175                        /* dto_urb only set for outbound; NULL put is a no-op */
  176                        usb_put_urb(xfer->seg[cnt]->dto_urb);
 177                        usb_put_urb(&xfer->seg[cnt]->urb);
 178                }
 179        }
 180        kfree(xfer);
 181}
 182
 183static void wa_xfer_get(struct wa_xfer *xfer)
 184{
 185        kref_get(&xfer->refcnt);
 186}
 187
 188static void wa_xfer_put(struct wa_xfer *xfer)
 189{
 190        kref_put(&xfer->refcnt, wa_xfer_destroy);
 191}
 192
 193/*
 194 * xfer is referenced
 195 *
 196 * xfer->lock has to be unlocked
 197 *
 198 * We take xfer->lock for setting the result; this is a barrier
 199 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 200 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 201 * reference to the transfer.
 202 */
 203static void wa_xfer_giveback(struct wa_xfer *xfer)
 204{
 205        unsigned long flags;
 206
 207        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
 208        list_del_init(&xfer->list_node);
 209        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
 210        /* FIXME: segmentation broken -- kills DWA */
 211        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
 212        wa_put(xfer->wa);
 213        wa_xfer_put(xfer);
 214}
 215
 216/*
 217 * xfer is referenced
 218 *
 219 * xfer->lock has to be unlocked
 220 */
 221static void wa_xfer_completion(struct wa_xfer *xfer)
 222{
 223        if (xfer->wusb_dev)
 224                wusb_dev_put(xfer->wusb_dev);
 225        rpipe_put(xfer->ep->hcpriv);
 226        wa_xfer_giveback(xfer);
 227}
 228
 229/*
 230 * If transfer is done, wrap it up and return true
 231 *
 232 * xfer->lock has to be locked
 233 */
 234static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
 235{
 236        struct device *dev = &xfer->wa->usb_iface->dev;
 237        unsigned result, cnt;
 238        struct wa_seg *seg;
 239        struct urb *urb = xfer->urb;
 240        unsigned found_short = 0;
 241
 242        result = xfer->segs_done == xfer->segs_submitted;
 243        if (result == 0)
 244                goto out;
 245        urb->actual_length = 0;
 246        for (cnt = 0; cnt < xfer->segs; cnt++) {
 247                seg = xfer->seg[cnt];
 248                switch (seg->status) {
 249                case WA_SEG_DONE:
 250                        if (found_short && seg->result > 0) {
 251                                dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
 252                                        xfer, cnt, seg->result);
 253                                urb->status = -EINVAL;
 254                                goto out;
 255                        }
 256                        urb->actual_length += seg->result;
 257                        if (seg->result < xfer->seg_size
 258                            && cnt != xfer->segs-1)
 259                                found_short = 1;
 260                        dev_dbg(dev, "xfer %p#%u: DONE short %d "
 261                                "result %zu urb->actual_length %d\n",
 262                                xfer, seg->index, found_short, seg->result,
 263                                urb->actual_length);
 264                        break;
 265                case WA_SEG_ERROR:
 266                        xfer->result = seg->result;
 267                        dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
 268                                xfer, seg->index, seg->result);
 269                        goto out;
 270                case WA_SEG_ABORTED:
 271                        dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
 272                                xfer, seg->index, urb->status);
 273                        xfer->result = urb->status;
 274                        goto out;
 275                default:
 276                        dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
 277                                 xfer, cnt, seg->status);
 278                        xfer->result = -EINVAL;
 279                        goto out;
 280                }
 281        }
 282        xfer->result = 0;
 283out:
 284        return result;
 285}
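     /*
      * Example of the short-segment rule enforced above (illustrative):
      * if a transfer was split into segments 0..2 and segment 0
      * completes short (less than seg_size) while segment 1 still
      * reports data, the "only the last segment may be short" rule has
      * been violated and the URB is failed with -EINVAL; a short final
      * segment is fine and simply shortens urb->actual_length.
      */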
 286
 287/*
 288 * Initialize a transfer's ID
 289 *
 290 * We need to use a sequential number; if we use the pointer or the
 291 * hash of the pointer, it can repeat over sequential transfers and
 292 * then it will confuse the HWA....wonder why in hell they put a 32
 293 * bit handle in there then.
 294 */
 295static void wa_xfer_id_init(struct wa_xfer *xfer)
 296{
 297        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
 298}
 299
  300/*
  301 * Return the ID associated with xfer
  302 *
  303 * (IDs are generated sequentially by wa_xfer_id_init().)
  304 */
 305static u32 wa_xfer_id(struct wa_xfer *xfer)
 306{
 307        return xfer->id;
 308}
 309
 310/*
  311 * Search for a transfer by ID on the wire adapter's xfer list
  312 *
  313 * The transfer is returned with a reference taken (wa_xfer_get());
  314 * the caller is responsible for dropping it with wa_xfer_put().
 315 *
 316 * @returns NULL if not found.
 317 */
 318static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
 319{
 320        unsigned long flags;
 321        struct wa_xfer *xfer_itr;
 322        spin_lock_irqsave(&wa->xfer_list_lock, flags);
 323        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
 324                if (id == xfer_itr->id) {
 325                        wa_xfer_get(xfer_itr);
 326                        goto out;
 327                }
 328        }
 329        xfer_itr = NULL;
 330out:
 331        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 332        return xfer_itr;
 333}
 334
 335struct wa_xfer_abort_buffer {
 336        struct urb urb;
 337        struct wa_xfer_abort cmd;
 338};
 339
 340static void __wa_xfer_abort_cb(struct urb *urb)
 341{
 342        struct wa_xfer_abort_buffer *b = urb->context;
 343        usb_put_urb(&b->urb);
 344}
 345
 346/*
 347 * Aborts an ongoing transaction
 348 *
 349 * Assumes the transfer is referenced and locked and in a submitted
 350 * state (mainly that there is an endpoint/rpipe assigned).
 351 *
  352 * The callback (see above) does nothing but free up the data by
  353 * putting the URB. Because the URB is allocated at the head of the
  354 * struct, the whole space we allocated is kfreed.
  355 *
  356 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
  357 * politely ignore because at this point the transaction has already
  358 * been marked as aborted.
 359 */
 360static void __wa_xfer_abort(struct wa_xfer *xfer)
 361{
 362        int result;
 363        struct device *dev = &xfer->wa->usb_iface->dev;
 364        struct wa_xfer_abort_buffer *b;
 365        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 366
 367        b = kmalloc(sizeof(*b), GFP_ATOMIC);
 368        if (b == NULL)
 369                goto error_kmalloc;
 370        b->cmd.bLength =  sizeof(b->cmd);
 371        b->cmd.bRequestType = WA_XFER_ABORT;
 372        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
 373        b->cmd.dwTransferID = wa_xfer_id(xfer);
 374
 375        usb_init_urb(&b->urb);
 376        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
 377                usb_sndbulkpipe(xfer->wa->usb_dev,
 378                                xfer->wa->dto_epd->bEndpointAddress),
 379                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
 380        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
 381        if (result < 0)
 382                goto error_submit;
 383        return;                         /* callback frees! */
 384
 385
 386error_submit:
 387        if (printk_ratelimit())
 388                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
 389                        xfer, result);
 390        kfree(b);
 391error_kmalloc:
 392        return;
 393
 394}
 395
 396/*
  397 * Compute the transfer type, segment size and number of segments
 398 * @returns < 0 on error, transfer segment request size if ok
 399 */
 400static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 401                                     enum wa_xfer_type *pxfer_type)
 402{
 403        ssize_t result;
 404        struct device *dev = &xfer->wa->usb_iface->dev;
 405        size_t maxpktsize;
 406        struct urb *urb = xfer->urb;
 407        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 408
 409        switch (rpipe->descr.bmAttribute & 0x3) {
 410        case USB_ENDPOINT_XFER_CONTROL:
 411                *pxfer_type = WA_XFER_TYPE_CTL;
 412                result = sizeof(struct wa_xfer_ctl);
 413                break;
 414        case USB_ENDPOINT_XFER_INT:
 415        case USB_ENDPOINT_XFER_BULK:
 416                *pxfer_type = WA_XFER_TYPE_BI;
 417                result = sizeof(struct wa_xfer_bi);
 418                break;
 419        case USB_ENDPOINT_XFER_ISOC:
 420                dev_err(dev, "FIXME: ISOC not implemented\n");
 421                result = -ENOSYS;
 422                goto error;
 423        default:
 424                /* never happens */
 425                BUG();
 426                result = -EINVAL;       /* shut gcc up */
  427        }
 428        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
 429        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
 430        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
 431                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
 432        /* Compute the segment size and make sure it is a multiple of
 433         * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
 434         * a check (FIXME) */
 435        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
 436        if (xfer->seg_size < maxpktsize) {
 437                dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
 438                        "%zu\n", xfer->seg_size, maxpktsize);
 439                result = -EINVAL;
 440                goto error;
 441        }
 442        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
 443        xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
 444                / xfer->seg_size;
 445        if (xfer->segs >= WA_SEGS_MAX) {
 446                dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
 447                        (int)(urb->transfer_buffer_length / xfer->seg_size),
 448                        WA_SEGS_MAX);
 449                result = -EINVAL;
 450                goto error;
 451        }
 452        if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
 453                xfer->segs = 1;
 454error:
 455        return result;
 456}
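     /*
      * Worked example of the sizing above (hypothetical descriptor
      * values): with wBlocks = 5 and bRPipeBlockSize = 10 the raw
      * segment size is 5 * (1 << 9) = 2560 bytes; with wMaxPacketSize =
      * 1024 that is rounded down to the nearest packet multiple, 2048
      * bytes, so a 5000 byte URB needs DIV_ROUND_UP(5000, 2048) = 3
      * segments, the last one short.
      */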
 457
 458/* Fill in the common request header and xfer-type specific data. */
 459static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
 460                                 struct wa_xfer_hdr *xfer_hdr0,
 461                                 enum wa_xfer_type xfer_type,
 462                                 size_t xfer_hdr_size)
 463{
 464        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 465
 466        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
 467        xfer_hdr0->bLength = xfer_hdr_size;
 468        xfer_hdr0->bRequestType = xfer_type;
 469        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
 470        xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
 471        xfer_hdr0->bTransferSegment = 0;
 472        switch (xfer_type) {
 473        case WA_XFER_TYPE_CTL: {
 474                struct wa_xfer_ctl *xfer_ctl =
 475                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
 476                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
 477                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
 478                       sizeof(xfer_ctl->baSetupData));
 479                break;
 480        }
 481        case WA_XFER_TYPE_BI:
 482                break;
 483        case WA_XFER_TYPE_ISO:
 484                printk(KERN_ERR "FIXME: ISOC not implemented\n");
 485        default:
 486                BUG();
  487        }
 488}
 489
 490/*
 491 * Callback for the OUT data phase of the segment request
 492 *
 493 * Check wa_seg_cb(); most comments also apply here because this
 494 * function does almost the same thing and they work closely
 495 * together.
 496 *
  497 * If the seg request has failed but this DTO phase has succeeded,
 498 * wa_seg_cb() has already failed the segment and moved the
 499 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 500 * effectively do nothing.
 501 */
 502static void wa_seg_dto_cb(struct urb *urb)
 503{
 504        struct wa_seg *seg = urb->context;
 505        struct wa_xfer *xfer = seg->xfer;
 506        struct wahc *wa;
 507        struct device *dev;
 508        struct wa_rpipe *rpipe;
 509        unsigned long flags;
 510        unsigned rpipe_ready = 0;
 511        u8 done = 0;
 512
 513        switch (urb->status) {
 514        case 0:
 515                spin_lock_irqsave(&xfer->lock, flags);
 516                wa = xfer->wa;
 517                dev = &wa->usb_iface->dev;
 518                dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
 519                        xfer, seg->index, urb->actual_length);
 520                if (seg->status < WA_SEG_PENDING)
 521                        seg->status = WA_SEG_PENDING;
 522                seg->result = urb->actual_length;
 523                spin_unlock_irqrestore(&xfer->lock, flags);
 524                break;
 525        case -ECONNRESET:       /* URB unlinked; no need to do anything */
  526        case -ENOENT:           /* as it was done by whoever unlinked us */
 527                break;
 528        default:                /* Other errors ... */
 529                spin_lock_irqsave(&xfer->lock, flags);
 530                wa = xfer->wa;
 531                dev = &wa->usb_iface->dev;
 532                rpipe = xfer->ep->hcpriv;
 533                dev_dbg(dev, "xfer %p#%u: data out error %d\n",
 534                        xfer, seg->index, urb->status);
 535                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 536                            EDC_ERROR_TIMEFRAME)){
 537                        dev_err(dev, "DTO: URB max acceptable errors "
 538                                "exceeded, resetting device\n");
 539                        wa_reset_all(wa);
 540                }
 541                if (seg->status != WA_SEG_ERROR) {
 542                        seg->status = WA_SEG_ERROR;
 543                        seg->result = urb->status;
 544                        xfer->segs_done++;
 545                        __wa_xfer_abort(xfer);
 546                        rpipe_ready = rpipe_avail_inc(rpipe);
 547                        done = __wa_xfer_is_done(xfer);
 548                }
 549                spin_unlock_irqrestore(&xfer->lock, flags);
 550                if (done)
 551                        wa_xfer_completion(xfer);
 552                if (rpipe_ready)
 553                        wa_xfer_delayed_run(rpipe);
 554        }
 555}
 556
 557/*
 558 * Callback for the segment request
 559 *
  560 * If successful, transition state (unless already transitioned or it is
  561 * an outbound transfer); otherwise, take a note of the error, mark this
 562 * segment done and try completion.
 563 *
 564 * Note we don't access until we are sure that the transfer hasn't
 565 * been cancelled (ECONNRESET, ENOENT), which could mean that
 566 * seg->xfer could be already gone.
 567 *
 568 * We have to check before setting the status to WA_SEG_PENDING
 569 * because sometimes the xfer result callback arrives before this
 570 * callback (geeeeeeze), so it might happen that we are already in
 571 * another state. As well, we don't set it if the transfer is inbound,
 572 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 573 * finishes.
 574 */
 575static void wa_seg_cb(struct urb *urb)
 576{
 577        struct wa_seg *seg = urb->context;
 578        struct wa_xfer *xfer = seg->xfer;
 579        struct wahc *wa;
 580        struct device *dev;
 581        struct wa_rpipe *rpipe;
 582        unsigned long flags;
 583        unsigned rpipe_ready;
 584        u8 done = 0;
 585
 586        switch (urb->status) {
 587        case 0:
 588                spin_lock_irqsave(&xfer->lock, flags);
 589                wa = xfer->wa;
 590                dev = &wa->usb_iface->dev;
 591                dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
 592                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
 593                        seg->status = WA_SEG_PENDING;
 594                spin_unlock_irqrestore(&xfer->lock, flags);
 595                break;
 596        case -ECONNRESET:       /* URB unlinked; no need to do anything */
  597        case -ENOENT:           /* as it was done by whoever unlinked us */
 598                break;
 599        default:                /* Other errors ... */
 600                spin_lock_irqsave(&xfer->lock, flags);
 601                wa = xfer->wa;
 602                dev = &wa->usb_iface->dev;
 603                rpipe = xfer->ep->hcpriv;
 604                if (printk_ratelimit())
 605                        dev_err(dev, "xfer %p#%u: request error %d\n",
 606                                xfer, seg->index, urb->status);
 607                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 608                            EDC_ERROR_TIMEFRAME)){
 609                        dev_err(dev, "DTO: URB max acceptable errors "
 610                                "exceeded, resetting device\n");
 611                        wa_reset_all(wa);
 612                }
 613                usb_unlink_urb(seg->dto_urb);
 614                seg->status = WA_SEG_ERROR;
 615                seg->result = urb->status;
 616                xfer->segs_done++;
 617                __wa_xfer_abort(xfer);
 618                rpipe_ready = rpipe_avail_inc(rpipe);
 619                done = __wa_xfer_is_done(xfer);
 620                spin_unlock_irqrestore(&xfer->lock, flags);
 621                if (done)
 622                        wa_xfer_completion(xfer);
 623                if (rpipe_ready)
 624                        wa_xfer_delayed_run(rpipe);
 625        }
 626}
 627
 628/*
 629 * Allocate the segs array and initialize each of them
 630 *
 631 * The segments are freed by wa_xfer_destroy() when the xfer use count
 632 * drops to zero; however, because each segment is given the same life
 633 * cycle as the USB URB it contains, it is actually freed by
 634 * usb_put_urb() on the contained USB URB (twisted, eh?).
 635 */
 636static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 637{
 638        int result, cnt;
 639        size_t alloc_size = sizeof(*xfer->seg[0])
 640                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
 641        struct usb_device *usb_dev = xfer->wa->usb_dev;
 642        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
 643        struct wa_seg *seg;
 644        size_t buf_itr, buf_size, buf_itr_size;
 645
 646        result = -ENOMEM;
 647        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
 648        if (xfer->seg == NULL)
 649                goto error_segs_kzalloc;
 650        buf_itr = 0;
 651        buf_size = xfer->urb->transfer_buffer_length;
 652        for (cnt = 0; cnt < xfer->segs; cnt++) {
 653                seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
 654                if (seg == NULL)
 655                        goto error_seg_kzalloc;
 656                wa_seg_init(seg);
 657                seg->xfer = xfer;
 658                seg->index = cnt;
 659                usb_fill_bulk_urb(&seg->urb, usb_dev,
 660                                  usb_sndbulkpipe(usb_dev,
 661                                                  dto_epd->bEndpointAddress),
 662                                  &seg->xfer_hdr, xfer_hdr_size,
 663                                  wa_seg_cb, seg);
 664                buf_itr_size = buf_size > xfer->seg_size ?
 665                        xfer->seg_size : buf_size;
 666                if (xfer->is_inbound == 0 && buf_size > 0) {
 667                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
 668                        if (seg->dto_urb == NULL)
 669                                goto error_dto_alloc;
 670                        usb_fill_bulk_urb(
 671                                seg->dto_urb, usb_dev,
 672                                usb_sndbulkpipe(usb_dev,
 673                                                dto_epd->bEndpointAddress),
 674                                NULL, 0, wa_seg_dto_cb, seg);
 675                        if (xfer->is_dma) {
 676                                seg->dto_urb->transfer_dma =
 677                                        xfer->urb->transfer_dma + buf_itr;
 678                                seg->dto_urb->transfer_flags |=
 679                                        URB_NO_TRANSFER_DMA_MAP;
 680                        } else
 681                                seg->dto_urb->transfer_buffer =
 682                                        xfer->urb->transfer_buffer + buf_itr;
 683                        seg->dto_urb->transfer_buffer_length = buf_itr_size;
 684                }
 685                seg->status = WA_SEG_READY;
 686                buf_itr += buf_itr_size;
 687                buf_size -= buf_itr_size;
 688        }
 689        return 0;
 690
  691error_dto_alloc:
  692        kfree(xfer->seg[cnt]);
  693error_seg_kzalloc:
  694        /* cnt is the failing index; its entry is NULL or already freed,
  695         * so release segments cnt-1 .. 0 and their DTO URBs */
  696        while (cnt > 0) {
  697                cnt--;
  698                if (xfer->is_inbound == 0)
  699                        usb_free_urb(xfer->seg[cnt]->dto_urb);
  700                kfree(xfer->seg[cnt]);
  701        }
 701error_segs_kzalloc:
 702        return result;
 703}
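     /*
      * Illustration of the buffer slicing above (hypothetical values):
      * for an outbound, non-DMA URB of 5000 bytes with seg_size 2048,
      * segment 0's dto_urb points at transfer_buffer + 0 for 2048
      * bytes, segment 1 at + 2048 for 2048 bytes and segment 2 at
      * + 4096 for the final 904 bytes; for DMA-mapped URBs the same
      * offsets are applied to transfer_dma and URB_NO_TRANSFER_DMA_MAP
      * is set on each dto_urb instead.
      */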
 704
 705/*
 706 * Allocates all the stuff needed to submit a transfer
 707 *
 708 * Breaks the whole data buffer in a list of segments, each one has a
 709 * structure allocated to it and linked in xfer->seg[index]
 710 *
 711 * FIXME: merge setup_segs() and the last part of this function, no
 712 *        need to do two for loops when we could run everything in a
 713 *        single one
 714 */
 715static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
 716{
 717        int result;
 718        struct device *dev = &xfer->wa->usb_iface->dev;
 719        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
 720        size_t xfer_hdr_size, cnt, transfer_size;
 721        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
 722
 723        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
 724        if (result < 0)
 725                goto error_setup_sizes;
 726        xfer_hdr_size = result;
 727        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
 728        if (result < 0) {
 729                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
 730                        xfer, xfer->segs, result);
 731                goto error_setup_segs;
 732        }
 733        /* Fill the first header */
 734        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
 735        wa_xfer_id_init(xfer);
 736        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
 737
  738        /* Fill remaining headers */
 739        xfer_hdr = xfer_hdr0;
 740        transfer_size = urb->transfer_buffer_length;
 741        xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
  742                cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
 743        transfer_size -=  xfer->seg_size;
 744        for (cnt = 1; cnt < xfer->segs; cnt++) {
 745                xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
 746                memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
 747                xfer_hdr->bTransferSegment = cnt;
 748                xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
 749                        cpu_to_le32(xfer->seg_size)
 750                        : cpu_to_le32(transfer_size);
 751                xfer->seg[cnt]->status = WA_SEG_READY;
 752                transfer_size -=  xfer->seg_size;
 753        }
 754        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
 755        result = 0;
 756error_setup_segs:
 757error_setup_sizes:
 758        return result;
 759}
 760
 761/*
  762 * Submit a segment's request URB and, if it is outbound, its DTO data URB
  763 *
 764 * rpipe->seg_lock is held!
 765 */
 766static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
 767                           struct wa_seg *seg)
 768{
 769        int result;
 770        result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
 771        if (result < 0) {
 772                printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
 773                       xfer, seg->index, result);
 774                goto error_seg_submit;
 775        }
 776        if (seg->dto_urb) {
 777                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
 778                if (result < 0) {
 779                        printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
 780                               xfer, seg->index, result);
 781                        goto error_dto_submit;
 782                }
 783        }
 784        seg->status = WA_SEG_SUBMITTED;
 785        rpipe_avail_dec(rpipe);
 786        return 0;
 787
 788error_dto_submit:
 789        usb_unlink_urb(&seg->urb);
 790error_seg_submit:
 791        seg->status = WA_SEG_ERROR;
 792        seg->result = result;
 793        return result;
 794}
 795
 796/*
 797 * Execute more queued request segments until the maximum concurrent allowed
 798 *
 799 * The ugly unlock/lock sequence on the error path is needed as the
  800 * xfer->lock normally nests the seg_lock and not vice versa.
 801 *
 802 */
 803static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
 804{
 805        int result;
 806        struct device *dev = &rpipe->wa->usb_iface->dev;
 807        struct wa_seg *seg;
 808        struct wa_xfer *xfer;
 809        unsigned long flags;
 810
 811        spin_lock_irqsave(&rpipe->seg_lock, flags);
 812        while (atomic_read(&rpipe->segs_available) > 0
 813              && !list_empty(&rpipe->seg_list)) {
 814                seg = list_entry(rpipe->seg_list.next, struct wa_seg,
 815                                 list_node);
 816                list_del(&seg->list_node);
 817                xfer = seg->xfer;
 818                result = __wa_seg_submit(rpipe, xfer, seg);
 819                dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
 820                        xfer, seg->index, atomic_read(&rpipe->segs_available), result);
 821                if (unlikely(result < 0)) {
 822                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 823                        spin_lock_irqsave(&xfer->lock, flags);
 824                        __wa_xfer_abort(xfer);
 825                        xfer->segs_done++;
 826                        spin_unlock_irqrestore(&xfer->lock, flags);
 827                        spin_lock_irqsave(&rpipe->seg_lock, flags);
 828                }
 829        }
 830        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 831}
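     /*
      * Lock ordering note for the above (informational): the normal
      * nesting, as in __wa_xfer_submit(), is xfer->lock taken first and
      * rpipe->seg_lock inside it.  wa_xfer_delayed_run() starts out
      * holding only seg_lock, so on the error path it must drop
      * seg_lock before taking xfer->lock to abort the transfer, then
      * reacquire seg_lock to keep draining the delayed list.
      */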
 832
 833/*
  834 * Submit a transfer's segments, delaying those the rpipe cannot yet take
 835 * xfer->lock is taken
 836 *
  837 * On a submit failure we just stop submitting and return the error;
 838 * wa_urb_enqueue_b() will execute the completion path
 839 */
 840static int __wa_xfer_submit(struct wa_xfer *xfer)
 841{
 842        int result;
 843        struct wahc *wa = xfer->wa;
 844        struct device *dev = &wa->usb_iface->dev;
 845        unsigned cnt;
 846        struct wa_seg *seg;
 847        unsigned long flags;
 848        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 849        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
 850        u8 available;
 851        u8 empty;
 852
 853        spin_lock_irqsave(&wa->xfer_list_lock, flags);
 854        list_add_tail(&xfer->list_node, &wa->xfer_list);
 855        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 856
 857        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
 858        result = 0;
 859        spin_lock_irqsave(&rpipe->seg_lock, flags);
 860        for (cnt = 0; cnt < xfer->segs; cnt++) {
 861                available = atomic_read(&rpipe->segs_available);
 862                empty = list_empty(&rpipe->seg_list);
 863                seg = xfer->seg[cnt];
 864                dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
 865                        xfer, cnt, available, empty,
 866                        available == 0 || !empty ? "delayed" : "submitted");
 867                if (available == 0 || !empty) {
 868                        dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
 869                        seg->status = WA_SEG_DELAYED;
 870                        list_add_tail(&seg->list_node, &rpipe->seg_list);
 871                } else {
 872                        result = __wa_seg_submit(rpipe, xfer, seg);
 873                        if (result < 0) {
 874                                __wa_xfer_abort(xfer);
 875                                goto error_seg_submit;
 876                        }
 877                }
 878                xfer->segs_submitted++;
 879        }
 880error_seg_submit:
 881        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 882        return result;
 883}
 884
 885/*
  886 * Second part of a URB/transfer enqueue operation
 887 *
 888 * Assumes this comes from wa_urb_enqueue() [maybe through
 889 * wa_urb_enqueue_run()]. At this point:
 890 *
 891 * xfer->wa     filled and refcounted
 892 * xfer->ep     filled with rpipe refcounted if
 893 *              delayed == 0
 894 * xfer->urb    filled and refcounted (this is the case when called
 895 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 896 *              and when called by wa_urb_enqueue_run(), as we took an
 897 *              extra ref dropped by _run() after we return).
 898 * xfer->gfp    filled
 899 *
 900 * If we fail at __wa_xfer_submit(), then we just check if we are done
 901 * and if so, we run the completion procedure. However, if we are not
 902 * yet done, we do nothing and wait for the completion handlers from
 903 * the submitted URBs or from the xfer-result path to kick in. If xfer
 904 * result never kicks in, the xfer will timeout from the USB code and
 905 * dequeue() will be called.
 906 */
 907static void wa_urb_enqueue_b(struct wa_xfer *xfer)
 908{
 909        int result;
 910        unsigned long flags;
 911        struct urb *urb = xfer->urb;
 912        struct wahc *wa = xfer->wa;
 913        struct wusbhc *wusbhc = wa->wusb;
 914        struct wusb_dev *wusb_dev;
 915        unsigned done;
 916
 917        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
 918        if (result < 0)
 919                goto error_rpipe_get;
 920        result = -ENODEV;
 921        /* FIXME: segmentation broken -- kills DWA */
 922        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
 923        if (urb->dev == NULL) {
 924                mutex_unlock(&wusbhc->mutex);
 925                goto error_dev_gone;
 926        }
 927        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
 928        if (wusb_dev == NULL) {
 929                mutex_unlock(&wusbhc->mutex);
 930                goto error_dev_gone;
 931        }
 932        mutex_unlock(&wusbhc->mutex);
 933
 934        spin_lock_irqsave(&xfer->lock, flags);
 935        xfer->wusb_dev = wusb_dev;
 936        result = urb->status;
 937        if (urb->status != -EINPROGRESS)
 938                goto error_dequeued;
 939
 940        result = __wa_xfer_setup(xfer, urb);
 941        if (result < 0)
 942                goto error_xfer_setup;
 943        result = __wa_xfer_submit(xfer);
 944        if (result < 0)
 945                goto error_xfer_submit;
 946        spin_unlock_irqrestore(&xfer->lock, flags);
 947        return;
 948
  949        /* this is basically wa_xfer_completion() broken up; wa_xfer_giveback()
  950         * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
  951         * up and undo setup().
 952         */
 953error_xfer_setup:
 954error_dequeued:
 955        spin_unlock_irqrestore(&xfer->lock, flags);
 956        /* FIXME: segmentation broken, kills DWA */
 957        if (wusb_dev)
 958                wusb_dev_put(wusb_dev);
 959error_dev_gone:
 960        rpipe_put(xfer->ep->hcpriv);
 961error_rpipe_get:
 962        xfer->result = result;
 963        wa_xfer_giveback(xfer);
 964        return;
 965
 966error_xfer_submit:
 967        done = __wa_xfer_is_done(xfer);
 968        xfer->result = result;
 969        spin_unlock_irqrestore(&xfer->lock, flags);
 970        if (done)
 971                wa_xfer_completion(xfer);
 972}
 973
 974/*
 975 * Execute the delayed transfers in the Wire Adapter @wa
 976 *
 977 * We need to be careful here, as dequeue() could be called in the
 978 * middle.  That's why we do the whole thing under the
 979 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 980 * and then checks the list -- so as we would be acquiring in inverse
 981 * order, we just drop the lock once we have the xfer and reacquire it
 982 * later.
 983 */
 984void wa_urb_enqueue_run(struct work_struct *ws)
 985{
 986        struct wahc *wa = container_of(ws, struct wahc, xfer_work);
 987        struct wa_xfer *xfer, *next;
 988        struct urb *urb;
 989
 990        spin_lock_irq(&wa->xfer_list_lock);
 991        list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
 992                                 list_node) {
 993                list_del_init(&xfer->list_node);
 994                spin_unlock_irq(&wa->xfer_list_lock);
 995
 996                urb = xfer->urb;
 997                wa_urb_enqueue_b(xfer);
 998                usb_put_urb(urb);       /* taken when queuing */
 999
1000                spin_lock_irq(&wa->xfer_list_lock);
1001        }
1002        spin_unlock_irq(&wa->xfer_list_lock);
1003}
1004EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1005
1006/*
1007 * Submit a transfer to the Wire Adapter in a delayed way
1008 *
 1009 * The process of enqueueing involves possible sleeps [see
 1010 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 1011 * in an atomic section, we defer the enqueue_b() call; else we call it directly.
1012 *
1013 * @urb: We own a reference to it done by the HCI Linux USB stack that
1014 *       will be given up by calling usb_hcd_giveback_urb() or by
1015 *       returning error from this function -> ergo we don't have to
1016 *       refcount it.
1017 */
1018int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1019                   struct urb *urb, gfp_t gfp)
1020{
1021        int result;
1022        struct device *dev = &wa->usb_iface->dev;
1023        struct wa_xfer *xfer;
1024        unsigned long my_flags;
1025        unsigned cant_sleep = irqs_disabled() | in_atomic();
1026
1027        if (urb->transfer_buffer == NULL
1028            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1029            && urb->transfer_buffer_length != 0) {
1030                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1031                dump_stack();
1032        }
1033
1034        result = -ENOMEM;
1035        xfer = kzalloc(sizeof(*xfer), gfp);
1036        if (xfer == NULL)
1037                goto error_kmalloc;
1038
1039        result = -ENOENT;
1040        if (urb->status != -EINPROGRESS)        /* cancelled */
1041                goto error_dequeued;            /* before starting? */
1042        wa_xfer_init(xfer);
1043        xfer->wa = wa_get(wa);
1044        xfer->urb = urb;
1045        xfer->gfp = gfp;
1046        xfer->ep = ep;
1047        urb->hcpriv = xfer;
1048
1049        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1050                xfer, urb, urb->pipe, urb->transfer_buffer_length,
1051                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1052                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1053                cant_sleep ? "deferred" : "inline");
1054
1055        if (cant_sleep) {
1056                usb_get_urb(urb);
1057                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1058                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1059                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1060                queue_work(wusbd, &wa->xfer_work);
1061        } else {
1062                wa_urb_enqueue_b(xfer);
1063        }
1064        return 0;
1065
1066error_dequeued:
1067        kfree(xfer);
1068error_kmalloc:
1069        return result;
1070}
1071EXPORT_SYMBOL_GPL(wa_urb_enqueue);
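     /*
      * Usage sketch (hypothetical caller, not part of this file): a host
      * controller glue driver's urb_enqueue operation would simply hand
      * the URB over to the wire adapter core, roughly:
      *
      *     static int example_op_urb_enqueue(struct usb_hcd *usb_hcd,
      *                                       struct urb *urb, gfp_t gfp)
      *     {
      *             struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
      *             struct example_hc *hc = container_of(wusbhc,
      *                                     struct example_hc, wusbhc);
      *
      *             return wa_urb_enqueue(&hc->wa, urb->ep, urb, gfp);
      *     }
      *
      * example_hc, its embedded wusbhc and its wa member are assumptions
      * used only for illustration; see the real HWA/DWA host glue for
      * the actual caller and types.
      */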
1072
1073/*
 1074 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 1075 * handler] is called.
1076 *
 1077 * Until a transfer goes successfully through wa_urb_enqueue() it
 1078 * needs to be dequeued with the completion called; when stuck in the
 1079 * delayed list or before wa_xfer_setup() is called, we need to do completion.
1080 *
 1081 *  not setup  If there is no hcpriv yet, that means that enqueue
1082 *             still had no time to set the xfer up. Because
1083 *             urb->status should be other than -EINPROGRESS,
1084 *             enqueue() will catch that and bail out.
1085 *
1086 * If the transfer has gone through setup, we just need to clean it
1087 * up. If it has gone through submit(), we have to abort it [with an
1088 * asynch request] and then make sure we cancel each segment.
1089 *
1090 */
1091int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1092{
1093        unsigned long flags, flags2;
1094        struct wa_xfer *xfer;
1095        struct wa_seg *seg;
1096        struct wa_rpipe *rpipe;
1097        unsigned cnt;
1098        unsigned rpipe_ready = 0;
1099
1100        xfer = urb->hcpriv;
1101        if (xfer == NULL) {
 1102                /* Nothing setup yet; enqueue() will see urb->status !=
 1103                 * -EINPROGRESS (set by the hcd layer) and bail out with
 1104                 * an error, no need to do completion
1105                 */
1106                BUG_ON(urb->status == -EINPROGRESS);
1107                goto out;
1108        }
1109        spin_lock_irqsave(&xfer->lock, flags);
1110        rpipe = xfer->ep->hcpriv;
1111        /* Check the delayed list -> if there, release and complete */
1112        spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1113        if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1114                goto dequeue_delayed;
1115        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1116        if (xfer->seg == NULL)          /* still hasn't reached */
1117                goto out_unlock;        /* setup(), enqueue_b() completes */
1118        /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1119        __wa_xfer_abort(xfer);
1120        for (cnt = 0; cnt < xfer->segs; cnt++) {
1121                seg = xfer->seg[cnt];
1122                switch (seg->status) {
1123                case WA_SEG_NOTREADY:
1124                case WA_SEG_READY:
1125                        printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1126                               xfer, cnt, seg->status);
1127                        WARN_ON(1);
1128                        break;
1129                case WA_SEG_DELAYED:
1130                        seg->status = WA_SEG_ABORTED;
1131                        spin_lock_irqsave(&rpipe->seg_lock, flags2);
1132                        list_del(&seg->list_node);
1133                        xfer->segs_done++;
1134                        rpipe_ready = rpipe_avail_inc(rpipe);
1135                        spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1136                        break;
1137                case WA_SEG_SUBMITTED:
1138                        seg->status = WA_SEG_ABORTED;
1139                        usb_unlink_urb(&seg->urb);
1140                        if (xfer->is_inbound == 0)
1141                                usb_unlink_urb(seg->dto_urb);
1142                        xfer->segs_done++;
1143                        rpipe_ready = rpipe_avail_inc(rpipe);
1144                        break;
1145                case WA_SEG_PENDING:
1146                        seg->status = WA_SEG_ABORTED;
1147                        xfer->segs_done++;
1148                        rpipe_ready = rpipe_avail_inc(rpipe);
1149                        break;
1150                case WA_SEG_DTI_PENDING:
1151                        usb_unlink_urb(wa->dti_urb);
1152                        seg->status = WA_SEG_ABORTED;
1153                        xfer->segs_done++;
1154                        rpipe_ready = rpipe_avail_inc(rpipe);
1155                        break;
1156                case WA_SEG_DONE:
1157                case WA_SEG_ERROR:
1158                case WA_SEG_ABORTED:
1159                        break;
1160                }
1161        }
1162        xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
1163        __wa_xfer_is_done(xfer);
1164        spin_unlock_irqrestore(&xfer->lock, flags);
1165        wa_xfer_completion(xfer);
1166        if (rpipe_ready)
1167                wa_xfer_delayed_run(rpipe);
1168        return 0;
1169
1170out_unlock:
1171        spin_unlock_irqrestore(&xfer->lock, flags);
1172out:
1173        return 0;
1174
1175dequeue_delayed:
1176        list_del_init(&xfer->list_node);
1177        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1178        xfer->result = urb->status;
1179        spin_unlock_irqrestore(&xfer->lock, flags);
1180        wa_xfer_giveback(xfer);
1181        usb_put_urb(urb);               /* we got a ref in enqueue() */
1182        return 0;
1183}
1184EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1185
1186/*
1187 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1188 * codes
1189 *
1190 * Positive errno values are internal inconsistencies and should be
1191 * flagged louder. Negative are to be passed up to the user in the
1192 * normal way.
1193 *
1194 * @status: USB WA status code -- high two bits are stripped.
1195 */
1196static int wa_xfer_status_to_errno(u8 status)
1197{
1198        int errno;
1199        u8 real_status = status;
1200        static int xlat[] = {
1201                [WA_XFER_STATUS_SUCCESS] =              0,
1202                [WA_XFER_STATUS_HALTED] =               -EPIPE,
1203                [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
1204                [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
1205                [WA_XFER_RESERVED] =                    EINVAL,
1206                [WA_XFER_STATUS_NOT_FOUND] =            0,
1207                [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1208                [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
1209                [WA_XFER_STATUS_ABORTED] =              -EINTR,
1210                [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
1211                [WA_XFER_INVALID_FORMAT] =              EINVAL,
1212                [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
1213                [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
1214        };
1215        status &= 0x3f;
1216
1217        if (status == 0)
1218                return 0;
1219        if (status >= ARRAY_SIZE(xlat)) {
1220                if (printk_ratelimit())
1221                        printk(KERN_ERR "%s(): BUG? "
1222                               "Unknown WA transfer status 0x%02x\n",
1223                               __func__, real_status);
1224                return -EINVAL;
1225        }
1226        errno = xlat[status];
1227        if (unlikely(errno > 0)) {
1228                if (printk_ratelimit())
1229                        printk(KERN_ERR "%s(): BUG? "
1230                               "Inconsistent WA status: 0x%02x\n",
1231                               __func__, real_status);
1232                errno = -errno;
1233        }
1234        return errno;
1235}
1236
1237/*
1238 * Process a xfer result completion message
1239 *
1240 * inbound transfers: need to schedule a DTI read
1241 *
 1242 * FIXME: this function needs to be broken up in parts
1243 */
1244static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1245{
1246        int result;
1247        struct device *dev = &wa->usb_iface->dev;
1248        unsigned long flags;
1249        u8 seg_idx;
1250        struct wa_seg *seg;
1251        struct wa_rpipe *rpipe;
1252        struct wa_xfer_result *xfer_result = wa->xfer_result;
1253        u8 done = 0;
1254        u8 usb_status;
1255        unsigned rpipe_ready = 0;
1256
1257        spin_lock_irqsave(&xfer->lock, flags);
1258        seg_idx = xfer_result->bTransferSegment & 0x7f;
1259        if (unlikely(seg_idx >= xfer->segs))
1260                goto error_bad_seg;
1261        seg = xfer->seg[seg_idx];
1262        rpipe = xfer->ep->hcpriv;
1263        usb_status = xfer_result->bTransferStatus;
1264        dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1265                xfer, seg_idx, usb_status, seg->status);
1266        if (seg->status == WA_SEG_ABORTED
1267            || seg->status == WA_SEG_ERROR)     /* already handled */
1268                goto segment_aborted;
 1269        if (seg->status == WA_SEG_SUBMITTED)    /* oops, got here */
1270                seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
1271        if (seg->status != WA_SEG_PENDING) {
1272                if (printk_ratelimit())
1273                        dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1274                                xfer, seg_idx, seg->status);
1275                seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
1276        }
1277        if (usb_status & 0x80) {
1278                seg->result = wa_xfer_status_to_errno(usb_status);
1279                dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1280                        xfer, seg->index, usb_status);
1281                goto error_complete;
1282        }
1283        /* FIXME: we ignore warnings, tally them for stats */
1284        if (usb_status & 0x40)          /* Warning?... */
1285                usb_status = 0;         /* ... pass */
1286        if (xfer->is_inbound) { /* IN data phase: read to buffer */
1287                seg->status = WA_SEG_DTI_PENDING;
1288                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1289                if (xfer->is_dma) {
1290                        wa->buf_in_urb->transfer_dma =
1291                                xfer->urb->transfer_dma
1292                                + seg_idx * xfer->seg_size;
1293                        wa->buf_in_urb->transfer_flags
1294                                |= URB_NO_TRANSFER_DMA_MAP;
1295                } else {
1296                        wa->buf_in_urb->transfer_buffer =
1297                                xfer->urb->transfer_buffer
1298                                + seg_idx * xfer->seg_size;
1299                        wa->buf_in_urb->transfer_flags
1300                                &= ~URB_NO_TRANSFER_DMA_MAP;
1301                }
1302                wa->buf_in_urb->transfer_buffer_length =
1303                        le32_to_cpu(xfer_result->dwTransferLength);
1304                wa->buf_in_urb->context = seg;
1305                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1306                if (result < 0)
1307                        goto error_submit_buf_in;
1308        } else {
1309                /* OUT data phase, complete it -- */
1310                seg->status = WA_SEG_DONE;
1311                seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1312                xfer->segs_done++;
1313                rpipe_ready = rpipe_avail_inc(rpipe);
1314                done = __wa_xfer_is_done(xfer);
1315        }
1316        spin_unlock_irqrestore(&xfer->lock, flags);
1317        if (done)
1318                wa_xfer_completion(xfer);
1319        if (rpipe_ready)
1320                wa_xfer_delayed_run(rpipe);
1321        return;
1322
1323error_submit_buf_in:
1324        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1325                dev_err(dev, "DTI: URB max acceptable errors "
1326                        "exceeded, resetting device\n");
1327                wa_reset_all(wa);
1328        }
1329        if (printk_ratelimit())
1330                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1331                        xfer, seg_idx, result);
1332        seg->result = result;
1333error_complete:
1334        seg->status = WA_SEG_ERROR;
1335        xfer->segs_done++;
1336        rpipe_ready = rpipe_avail_inc(rpipe);
1337        __wa_xfer_abort(xfer);
1338        done = __wa_xfer_is_done(xfer);
1339        spin_unlock_irqrestore(&xfer->lock, flags);
1340        if (done)
1341                wa_xfer_completion(xfer);
1342        if (rpipe_ready)
1343                wa_xfer_delayed_run(rpipe);
1344        return;
1345
1346error_bad_seg:
1347        spin_unlock_irqrestore(&xfer->lock, flags);
1348        wa_urb_dequeue(wa, xfer->urb);
1349        if (printk_ratelimit())
1350                dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1351        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1352                dev_err(dev, "DTI: URB max acceptable errors "
1353                        "exceeded, resetting device\n");
1354                wa_reset_all(wa);
1355        }
1356        return;
1357
1358segment_aborted:
1359        /* nothing to do, as the aborter did the completion */
1360        spin_unlock_irqrestore(&xfer->lock, flags);
1361}
1362
1363/*
1364 * Callback for the IN data phase
1365 *
 1366 * If successful, transition state; otherwise, take a note of the
1367 * error, mark this segment done and try completion.
1368 *
1369 * Note we don't access until we are sure that the transfer hasn't
1370 * been cancelled (ECONNRESET, ENOENT), which could mean that
1371 * seg->xfer could be already gone.
1372 */
1373static void wa_buf_in_cb(struct urb *urb)
1374{
1375        struct wa_seg *seg = urb->context;
1376        struct wa_xfer *xfer = seg->xfer;
1377        struct wahc *wa;
1378        struct device *dev;
1379        struct wa_rpipe *rpipe;
1380        unsigned rpipe_ready;
1381        unsigned long flags;
1382        u8 done = 0;
1383
1384        switch (urb->status) {
1385        case 0:
1386                spin_lock_irqsave(&xfer->lock, flags);
1387                wa = xfer->wa;
1388                dev = &wa->usb_iface->dev;
1389                rpipe = xfer->ep->hcpriv;
1390                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1391                        xfer, seg->index, (size_t)urb->actual_length);
1392                seg->status = WA_SEG_DONE;
1393                seg->result = urb->actual_length;
1394                xfer->segs_done++;
1395                rpipe_ready = rpipe_avail_inc(rpipe);
1396                done = __wa_xfer_is_done(xfer);
1397                spin_unlock_irqrestore(&xfer->lock, flags);
1398                if (done)
1399                        wa_xfer_completion(xfer);
1400                if (rpipe_ready)
1401                        wa_xfer_delayed_run(rpipe);
1402                break;
1403        case -ECONNRESET:       /* URB unlinked; no need to do anything */
1404        case -ENOENT:           /* as it was done by whoever unlinked us */
1405                break;
1406        default:                /* Other errors ... */
1407                spin_lock_irqsave(&xfer->lock, flags);
1408                wa = xfer->wa;
1409                dev = &wa->usb_iface->dev;
1410                rpipe = xfer->ep->hcpriv;
1411                if (printk_ratelimit())
1412                        dev_err(dev, "xfer %p#%u: data in error %d\n",
1413                                xfer, seg->index, urb->status);
1414                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1415                            EDC_ERROR_TIMEFRAME)) {
1416                        dev_err(dev, "DTO: URB max acceptable errors "
1417                                "exceeded, resetting device\n");
1418                        wa_reset_all(wa);
1419                }
1420                seg->status = WA_SEG_ERROR;
1421                seg->result = urb->status;
1422                xfer->segs_done++;
1423                rpipe_ready = rpipe_avail_inc(rpipe);
1424                __wa_xfer_abort(xfer);
1425                done = __wa_xfer_is_done(xfer);
1426                spin_unlock_irqrestore(&xfer->lock, flags);
1427                if (done)
1428                        wa_xfer_completion(xfer);
1429                if (rpipe_ready)
1430                        wa_xfer_delayed_run(rpipe);
1431        }
1432}
1433
1434/*
1435 * Handle an incoming transfer result buffer
1436 *
1437 * Given a transfer result buffer, it completes the transfer (possibly
1438 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1439 * new transfer result read.
1440 *
1441 *
1442 * The xfer_result DTI URB state machine
1443 *
1444 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1445 *
1446 * We start in OFF mode, the first xfer_result notification [through
1447 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1448 * read.
1449 *
1450 * We receive a buffer -- if it is not a xfer_result, we complain and
1451 * repost the DTI-URB. If it is a xfer_result, we do the xfer seg
1452 * request accounting. If it is an IN segment, we move to RBI and post
1453 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1454 * repost the DTI-URB and move back to the RXR state. If there was
1455 * no IN segment, the DTI-URB is reposted directly.
1456 *
1457 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
1458 * errors) in the URBs.
1459 */
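/*
 * Illustrative sketch only -- compiled out and not used anywhere in the
 * driver.  It condenses the state transitions described above into a
 * hypothetical helper; the names wa_dti_state and wa_dti_next() are
 * made up for this comment.
 */
#if 0
enum wa_dti_state { WA_DTI_OFF, WA_DTI_RXR, WA_DTI_RBI };

static enum wa_dti_state wa_dti_next(enum wa_dti_state state,
                                     int fatal, int in_segment)
{
        if (fatal)              /* ENOENT/ESHUTDOWN or too many URB errors */
                return WA_DTI_OFF;
        switch (state) {
        case WA_DTI_OFF:        /* first notification posts the DTI-URB */
                return WA_DTI_RXR;
        case WA_DTI_RXR:        /* xfer_result read; IN data phase pending? */
                return in_segment ? WA_DTI_RBI : WA_DTI_RXR;
        case WA_DTI_RBI:        /* BUF-IN done, DTI-URB reposted */
                return WA_DTI_RXR;
        }
        return state;
}
#endif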
1460static void wa_xfer_result_cb(struct urb *urb)
1461{
1462        int result;
1463        struct wahc *wa = urb->context;
1464        struct device *dev = &wa->usb_iface->dev;
1465        struct wa_xfer_result *xfer_result;
1466        u32 xfer_id;
1467        struct wa_xfer *xfer;
1468        u8 usb_status;
1469
1470        BUG_ON(wa->dti_urb != urb);
1471        switch (wa->dti_urb->status) {
1472        case 0:
1473                /* We have a xfer result buffer; check it */
1474                dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1475                        urb->actual_length, urb->transfer_buffer);
1476                if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1477                        dev_err(dev, "DTI Error: xfer result--bad size "
1478                                "(%d bytes vs %zu needed)\n",
1479                                urb->actual_length, sizeof(*xfer_result));
1480                        break;
1481                }
1482                xfer_result = wa->xfer_result;
1483                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1484                        dev_err(dev, "DTI Error: xfer result--"
1485                                "bad header length %u\n",
1486                                xfer_result->hdr.bLength);
1487                        break;
1488                }
1489                if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1490                        dev_err(dev, "DTI Error: xfer result--"
1491                                "bad header type 0x%02x\n",
1492                                xfer_result->hdr.bNotifyType);
1493                        break;
1494                }
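                /* keep only the low 6 status bits of bTransferStatus */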
1495                usb_status = xfer_result->bTransferStatus & 0x3f;
1496                if (usb_status == WA_XFER_STATUS_ABORTED
1497                    || usb_status == WA_XFER_STATUS_NOT_FOUND)
1498                        /* taken care of already */
1499                        break;
1500                xfer_id = xfer_result->dwTransferID;
1501                xfer = wa_xfer_get_by_id(wa, xfer_id);
1502                if (xfer == NULL) {
1503                        /* FIXME: transaction might have been cancelled */
1504                        dev_err(dev, "DTI Error: xfer result--"
1505                                "unknown xfer 0x%08x (status 0x%02x)\n",
1506                                xfer_id, usb_status);
1507                        break;
1508                }
1509                wa_xfer_result_chew(wa, xfer);
1510                wa_xfer_put(xfer);
1511                break;
1512        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
1513        case -ESHUTDOWN:        /* going away! */
1514                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1515                goto out;
1516        default:
1517                /* Unknown error */
1518                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1519                            EDC_ERROR_TIMEFRAME)) {
1520                        dev_err(dev, "DTI: URB max acceptable errors "
1521                                "exceeded, resetting device\n");
1522                        wa_reset_all(wa);
1523                        goto out;
1524                }
1525                if (printk_ratelimit())
1526                        dev_err(dev, "DTI: URB error %d\n", urb->status);
1527                break;
1528        }
1529        /* Resubmit the DTI URB */
1530        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1531        if (result < 0) {
1532                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1533                        "resetting\n", result);
1534                wa_reset_all(wa);
1535        }
1536out:
1537        return;
1538}
1539
1540/*
1541 * Transfer complete notification
1542 *
1543 * Called from the notif.c code. We get a notification on EP2 saying
1544 * that some endpoint has some transfer result data available. We are
1545 * about to read it.
1546 *
1547 * To speed things up, we always have a URB reading on the DTI endpoint; we
1548 * don't really set it up and start it until the first xfer complete
1549 * notification arrives, which is what we do here.
1550 *
1551 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1552 * machine starts.
1553 *
1554 * So here we just initialize the DTI URB for reading transfer result
1555 * notifications and also the buffer-in URB, for reading buffers. Then
1556 * we just submit the DTI URB.
1557 *
1558 * @wa shall be referenced
1559 */
1560void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1561{
1562        int result;
1563        struct device *dev = &wa->usb_iface->dev;
1564        struct wa_notif_xfer *notif_xfer;
1565        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1566
1567        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1568        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1569
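        /* 0x80 is USB_DIR_IN; the notification carries only the ep number */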
1570        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1571                /* FIXME: hardcoded limitation, adapt */
1572                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1573                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1574                goto error;
1575        }
1576        if (wa->dti_urb != NULL)        /* DTI URB already started */
1577                goto out;
1578
1579        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1580        if (wa->dti_urb == NULL) {
1581                dev_err(dev, "Can't allocate DTI URB\n");
1582                goto error_dti_urb_alloc;
1583        }
1584        usb_fill_bulk_urb(
1585                wa->dti_urb, wa->usb_dev,
1586                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1587                wa->xfer_result, wa->xfer_result_size,
1588                wa_xfer_result_cb, wa);
1589
1590        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1591        if (wa->buf_in_urb == NULL) {
1592                dev_err(dev, "Can't allocate BUF-IN URB\n");
1593                goto error_buf_in_urb_alloc;
1594        }
1595        usb_fill_bulk_urb(
1596                wa->buf_in_urb, wa->usb_dev,
1597                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1598                NULL, 0, wa_buf_in_cb, wa);
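        /* transfer buffer, length and context are filled in per IN segment later */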
1599        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1600        if (result < 0) {
1601                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1602                        "resetting\n", result);
1603                goto error_dti_urb_submit;
1604        }
1605out:
1606        return;
1607
1608error_dti_urb_submit:
1609        usb_put_urb(wa->buf_in_urb);
1610error_buf_in_urb_alloc:
1611        usb_put_urb(wa->dti_urb);
1612        wa->dti_urb = NULL;
1613error_dti_urb_alloc:
1614error:
1615        wa_reset_all(wa);
1616}
1617