linux/drivers/usb/wusbcore/wa-xfer.c
/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * How transfers work: get a buffer, break it up into segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, it is a pain to implement.
 *
 *
 * ENTRY POINTS
 *
 *   FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   Warned you are; it's my second try and still not happy with it.
 *
 * NOTES:
 *
 *   - No iso
 *
 *   - Supports DMA xfers, control, bulk and maybe interrupt
 *
 *   - Does not recycle unused rpipes
 *
 *     An rpipe is assigned to an endpoint the first time it is used,
 *     and then it's there, assigned, until the endpoint is disabled
 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *     (should be a mutex).
 *
 *     Two ways it could be done:
 *
 *     (a) set up a timer every time an rpipe's use count drops to 1
 *         (which means unused) or when a transfer ends. Reset the
 *         timer when an xfer is queued. If the timer expires, release
 *         the rpipe [see rpipe_ep_disable()].
 *
 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *         when none are found go over the list, check their endpoint
 *         and their activity record (if no last-xfer-done-ts in the
 *         last x seconds) take it
 *
 *     However, due to the fact that we have a set of limited
 *     resources (max-segments-at-the-same-time per xfer,
 *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *     we are going to have to rebuild all this based on a scheduler,
 *     where we have a list of transactions to do and, based on the
 *     availability of the different required components (blocks,
 *     rpipes, segment slots, etc), we go scheduling them. Painful.
 */
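/*
 * Illustrative example of the segmentation above (made-up numbers, not
 * from the spec): with a segment size of 3584 bytes, a 10000-byte OUT
 * URB is split into DIV_ROUND_UP(10000, 3584) = 3 segment requests
 * carrying 3584, 3584 and 2832 bytes, each one a struct wa_xfer_*
 * request header followed by its chunk of the data buffer, all sent
 * over the DTO endpoint.
 */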
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/hash.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
        WA_SEGS_MAX = 255,
};

enum wa_seg_status {
        WA_SEG_NOTREADY,
        WA_SEG_READY,
        WA_SEG_DELAYED,
        WA_SEG_SUBMITTED,
        WA_SEG_PENDING,
        WA_SEG_DTI_PENDING,
        WA_SEG_DONE,
        WA_SEG_ERROR,
        WA_SEG_ABORTED,
};
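
/*
 * Sketch, not used by the driver: the states above are deliberately
 * declared in life-cycle order and code below relies on that ordering
 * (e.g. wa_seg_dto_cb() tests 'seg->status < WA_SEG_PENDING'). A
 * hypothetical helper exploiting the ordering would be:
 */
static inline int wa_seg_is_terminal(enum wa_seg_status status)
{
        /* DONE, ERROR and ABORTED are the final states */
        return status >= WA_SEG_DONE;
}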

static void wa_xfer_delayed_run(struct wa_rpipe *);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
        struct urb urb;
        struct urb *dto_urb;            /* for data output */
        struct list_head list_node;     /* for rpipe->req_list */
        struct wa_xfer *xfer;           /* owning xfer */
        u8 index;                       /* which segment we are */
        enum wa_seg_status status;
        ssize_t result;                 /* bytes xfered or error */
        struct wa_xfer_hdr xfer_hdr;
        u8 xfer_extra[];                /* xtra space for xfer_hdr_ctl */
};

static void wa_seg_init(struct wa_seg *seg)
{
        /* usb_init_urb() repeats a lot of work, so we do it here */
        kref_init(&seg->urb.kref);
}

/*
 * Protected by xfer->lock
 */
struct wa_xfer {
        struct kref refcnt;
        struct list_head list_node;
        spinlock_t lock;
        u32 id;

        struct wahc *wa;                /* Wire adapter we are plugged to */
        struct usb_host_endpoint *ep;
        struct urb *urb;                /* URB we are transferring for */
        struct wa_seg **seg;            /* transfer segments */
        u8 segs, segs_submitted, segs_done;
        unsigned is_inbound:1;
        unsigned is_dma:1;
        size_t seg_size;
        int result;

        gfp_t gfp;                      /* allocation mask */

        struct wusb_dev *wusb_dev;      /* for activity timestamps */
};

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
        kref_init(&xfer->refcnt);
        INIT_LIST_HEAD(&xfer->list_node);
        spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
        if (xfer->seg) {
                unsigned cnt;
                for (cnt = 0; cnt < xfer->segs; cnt++) {
                        /* dto_urb is only allocated for outbound segments
                         * and usb_put_urb() copes with NULL, so no need
                         * to check the direction here. */
                        usb_put_urb(xfer->seg[cnt]->dto_urb);
                        usb_put_urb(&xfer->seg[cnt]->urb);
                }
        }
        kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
        kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
        kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
        /* FIXME: segmentation broken -- kills DWA */
        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
        wa_put(xfer->wa);
        wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
        if (xfer->wusb_dev)
                wusb_dev_put(xfer->wusb_dev);
        rpipe_put(xfer->ep->hcpriv);
        wa_xfer_giveback(xfer);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
        struct device *dev = &xfer->wa->usb_iface->dev;
        unsigned result, cnt;
        struct wa_seg *seg;
        struct urb *urb = xfer->urb;
        unsigned found_short = 0;

        result = xfer->segs_done == xfer->segs_submitted;
        if (result == 0)
                goto out;
        urb->actual_length = 0;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_DONE:
                        if (found_short && seg->result > 0) {
                                dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
                                        xfer, cnt, seg->result);
                                urb->status = -EINVAL;
                                goto out;
                        }
                        urb->actual_length += seg->result;
                        if (seg->result < xfer->seg_size
                            && cnt != xfer->segs-1)
                                found_short = 1;
                        dev_dbg(dev, "xfer %p#%u: DONE short %d "
                                "result %zu urb->actual_length %d\n",
                                xfer, seg->index, found_short, seg->result,
                                urb->actual_length);
                        break;
                case WA_SEG_ERROR:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
                                xfer, seg->index, seg->result);
                        goto out;
                case WA_SEG_ABORTED:
                        dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
                                xfer, seg->index, urb->status);
                        xfer->result = urb->status;
                        goto out;
                default:
                        dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
                                 xfer, cnt, seg->status);
                        xfer->result = -EINVAL;
                        goto out;
                }
        }
        xfer->result = 0;
out:
        return result;
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/*
 * Return the ID associated with xfer
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
        return xfer->id;
}

/*
 * Search for a transfer by ID on the wire adapter's transfer list
 *
 * Takes a reference on the transfer when found; the caller has to
 * drop it with wa_xfer_put() when done.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
        unsigned long flags;
        struct wa_xfer *xfer_itr;
        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
                if (id == xfer_itr->id) {
                        wa_xfer_get(xfer_itr);
                        goto out;
                }
        }
        xfer_itr = NULL;
out:
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
        return xfer_itr;
}
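
/*
 * Usage sketch (hypothetical caller, not part of this file): a path
 * that resolves an ID from a DTI result owns a reference afterwards
 * and must drop it:
 *
 *      struct wa_xfer *xfer = wa_xfer_get_by_id(wa, xfer_id);
 *      if (xfer == NULL)
 *              return;                 // transfer already gone
 *      // ... use xfer (taking xfer->lock as needed) ...
 *      wa_xfer_put(xfer);
 */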

struct wa_xfer_abort_buffer {
        struct urb urb;
        struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
        struct wa_xfer_abort_buffer *b = urb->context;
        usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        struct wa_xfer_abort_buffer *b;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        b = kmalloc(sizeof(*b), GFP_ATOMIC);
        if (b == NULL)
                goto error_kmalloc;
        b->cmd.bLength = sizeof(b->cmd);
        b->cmd.bRequestType = WA_XFER_ABORT;
        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
        b->cmd.dwTransferID = wa_xfer_id(xfer);

        usb_init_urb(&b->urb);
        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
                usb_sndbulkpipe(xfer->wa->usb_dev,
                                xfer->wa->dto_epd->bEndpointAddress),
                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
        if (result < 0)
                goto error_submit;
        return;                         /* callback frees! */

error_submit:
        if (printk_ratelimit())
                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
                        xfer, result);
        kfree(b);
error_kmalloc:
        return;
}

/*
 * Compute the size of the transfer request header for this transfer
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
                                     enum wa_xfer_type *pxfer_type)
{
        ssize_t result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        size_t maxpktsize;
        struct urb *urb = xfer->urb;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        switch (rpipe->descr.bmAttribute & 0x3) {
        case USB_ENDPOINT_XFER_CONTROL:
                *pxfer_type = WA_XFER_TYPE_CTL;
                result = sizeof(struct wa_xfer_ctl);
                break;
        case USB_ENDPOINT_XFER_INT:
        case USB_ENDPOINT_XFER_BULK:
                *pxfer_type = WA_XFER_TYPE_BI;
                result = sizeof(struct wa_xfer_bi);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                dev_err(dev, "FIXME: ISOC not implemented\n");
                result = -ENOSYS;
                goto error;
        default:
                /* never happens */
                BUG();
                result = -EINVAL;       /* shut gcc up */
        }
        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
        /* Compute the segment size and make sure it is a multiple of
         * the maxpktsize (WUSB1.0[8.3.3.1])... not really much of
         * a check (FIXME) */
        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
        if (xfer->seg_size < maxpktsize) {
                dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
                        "%zu\n", xfer->seg_size, maxpktsize);
                result = -EINVAL;
                goto error;
        }
        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
        xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
                / xfer->seg_size;
        if (xfer->segs >= WA_SEGS_MAX) {
                dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
                        (int)(urb->transfer_buffer_length / xfer->seg_size),
                        WA_SEGS_MAX);
                result = -EINVAL;
                goto error;
        }
        if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
                xfer->segs = 1;
error:
        return result;
}
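
/*
 * Worked example with made-up descriptor values: wBlocks = 4 and
 * bRPipeBlockSize = 10 give a raw segment size of 4 * (1 << 9) = 2048
 * bytes; with wMaxPacketSize = 512 that is already a multiple, so
 * seg_size stays at 2048 and a 5000-byte URB needs three segments of
 * 2048, 2048 and 904 bytes.
 */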

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
                                 struct wa_xfer_hdr *xfer_hdr0,
                                 enum wa_xfer_type xfer_type,
                                 size_t xfer_hdr_size)
{
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        xfer_hdr0->bLength = xfer_hdr_size;
        xfer_hdr0->bRequestType = xfer_type;
        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
        xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
        xfer_hdr0->bTransferSegment = 0;
        switch (xfer_type) {
        case WA_XFER_TYPE_CTL: {
                struct wa_xfer_ctl *xfer_ctl =
                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
                BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
                       && xfer->urb->setup_packet == NULL);
                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
                       sizeof(xfer_ctl->baSetupData));
                break;
        }
        case WA_XFER_TYPE_BI:
                break;
        case WA_XFER_TYPE_ISO:
                printk(KERN_ERR "FIXME: ISOC not implemented\n");
        default:
                BUG();
        }
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
                        xfer, seg->index, urb->actual_length);
                if (seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                seg->result = urb->actual_length;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by whoever unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data out error %d\n",
                        xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                if (seg->status != WA_SEG_ERROR) {
                        seg->status = WA_SEG_ERROR;
                        seg->result = urb->status;
                        xfer->segs_done++;
                        __wa_xfer_abort(xfer);
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        done = __wa_xfer_is_done(xfer);
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is
 * outbound, as in that case wa_seg_dto_cb will do it when the OUT
 * data phase finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by whoever unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: request error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                usb_unlink_urb(seg->dto_urb);
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
        int result, cnt;
        size_t alloc_size = sizeof(*xfer->seg[0])
                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
        struct usb_device *usb_dev = xfer->wa->usb_dev;
        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
        struct wa_seg *seg;
        size_t buf_itr, buf_size, buf_itr_size;

        result = -ENOMEM;
        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
        if (xfer->seg == NULL)
                goto error_segs_kzalloc;
        buf_itr = 0;
        buf_size = xfer->urb->transfer_buffer_length;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
                if (seg == NULL)
                        goto error_seg_kzalloc;
                wa_seg_init(seg);
                seg->xfer = xfer;
                seg->index = cnt;
                usb_fill_bulk_urb(&seg->urb, usb_dev,
                                  usb_sndbulkpipe(usb_dev,
                                                  dto_epd->bEndpointAddress),
                                  &seg->xfer_hdr, xfer_hdr_size,
                                  wa_seg_cb, seg);
                buf_itr_size = buf_size > xfer->seg_size ?
                        xfer->seg_size : buf_size;
                if (xfer->is_inbound == 0 && buf_size > 0) {
                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->dto_urb == NULL)
                                goto error_dto_alloc;
                        usb_fill_bulk_urb(
                                seg->dto_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                                dto_epd->bEndpointAddress),
                                NULL, 0, wa_seg_dto_cb, seg);
                        if (xfer->is_dma) {
                                seg->dto_urb->transfer_dma =
                                        xfer->urb->transfer_dma + buf_itr;
                                seg->dto_urb->transfer_flags |=
                                        URB_NO_TRANSFER_DMA_MAP;
                        } else
                                seg->dto_urb->transfer_buffer =
                                        xfer->urb->transfer_buffer + buf_itr;
                        seg->dto_urb->transfer_buffer_length = buf_itr_size;
                }
                seg->status = WA_SEG_READY;
                buf_itr += buf_itr_size;
                buf_size -= buf_itr_size;
        }
        return 0;

error_dto_alloc:
        kfree(xfer->seg[cnt]);
        xfer->seg[cnt] = NULL;
error_seg_kzalloc:
        /* cnt is left at the segment that failed; that one is already
         * gone (or never was allocated), so release only the ones
         * before it. usb_put_urb() copes with a NULL dto_urb. */
        while (cnt-- > 0) {
                usb_put_urb(xfer->seg[cnt]->dto_urb);
                kfree(xfer->seg[cnt]);
        }
        kfree(xfer->seg);
        xfer->seg = NULL;
error_segs_kzalloc:
        return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
        size_t xfer_hdr_size, cnt, transfer_size;
        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
        if (result < 0)
                goto error_setup_sizes;
        xfer_hdr_size = result;
        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
        if (result < 0) {
                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
                        xfer, xfer->segs, result);
                goto error_setup_segs;
        }
        /* Fill the first header */
        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        wa_xfer_id_init(xfer);
        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

        /* Fill remaining headers */
        xfer_hdr = xfer_hdr0;
        transfer_size = urb->transfer_buffer_length;
        /* like the later headers, dwTransferLength is little endian */
        xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
                cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
        transfer_size -= xfer->seg_size;
        for (cnt = 1; cnt < xfer->segs; cnt++) {
                xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
                memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                xfer_hdr->bTransferSegment = cnt;
                xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
                        cpu_to_le32(xfer->seg_size)
                        : cpu_to_le32(transfer_size);
                xfer->seg[cnt]->status = WA_SEG_READY;
                transfer_size -= xfer->seg_size;
        }
        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
        result = 0;
error_setup_segs:
error_setup_sizes:
        return result;
}

/*
 * Submit a segment's request and, if there is one, its OUT data phase
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
                           struct wa_seg *seg)
{
        int result;
        result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
        if (result < 0) {
                printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
                       xfer, seg->index, result);
                goto error_seg_submit;
        }
        if (seg->dto_urb) {
                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                if (result < 0) {
                        printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
                               xfer, seg->index, result);
                        goto error_dto_submit;
                }
        }
        seg->status = WA_SEG_SUBMITTED;
        rpipe_avail_dec(rpipe);
        return 0;

error_dto_submit:
        usb_unlink_urb(&seg->urb);
error_seg_submit:
        seg->status = WA_SEG_ERROR;
        seg->result = result;
        return result;
}

/*
 * Execute more queued request segments until the maximum concurrent
 * allowed is reached
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
        int result;
        struct device *dev = &rpipe->wa->usb_iface->dev;
        struct wa_seg *seg;
        struct wa_xfer *xfer;
        unsigned long flags;

        spin_lock_irqsave(&rpipe->seg_lock, flags);
        while (atomic_read(&rpipe->segs_available) > 0
              && !list_empty(&rpipe->seg_list)) {
                seg = list_entry(rpipe->seg_list.next, struct wa_seg,
                                 list_node);
                list_del(&seg->list_node);
                xfer = seg->xfer;
                result = __wa_seg_submit(rpipe, xfer, seg);
                dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
                        xfer, seg->index, atomic_read(&rpipe->segs_available), result);
                if (unlikely(result < 0)) {
                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
                        spin_lock_irqsave(&xfer->lock, flags);
                        __wa_xfer_abort(xfer);
                        xfer->segs_done++;
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        spin_lock_irqsave(&rpipe->seg_lock, flags);
                }
        }
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}
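
/*
 * Lock nesting sketch (illustrative only, grounded in the comment
 * above): xfer->lock is the outer lock and rpipe->seg_lock the inner
 * one, so a path holding only seg_lock (as above) must drop it before
 * it can take xfer->lock:
 *
 *      spin_lock_irqsave(&xfer->lock, flags);
 *      spin_lock(&rpipe->seg_lock);            // correct nesting
 *      ...
 *      spin_unlock(&rpipe->seg_lock);
 *      spin_unlock_irqrestore(&xfer->lock, flags);
 */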

/*
 * Submit a transfer's segments, delaying the ones that don't fit
 *
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
        int result;
        struct wahc *wa = xfer->wa;
        struct device *dev = &wa->usb_iface->dev;
        unsigned cnt;
        struct wa_seg *seg;
        unsigned long flags;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
        u8 available;
        u8 empty;

        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_add_tail(&xfer->list_node, &wa->xfer_list);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
        result = 0;
        spin_lock_irqsave(&rpipe->seg_lock, flags);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                available = atomic_read(&rpipe->segs_available);
                empty = list_empty(&rpipe->seg_list);
                seg = xfer->seg[cnt];
                dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
                        xfer, cnt, available, empty,
                        available == 0 || !empty ? "delayed" : "submitted");
                if (available == 0 || !empty) {
                        dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
                        seg->status = WA_SEG_DELAYED;
                        list_add_tail(&seg->list_node, &rpipe->seg_list);
                } else {
                        result = __wa_seg_submit(rpipe, xfer, seg);
                        if (result < 0) {
                                __wa_xfer_abort(xfer);
                                goto error_seg_submit;
                        }
                }
                xfer->segs_submitted++;
        }
error_seg_submit:
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
        return result;
}

/*
 * Second part of a URB/transfer enqueueing
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa     filled and refcounted
 * xfer->ep     filled with rpipe refcounted if
 *              delayed == 0
 * xfer->urb    filled and refcounted (this is the case when called
 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp    filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
        int result;
        unsigned long flags;
        struct urb *urb = xfer->urb;
        struct wahc *wa = xfer->wa;
        struct wusbhc *wusbhc = wa->wusb;
        struct wusb_dev *wusb_dev;
        unsigned done;

        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
        if (result < 0)
                goto error_rpipe_get;
        result = -ENODEV;
        /* FIXME: segmentation broken -- kills DWA */
        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
        if (urb->dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
        if (wusb_dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        mutex_unlock(&wusbhc->mutex);

        spin_lock_irqsave(&xfer->lock, flags);
        xfer->wusb_dev = wusb_dev;
        result = urb->status;
        if (urb->status != -EINPROGRESS)
                goto error_dequeued;

        result = __wa_xfer_setup(xfer, urb);
        if (result < 0)
                goto error_xfer_setup;
        result = __wa_xfer_submit(xfer);
        if (result < 0)
                goto error_xfer_submit;
        spin_unlock_irqrestore(&xfer->lock, flags);
        return;

        /* This is basically wa_xfer_completion() broken up.
         * wa_xfer_giveback() does a wa_xfer_put() that will call
         * wa_xfer_destroy() and clean up/undo setup().
         */
error_xfer_setup:
error_dequeued:
        spin_unlock_irqrestore(&xfer->lock, flags);
        /* FIXME: segmentation broken, kills DWA */
        if (wusb_dev)
                wusb_dev_put(wusb_dev);
error_dev_gone:
        rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
        xfer->result = result;
        wa_xfer_giveback(xfer);
        return;

error_xfer_submit:
        done = __wa_xfer_is_done(xfer);
        xfer->result = result;
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle.  That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
        struct wahc *wa = container_of(ws, struct wahc, xfer_work);
        struct wa_xfer *xfer, *next;
        struct urb *urb;

        spin_lock_irq(&wa->xfer_list_lock);
        list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
                                 list_node) {
                list_del_init(&xfer->list_node);
                spin_unlock_irq(&wa->xfer_list_lock);

                urb = xfer->urb;
                wa_urb_enqueue_b(xfer);
                usb_put_urb(urb);       /* taken when queuing */

                spin_lock_irq(&wa->xfer_list_lock);
        }
        spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call; else we call
 * it directly.
 *
 * @urb: We own a reference to it done by the HCD Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
                   struct urb *urb, gfp_t gfp)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer *xfer;
        unsigned long my_flags;
        unsigned cant_sleep = irqs_disabled() | in_atomic();

        if (urb->transfer_buffer == NULL
            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
            && urb->transfer_buffer_length != 0) {
                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
                dump_stack();
        }

        result = -ENOMEM;
        xfer = kzalloc(sizeof(*xfer), gfp);
        if (xfer == NULL)
                goto error_kmalloc;

        result = -ENOENT;
        if (urb->status != -EINPROGRESS)        /* cancelled */
                goto error_dequeued;            /* before starting? */
        wa_xfer_init(xfer);
        xfer->wa = wa_get(wa);
        xfer->urb = urb;
        xfer->gfp = gfp;
        xfer->ep = ep;
        urb->hcpriv = xfer;

        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
                xfer, urb, urb->pipe, urb->transfer_buffer_length,
                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
                cant_sleep ? "deferred" : "inline");

        if (cant_sleep) {
                usb_get_urb(urb);
                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
                queue_work(wusbd, &wa->xfer_work);
        } else {
                wa_urb_enqueue_b(xfer);
        }
        return 0;

error_dequeued:
        kfree(xfer);
error_kmalloc:
        return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
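
/*
 * Caller sketch (hypothetical; in this tree the real user is the HC
 * driver's urb_enqueue op, e.g. hwahc_op_urb_enqueue()):
 *
 *      static int my_op_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 *                                   gfp_t gfp)
 *      {
 *              struct wahc *wa = ...;  // from the hcd's private data
 *              return wa_urb_enqueue(wa, urb->ep, urb, gfp);
 *      }
 */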

/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion called; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
        unsigned long flags, flags2;
        struct wa_xfer *xfer;
        struct wa_seg *seg;
        struct wa_rpipe *rpipe;
        unsigned cnt;
        unsigned rpipe_ready = 0;

        xfer = urb->hcpriv;
        if (xfer == NULL) {
                /* Nothing setup yet; enqueue will see urb->status !=
                 * -EINPROGRESS (by hcd layer) and bail out with
                 * error, no need to do completion
                 */
                BUG_ON(urb->status == -EINPROGRESS);
                goto out;
        }
        spin_lock_irqsave(&xfer->lock, flags);
        rpipe = xfer->ep->hcpriv;
        /* Check the delayed list -> if there, release and complete */
        spin_lock_irqsave(&wa->xfer_list_lock, flags2);
        if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
                goto dequeue_delayed;
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
        if (xfer->seg == NULL)          /* still hasn't reached */
                goto out_unlock;        /* setup(), enqueue_b() completes */
        /* Ok, the xfer is in flight already, it's been setup and submitted.*/
        __wa_xfer_abort(xfer);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_NOTREADY:
                case WA_SEG_READY:
                        printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
                               xfer, cnt, seg->status);
                        WARN_ON(1);
                        break;
                case WA_SEG_DELAYED:
                        seg->status = WA_SEG_ABORTED;
                        spin_lock_irqsave(&rpipe->seg_lock, flags2);
                        list_del(&seg->list_node);
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
                        break;
                case WA_SEG_SUBMITTED:
                        seg->status = WA_SEG_ABORTED;
                        usb_unlink_urb(&seg->urb);
                        if (xfer->is_inbound == 0)
                                usb_unlink_urb(seg->dto_urb);
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_PENDING:
                        seg->status = WA_SEG_ABORTED;
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_DTI_PENDING:
                        usb_unlink_urb(wa->dti_urb);
                        seg->status = WA_SEG_ABORTED;
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_DONE:
                case WA_SEG_ERROR:
                case WA_SEG_ABORTED:
                        break;
                }
        }
        xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
        __wa_xfer_is_done(xfer);
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return 0;

out_unlock:
        spin_unlock_irqrestore(&xfer->lock, flags);
out:
        return 0;

dequeue_delayed:
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
        xfer->result = urb->status;
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_giveback(xfer);
        usb_put_urb(urb);               /* we got a ref in enqueue() */
        return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
        int errno;
        u8 real_status = status;
        static int xlat[] = {
                [WA_XFER_STATUS_SUCCESS] =              0,
                [WA_XFER_STATUS_HALTED] =               -EPIPE,
                [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
                [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
                [WA_XFER_RESERVED] =                    EINVAL,
                [WA_XFER_STATUS_NOT_FOUND] =            0,
                [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
                [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
                [WA_XFER_STATUS_ABORTED] =              -EINTR,
                [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
                [WA_XFER_INVALID_FORMAT] =              EINVAL,
                [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
                [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
        };
        status &= 0x3f;

        if (status == 0)
                return 0;
        if (status >= ARRAY_SIZE(xlat)) {
                if (printk_ratelimit())
                        printk(KERN_ERR "%s(): BUG? "
                               "Unknown WA transfer status 0x%02x\n",
                               __func__, real_status);
                return -EINVAL;
        }
        errno = xlat[status];
        if (unlikely(errno > 0)) {
                if (printk_ratelimit())
                        printk(KERN_ERR "%s(): BUG? "
                               "Inconsistent WA status: 0x%02x\n",
                               __func__, real_status);
                errno = -errno;
        }
        return errno;
}
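
/*
 * Example translations through the table above (values per WUSB1.0
 * Table 8.15 as encoded here):
 *
 *      wa_xfer_status_to_errno(WA_XFER_STATUS_SUCCESS)         -> 0
 *      wa_xfer_status_to_errno(WA_XFER_STATUS_HALTED)          -> -EPIPE
 *      wa_xfer_status_to_errno(WA_XFER_STATUS_ABORTED)         -> -EINTR
 *      wa_xfer_status_to_errno(0x80 | WA_XFER_STATUS_HALTED)   -> -EPIPE
 *
 * The last one works because the high two (error/warning) bits are
 * masked off before the lookup.
 */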

/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up into parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        unsigned long flags;
        u8 seg_idx;
        struct wa_seg *seg;
        struct wa_rpipe *rpipe;
        struct wa_xfer_result *xfer_result = wa->xfer_result;
        u8 done = 0;
        u8 usb_status;
        unsigned rpipe_ready = 0;

        spin_lock_irqsave(&xfer->lock, flags);
        seg_idx = xfer_result->bTransferSegment & 0x7f;
        if (unlikely(seg_idx >= xfer->segs))
                goto error_bad_seg;
        seg = xfer->seg[seg_idx];
        rpipe = xfer->ep->hcpriv;
        usb_status = xfer_result->bTransferStatus;
        dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
                xfer, seg_idx, usb_status, seg->status);
        if (seg->status == WA_SEG_ABORTED
            || seg->status == WA_SEG_ERROR)     /* already handled */
                goto segment_aborted;
        if (seg->status == WA_SEG_SUBMITTED)    /* oops, got here */
                seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
        if (seg->status != WA_SEG_PENDING) {
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
                                xfer, seg_idx, seg->status);
                seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
        }
        if (usb_status & 0x80) {
                seg->result = wa_xfer_status_to_errno(usb_status);
                dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
                        xfer, seg->index, usb_status);
                goto error_complete;
        }
        /* FIXME: we ignore warnings, tally them for stats */
        if (usb_status & 0x40)          /* Warning?... */
                usb_status = 0;         /* ... pass */
        if (xfer->is_inbound) { /* IN data phase: read to buffer */
                seg->status = WA_SEG_DTI_PENDING;
                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
                if (xfer->is_dma) {
                        wa->buf_in_urb->transfer_dma =
                                xfer->urb->transfer_dma
                                + seg_idx * xfer->seg_size;
                        wa->buf_in_urb->transfer_flags
                                |= URB_NO_TRANSFER_DMA_MAP;
                } else {
                        wa->buf_in_urb->transfer_buffer =
                                xfer->urb->transfer_buffer
                                + seg_idx * xfer->seg_size;
                        wa->buf_in_urb->transfer_flags
                                &= ~URB_NO_TRANSFER_DMA_MAP;
                }
                wa->buf_in_urb->transfer_buffer_length =
                        le32_to_cpu(xfer_result->dwTransferLength);
                wa->buf_in_urb->context = seg;
                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
                if (result < 0)
                        goto error_submit_buf_in;
        } else {
                /* OUT data phase, complete it -- */
                seg->status = WA_SEG_DONE;
                seg->result = le32_to_cpu(xfer_result->dwTransferLength);
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return;

error_submit_buf_in:
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
                        xfer, seg_idx, result);
        seg->result = result;
error_complete:
        seg->status = WA_SEG_ERROR;
        xfer->segs_done++;
        rpipe_ready = rpipe_avail_inc(rpipe);
        __wa_xfer_abort(xfer);
        done = __wa_xfer_is_done(xfer);
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return;

error_bad_seg:
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_urb_dequeue(wa, xfer->urb);
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        return;

segment_aborted:
        /* nothing to do, as the aborter did the completion */
        spin_unlock_irqrestore(&xfer->lock, flags);
}
1363
1364/*
1365 * Callback for the IN data phase
1366 *
1367 * If succesful transition state; otherwise, take a note of the
1368 * error, mark this segment done and try completion.
1369 *
1370 * Note we don't access until we are sure that the transfer hasn't
1371 * been cancelled (ECONNRESET, ENOENT), which could mean that
1372 * seg->xfer could be already gone.
1373 */
1374static void wa_buf_in_cb(struct urb *urb)
1375{
1376        struct wa_seg *seg = urb->context;
1377        struct wa_xfer *xfer = seg->xfer;
1378        struct wahc *wa;
1379        struct device *dev;
1380        struct wa_rpipe *rpipe;
1381        unsigned rpipe_ready;
1382        unsigned long flags;
1383        u8 done = 0;
1384
1385        switch (urb->status) {
1386        case 0:
1387                spin_lock_irqsave(&xfer->lock, flags);
1388                wa = xfer->wa;
1389                dev = &wa->usb_iface->dev;
1390                rpipe = xfer->ep->hcpriv;
1391                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1392                        xfer, seg->index, (size_t)urb->actual_length);
1393                seg->status = WA_SEG_DONE;
1394                seg->result = urb->actual_length;
1395                xfer->segs_done++;
1396                rpipe_ready = rpipe_avail_inc(rpipe);
1397                done = __wa_xfer_is_done(xfer);
1398                spin_unlock_irqrestore(&xfer->lock, flags);
1399                if (done)
1400                        wa_xfer_completion(xfer);
1401                if (rpipe_ready)
1402                        wa_xfer_delayed_run(rpipe);
1403                break;
1404        case -ECONNRESET:       /* URB unlinked; no need to do anything */
1405        case -ENOENT:           /* as it was done by whoever unlinked us */
1406                break;
1407        default:                /* Other errors ... */
1408                spin_lock_irqsave(&xfer->lock, flags);
1409                wa = xfer->wa;
1410                dev = &wa->usb_iface->dev;
1411                rpipe = xfer->ep->hcpriv;
1412                if (printk_ratelimit())
1413                        dev_err(dev, "xfer %p#%u: data in error %d\n",
1414                                xfer, seg->index, urb->status);
1415                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1416                            EDC_ERROR_TIMEFRAME)) {
1417                        dev_err(dev, "DTI: URB max acceptable errors "
1418                                "exceeded, resetting device\n");
1419                        wa_reset_all(wa);
1420                }
1421                seg->status = WA_SEG_ERROR;
1422                seg->result = urb->status;
1423                xfer->segs_done++;
1424                rpipe_ready = rpipe_avail_inc(rpipe);
1425                __wa_xfer_abort(xfer);
1426                done = __wa_xfer_is_done(xfer);
1427                spin_unlock_irqrestore(&xfer->lock, flags);
1428                if (done)
1429                        wa_xfer_completion(xfer);
1430                if (rpipe_ready)
1431                        wa_xfer_delayed_run(rpipe);
1432        }
1433}
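
/*
 * Illustrative sketch only, NOT driver code: every done/error path in
 * this file repeats the same segment-completion bookkeeping seen
 * above. Factored out, it would look roughly like this helper. The
 * helper name is made up; all calls are the file's own. xfer->lock
 * must be held on entry and is dropped here.
 */
static void sketch_wa_seg_account_done(struct wa_xfer *xfer,
                                       struct wa_rpipe *rpipe,
                                       unsigned long flags)
{
        unsigned rpipe_ready;
        u8 done;

        xfer->segs_done++;                      /* one more segment finished */
        rpipe_ready = rpipe_avail_inc(rpipe);   /* free an rpipe slot */
        done = __wa_xfer_is_done(xfer);         /* all segments accounted? */
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);       /* hand the URB back */
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);     /* kick queued segments */
}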
1434
1435/*
1436 * Handle an incoming transfer result buffer
1437 *
1438 * Given a transfer result buffer, it completes the transfer (possibly
1439 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1440 * new transfer result read.
1441 *
1442 *
1443 * The xfer_result DTI URB state machine
1444 *
1445 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1446 *
1447 * We start in OFF mode, the first xfer_result notification [through
1448 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1449 * read.
1450 *
1451 * We receive a buffer -- if it is not an xfer_result, we complain and
1452 * repost the DTI-URB. If it is an xfer_result, we do the xfer seg
1453 * request accounting. If it is an IN segment, we move to RBI and post
1454 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1455 * repost the DTI-URB and move back to RXR state. If there was no IN
1456 * segment, it will repost the DTI-URB directly.
1457 *
1458 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
1459 * errors) in the URBs; see the transition sketch after the function.
1460 */
1461static void wa_xfer_result_cb(struct urb *urb)
1462{
1463        int result;
1464        struct wahc *wa = urb->context;
1465        struct device *dev = &wa->usb_iface->dev;
1466        struct wa_xfer_result *xfer_result;
1467        u32 xfer_id;
1468        struct wa_xfer *xfer;
1469        u8 usb_status;
1470
1471        BUG_ON(wa->dti_urb != urb);
1472        switch (wa->dti_urb->status) {
1473        case 0:
1474                /* We have an xfer result buffer; check it */
1475                dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1476                        urb->actual_length, urb->transfer_buffer);
1477                if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1478                        dev_err(dev, "DTI Error: xfer result--bad size "
1479                                "(%d bytes vs %zu needed)\n",
1480                                urb->actual_length, sizeof(*xfer_result));
1481                        break;
1482                }
1483                xfer_result = wa->xfer_result;
1484                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1485                        dev_err(dev, "DTI Error: xfer result--"
1486                                "bad header length %u\n",
1487                                xfer_result->hdr.bLength);
1488                        break;
1489                }
1490                if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1491                        dev_err(dev, "DTI Error: xfer result--"
1492                                "bad header type 0x%02x\n",
1493                                xfer_result->hdr.bNotifyType);
1494                        break;
1495                }
1496                usb_status = xfer_result->bTransferStatus & 0x3f;
1497                if (usb_status == WA_XFER_STATUS_ABORTED
1498                    || usb_status == WA_XFER_STATUS_NOT_FOUND)
1499                        /* taken care of already */
1500                        break;
1501                xfer_id = le32_to_cpu(xfer_result->dwTransferID);
1502                xfer = wa_xfer_get_by_id(wa, xfer_id);
1503                if (xfer == NULL) {
1504                        /* FIXME: transaction might have been cancelled */
1505                        dev_err(dev, "DTI Error: xfer result--"
1506                                "unknown xfer 0x%08x (status 0x%02x)\n",
1507                                xfer_id, usb_status);
1508                        break;
1509                }
1510                wa_xfer_result_chew(wa, xfer);
1511                wa_xfer_put(xfer);
1512                break;
1513        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
1514        case -ESHUTDOWN:        /* going away! */
1515                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1516                goto out;
1517        default:
1518                /* Unknown error */
1519                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1520                            EDC_ERROR_TIMEFRAME)) {
1521                        dev_err(dev, "DTI: URB max acceptable errors "
1522                                "exceeded, resetting device\n");
1523                        wa_reset_all(wa);
1524                        goto out;
1525                }
1526                if (printk_ratelimit())
1527                        dev_err(dev, "DTI: URB error %d\n", urb->status);
1528                break;
1529        }
1530        /* Resubmit the DTI URB */
1531        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1532        if (result < 0) {
1533                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1534                        "resetting\n", result);
1535                wa_reset_all(wa);
1536        }
1537out:
1538        return;
1539}
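
/*
 * Illustrative sketch only, NOT driver code: the OFF/RXR/RBI state
 * machine documented before wa_xfer_result_cb(), reduced to an enum
 * and a transition helper. All names below are invented for
 * exposition.
 */
enum sketch_dti_state { SKETCH_DTI_OFF, SKETCH_DTI_RXR, SKETCH_DTI_RBI };

static enum sketch_dti_state sketch_dti_next(enum sketch_dti_state cur,
                                             int urb_dead, int in_segment)
{
        if (urb_dead)                   /* ENOENT/ESHUTDOWN/too many errors */
                return SKETCH_DTI_OFF;
        switch (cur) {
        case SKETCH_DTI_OFF:            /* first notification posts DTI URB */
                return SKETCH_DTI_RXR;
        case SKETCH_DTI_RXR:            /* read an xfer_result */
                return in_segment ? SKETCH_DTI_RBI  /* post BUF-IN URB */
                                  : SKETCH_DTI_RXR; /* repost DTI URB */
        case SKETCH_DTI_RBI:            /* buf-in done: repost DTI URB */
        default:
                return SKETCH_DTI_RXR;
        }
}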
1540
1541/*
1542 * Transfer complete notification
1543 *
1544 * Called from the notif.c code. We get a notification on EP2 saying
1545 * that some endpoint has some transfer result data available. We are
1546 * about to read it.
1547 *
1548 * To speed things up, we always keep a URB posted reading on the DTI
1549 * endpoint; we don't actually set it up and submit it until the first
1550 * xfer complete notification arrives, which is what we do here.
1551 *
1552 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1553 * machine starts.
1554 *
1555 * So here we just initialize the DTI URB for reading transfer
1556 * results, plus the buffer-in URB for reading data buffers; then we
1557 * submit the DTI URB (the calling side is sketched after this function).
1558 *
1559 * @wa shall be referenced
1560 */
1561void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1562{
1563        int result;
1564        struct device *dev = &wa->usb_iface->dev;
1565        struct wa_notif_xfer *notif_xfer;
1566        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1567
1568        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1569        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1570
1571        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1572                /* FIXME: hardcoded limitation, adapt */
1573                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1574                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1575                goto error;
1576        }
1577        if (wa->dti_urb != NULL)        /* DTI URB already started */
1578                goto out;
1579
1580        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1581        if (wa->dti_urb == NULL) {
1582                dev_err(dev, "Can't allocate DTI URB\n");
1583                goto error_dti_urb_alloc;
1584        }
1585        usb_fill_bulk_urb(
1586                wa->dti_urb, wa->usb_dev,
1587                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1588                wa->xfer_result, wa->xfer_result_size,
1589                wa_xfer_result_cb, wa);
1590
1591        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1592        if (wa->buf_in_urb == NULL) {
1593                dev_err(dev, "Can't allocate BUF-IN URB\n");
1594                goto error_buf_in_urb_alloc;
1595        }
1596        usb_fill_bulk_urb(
1597                wa->buf_in_urb, wa->usb_dev,
1598                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1599                NULL, 0, wa_buf_in_cb, wa);
1600        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1601        if (result < 0) {
1602                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1603                        "resetting\n", result);
1604                goto error_dti_urb_submit;
1605        }
1606out:
1607        return;
1608
1609error_dti_urb_submit:
1610        usb_put_urb(wa->buf_in_urb);
1611        wa->buf_in_urb = NULL;
1612error_buf_in_urb_alloc:
1613        usb_put_urb(wa->dti_urb);
1614        wa->dti_urb = NULL;
1614error_dti_urb_alloc:
1615error:
1616        wa_reset_all(wa);
1617}
1618
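
/*
 * Illustrative sketch only, NOT driver code: roughly how the
 * notification path in notif.c might reach wa_handle_notif_xfer()
 * above. The dispatcher name and shape are assumptions for
 * exposition; only WA_NOTIF_TRANSFER and the handler itself come
 * from this driver.
 */
static void sketch_notif_dispatch(struct wahc *wa,
                                  struct wa_notif_hdr *notif_hdr)
{
        switch (notif_hdr->bNotifyType) {
        case WA_NOTIF_TRANSFER:
                /* the first of these brings the DTI machinery up */
                wa_handle_notif_xfer(wa, notif_hdr);
                break;
        default:
                /* other notification types are handled elsewhere */
                break;
        }
}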