linux/drivers/usb/wusbcore/wa-xfer.c
   1/*
   2 * WUSB Wire Adapter
   3 * Data transfer and URB enqueing
   4 *
   5 * Copyright (C) 2005-2006 Intel Corporation
   6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License version
  10 * 2 as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  20 * 02110-1301, USA.
  21 *
  22 *
  23 * How transfers work: get a buffer, break it up in segments (segment
  24 * size is a multiple of the maxpacket size). For each segment issue a
  25 * segment request (struct wa_xfer_*), then send the data buffer if
  26 * out or nothing if in (all over the DTO endpoint).
  27 *
  28 * For each submitted segment request, a notification will come over
  29 * the NEP endpoint and a transfer result (struct xfer_result) will
  30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
  31 * data coming (inbound transfer), schedule a read and handle it.
  32 *
  33 * Sounds simple, it is a pain to implement.
  34 *
  35 *
  36 * ENTRY POINTS
  37 *
  38 *   FIXME
  39 *
  40 * LIFE CYCLE / STATE DIAGRAM
  41 *
  42 *   FIXME
  43 *
  44 * THIS CODE IS DISGUSTING
  45 *
  46 *   Warned you are; it's my second try and still not happy with it.
  47 *
  48 * NOTES:
  49 *
  50 *   - No iso
  51 *
  52 *   - Supports DMA xfers, control, bulk and maybe interrupt
  53 *
  54 *   - Does not recycle unused rpipes
  55 *
  56 *     An rpipe is assigned to an endpoint the first time it is used,
  57 *     and then it's there, assigned, until the endpoint is disabled
   58 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
  59 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
  60 *     (should be a mutex).
  61 *
   62 *     Two ways this could be done:
  63 *
  64 *     (a) set up a timer every time an rpipe's use count drops to 1
  65 *         (which means unused) or when a transfer ends. Reset the
  66 *         timer when a xfer is queued. If the timer expires, release
   67 *         the rpipe [see rpipe_ep_disable()]; a sketch follows this comment.
  68 *
   69 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
   70 *         and none are found, go over the list, check each rpipe's
   71 *         endpoint and activity record, and if it has seen no
   72 *         last-xfer-done timestamp in the last X seconds, take it.
  73 *
   74 *     However, because we have a set of limited resources
   75 *     (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
   76 *     blocks-per-rpipe, rpipes-per-host), in the end we are going to
   77 *     have to rebuild all this on top of a scheduler: keep a list of
   78 *     transactions to do and, based on the availability of the
   79 *     different required components (blocks, rpipes, segment slots,
   80 *     etc.), schedule them. Painful.
  81 */
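     /*
      * Purely illustrative sketch of approach (a) above: an idle timer that
      * releases an rpipe which has seen no recent activity.  The
      * 'release_timer' field, the 5 second timeout and the idle test are
      * assumptions made up for this example; none of them exist in the
      * driver today.
      *
      *   static void rpipe_idle_timeout(unsigned long _rpipe)
      *   {
      *           struct wa_rpipe *rpipe = (void *)_rpipe;
      *
      *           // All request slots free again: nothing in flight, so drop
      *           // the reference that keeps the rpipe bound to its endpoint.
      *           if (atomic_read(&rpipe->segs_available)
      *               == le16_to_cpu(rpipe->descr.wRequests))
      *                   rpipe_put(rpipe);
      *   }
      *
      *   // ...re-armed from the submit/completion paths with something like
      *   //   mod_timer(&rpipe->release_timer, jiffies + 5 * HZ);
      */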
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/slab.h>
  85#include <linux/hash.h>
  86#include <linux/ratelimit.h>
  87#include <linux/export.h>
  88#include <linux/scatterlist.h>
  89
  90#include "wa-hc.h"
  91#include "wusbhc.h"
  92
  93enum {
  94        WA_SEGS_MAX = 255,
  95};
  96
  97enum wa_seg_status {
  98        WA_SEG_NOTREADY,
  99        WA_SEG_READY,
 100        WA_SEG_DELAYED,
 101        WA_SEG_SUBMITTED,
 102        WA_SEG_PENDING,
 103        WA_SEG_DTI_PENDING,
 104        WA_SEG_DONE,
 105        WA_SEG_ERROR,
 106        WA_SEG_ABORTED,
 107};
 108
 109static void wa_xfer_delayed_run(struct wa_rpipe *);
 110
 111/*
 112 * Life cycle governed by 'struct urb' (the refcount of the struct is
 113 * that of the 'struct urb' and usb_free_urb() would free the whole
 114 * struct).
 115 */
 116struct wa_seg {
 117        struct urb urb;
 118        struct urb *dto_urb;            /* for data output? */
 119        struct list_head list_node;     /* for rpipe->req_list */
 120        struct wa_xfer *xfer;           /* out xfer */
 121        u8 index;                       /* which segment we are */
 122        enum wa_seg_status status;
 123        ssize_t result;                 /* bytes xfered or error */
 124        struct wa_xfer_hdr xfer_hdr;
  125        u8 xfer_extra[];                /* extra space for xfer_hdr_ctl */
 126};
 127
 128static inline void wa_seg_init(struct wa_seg *seg)
 129{
 130        usb_init_urb(&seg->urb);
 131
 132        /* set the remaining memory to 0. */
 133        memset(((void *)seg) + sizeof(seg->urb), 0,
 134                sizeof(*seg) - sizeof(seg->urb));
 135}
 136
 137/*
 138 * Protected by xfer->lock
 139 *
 140 */
 141struct wa_xfer {
 142        struct kref refcnt;
 143        struct list_head list_node;
 144        spinlock_t lock;
 145        u32 id;
 146
 147        struct wahc *wa;                /* Wire adapter we are plugged to */
 148        struct usb_host_endpoint *ep;
 149        struct urb *urb;                /* URB we are transferring for */
 150        struct wa_seg **seg;            /* transfer segments */
 151        u8 segs, segs_submitted, segs_done;
 152        unsigned is_inbound:1;
 153        unsigned is_dma:1;
 154        size_t seg_size;
 155        int result;
 156
 157        gfp_t gfp;                      /* allocation mask */
 158
 159        struct wusb_dev *wusb_dev;      /* for activity timestamps */
 160};
 161
 162static inline void wa_xfer_init(struct wa_xfer *xfer)
 163{
 164        kref_init(&xfer->refcnt);
 165        INIT_LIST_HEAD(&xfer->list_node);
 166        spin_lock_init(&xfer->lock);
 167}
 168
 169/*
 170 * Destroy a transfer structure
 171 *
 172 * Note that freeing xfer->seg[cnt]->urb will free the containing
 173 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 174 */
 175static void wa_xfer_destroy(struct kref *_xfer)
 176{
 177        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
 178        if (xfer->seg) {
 179                unsigned cnt;
 180                for (cnt = 0; cnt < xfer->segs; cnt++) {
 181                        usb_free_urb(xfer->seg[cnt]->dto_urb);
 182                        usb_free_urb(&xfer->seg[cnt]->urb);
  183                }
                     kfree(xfer->seg);       /* the pointer array; segs are freed via their URBs */
  184        }
 185        kfree(xfer);
 186}
 187
 188static void wa_xfer_get(struct wa_xfer *xfer)
 189{
 190        kref_get(&xfer->refcnt);
 191}
 192
 193static void wa_xfer_put(struct wa_xfer *xfer)
 194{
 195        kref_put(&xfer->refcnt, wa_xfer_destroy);
 196}
 197
 198/*
 199 * xfer is referenced
 200 *
 201 * xfer->lock has to be unlocked
 202 *
 203 * We take xfer->lock for setting the result; this is a barrier
 204 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 205 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 206 * reference to the transfer.
 207 */
 208static void wa_xfer_giveback(struct wa_xfer *xfer)
 209{
 210        unsigned long flags;
 211
 212        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
 213        list_del_init(&xfer->list_node);
 214        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
 215        /* FIXME: segmentation broken -- kills DWA */
 216        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
 217        wa_put(xfer->wa);
 218        wa_xfer_put(xfer);
 219}
 220
 221/*
 222 * xfer is referenced
 223 *
 224 * xfer->lock has to be unlocked
 225 */
 226static void wa_xfer_completion(struct wa_xfer *xfer)
 227{
 228        if (xfer->wusb_dev)
 229                wusb_dev_put(xfer->wusb_dev);
 230        rpipe_put(xfer->ep->hcpriv);
 231        wa_xfer_giveback(xfer);
 232}
 233
 234/*
 235 * If transfer is done, wrap it up and return true
 236 *
 237 * xfer->lock has to be locked
 238 */
 239static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
 240{
 241        struct device *dev = &xfer->wa->usb_iface->dev;
 242        unsigned result, cnt;
 243        struct wa_seg *seg;
 244        struct urb *urb = xfer->urb;
 245        unsigned found_short = 0;
 246
 247        result = xfer->segs_done == xfer->segs_submitted;
 248        if (result == 0)
 249                goto out;
 250        urb->actual_length = 0;
 251        for (cnt = 0; cnt < xfer->segs; cnt++) {
 252                seg = xfer->seg[cnt];
 253                switch (seg->status) {
 254                case WA_SEG_DONE:
 255                        if (found_short && seg->result > 0) {
 256                                dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
 257                                        xfer, cnt, seg->result);
 258                                urb->status = -EINVAL;
 259                                goto out;
 260                        }
 261                        urb->actual_length += seg->result;
 262                        if (seg->result < xfer->seg_size
 263                            && cnt != xfer->segs-1)
 264                                found_short = 1;
 265                        dev_dbg(dev, "xfer %p#%u: DONE short %d "
 266                                "result %zu urb->actual_length %d\n",
 267                                xfer, seg->index, found_short, seg->result,
 268                                urb->actual_length);
 269                        break;
 270                case WA_SEG_ERROR:
 271                        xfer->result = seg->result;
 272                        dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
 273                                xfer, seg->index, seg->result);
 274                        goto out;
 275                case WA_SEG_ABORTED:
 276                        dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
 277                                xfer, seg->index, urb->status);
 278                        xfer->result = urb->status;
 279                        goto out;
 280                default:
 281                        dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
 282                                 xfer, cnt, seg->status);
 283                        xfer->result = -EINVAL;
 284                        goto out;
 285                }
 286        }
 287        xfer->result = 0;
 288out:
 289        return result;
 290}
 291
 292/*
 293 * Initialize a transfer's ID
 294 *
 295 * We need to use a sequential number; if we use the pointer or the
 296 * hash of the pointer, it can repeat over sequential transfers and
 297 * then it will confuse the HWA....wonder why in hell they put a 32
 298 * bit handle in there then.
 299 */
 300static void wa_xfer_id_init(struct wa_xfer *xfer)
 301{
 302        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
 303}
 304
 305/*
  306 * Return the ID associated with this xfer.
  307 *
  308 * (IDs are generated sequentially by wa_xfer_id_init(), see above.)
 309 */
 310static u32 wa_xfer_id(struct wa_xfer *xfer)
 311{
 312        return xfer->id;
 313}
 314
 315/*
  316 * Search for a transfer with the given ID on the wire adapter's xfer list
  317 *
  318 * Takes a reference on the transfer when found (the caller must drop it
  319 * with wa_xfer_put()).
 320 *
 321 * @returns NULL if not found.
 322 */
 323static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
 324{
 325        unsigned long flags;
 326        struct wa_xfer *xfer_itr;
 327        spin_lock_irqsave(&wa->xfer_list_lock, flags);
 328        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
 329                if (id == xfer_itr->id) {
 330                        wa_xfer_get(xfer_itr);
 331                        goto out;
 332                }
 333        }
 334        xfer_itr = NULL;
 335out:
 336        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 337        return xfer_itr;
 338}
 339
 340struct wa_xfer_abort_buffer {
 341        struct urb urb;
 342        struct wa_xfer_abort cmd;
 343};
 344
 345static void __wa_xfer_abort_cb(struct urb *urb)
 346{
 347        struct wa_xfer_abort_buffer *b = urb->context;
 348        usb_put_urb(&b->urb);
 349}
 350
 351/*
 352 * Aborts an ongoing transaction
 353 *
 354 * Assumes the transfer is referenced and locked and in a submitted
 355 * state (mainly that there is an endpoint/rpipe assigned).
 356 *
 357 * The callback (see above) does nothing but freeing up the data by
 358 * putting the URB. Because the URB is allocated at the head of the
 359 * struct, the whole space we allocated is kfreed.
 360 *
  361 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 362 * politely ignore because at this point the transaction has been
 363 * marked as aborted already.
 364 */
 365static void __wa_xfer_abort(struct wa_xfer *xfer)
 366{
 367        int result;
 368        struct device *dev = &xfer->wa->usb_iface->dev;
 369        struct wa_xfer_abort_buffer *b;
 370        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 371
 372        b = kmalloc(sizeof(*b), GFP_ATOMIC);
 373        if (b == NULL)
 374                goto error_kmalloc;
 375        b->cmd.bLength =  sizeof(b->cmd);
 376        b->cmd.bRequestType = WA_XFER_ABORT;
 377        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
 378        b->cmd.dwTransferID = wa_xfer_id(xfer);
 379
 380        usb_init_urb(&b->urb);
 381        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
 382                usb_sndbulkpipe(xfer->wa->usb_dev,
 383                                xfer->wa->dto_epd->bEndpointAddress),
 384                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
 385        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
 386        if (result < 0)
 387                goto error_submit;
 388        return;                         /* callback frees! */
 389
 390
 391error_submit:
 392        if (printk_ratelimit())
 393                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
 394                        xfer, result);
 395        kfree(b);
 396error_kmalloc:
 397        return;
 398
 399}
 400
 401/*
  402 * Compute the transfer's segmentation and per-segment request header size.
 403 * @returns < 0 on error, transfer segment request size if ok
 404 */
 405static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 406                                     enum wa_xfer_type *pxfer_type)
 407{
 408        ssize_t result;
 409        struct device *dev = &xfer->wa->usb_iface->dev;
 410        size_t maxpktsize;
 411        struct urb *urb = xfer->urb;
 412        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 413
 414        switch (rpipe->descr.bmAttribute & 0x3) {
 415        case USB_ENDPOINT_XFER_CONTROL:
 416                *pxfer_type = WA_XFER_TYPE_CTL;
 417                result = sizeof(struct wa_xfer_ctl);
 418                break;
 419        case USB_ENDPOINT_XFER_INT:
 420        case USB_ENDPOINT_XFER_BULK:
 421                *pxfer_type = WA_XFER_TYPE_BI;
 422                result = sizeof(struct wa_xfer_bi);
 423                break;
 424        case USB_ENDPOINT_XFER_ISOC:
 425                dev_err(dev, "FIXME: ISOC not implemented\n");
 426                result = -ENOSYS;
 427                goto error;
 428        default:
 429                /* never happens */
 430                BUG();
 431                result = -EINVAL;       /* shut gcc up */
  432        }
 433        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
 434        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
 435        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
 436                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
 437        /* Compute the segment size and make sure it is a multiple of
 438         * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
 439         * a check (FIXME) */
 440        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
 441        if (xfer->seg_size < maxpktsize) {
 442                dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
 443                        "%zu\n", xfer->seg_size, maxpktsize);
 444                result = -EINVAL;
 445                goto error;
 446        }
 447        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
 448        xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
 449        if (xfer->segs >= WA_SEGS_MAX) {
  450                dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
 451                        (int)(urb->transfer_buffer_length / xfer->seg_size),
 452                        WA_SEGS_MAX);
 453                result = -EINVAL;
 454                goto error;
 455        }
 456        if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
 457                xfer->segs = 1;
 458error:
 459        return result;
 460}
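     /*
      * Worked example for the sizing above, with illustrative (made-up)
      * numbers: if the rpipe reports wBlocks = 16 and the adapter reports
      * bRPipeBlockSize = 10, the block size is 1 << (10 - 1) = 512 bytes,
      * so seg_size = 16 * 512 = 8192 bytes.  With wMaxPacketSize = 512,
      * 8192 is already a multiple and is kept as-is; a 20000 byte URB then
      * yields segs = DIV_ROUND_UP(20000, 8192) = 3 segments carrying 8192,
      * 8192 and 3616 bytes respectively.
      */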
 461
 462/* Fill in the common request header and xfer-type specific data. */
 463static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
 464                                 struct wa_xfer_hdr *xfer_hdr0,
 465                                 enum wa_xfer_type xfer_type,
 466                                 size_t xfer_hdr_size)
 467{
 468        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 469
 470        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
 471        xfer_hdr0->bLength = xfer_hdr_size;
 472        xfer_hdr0->bRequestType = xfer_type;
 473        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
 474        xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
 475        xfer_hdr0->bTransferSegment = 0;
 476        switch (xfer_type) {
 477        case WA_XFER_TYPE_CTL: {
 478                struct wa_xfer_ctl *xfer_ctl =
 479                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
 480                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
 481                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
 482                       sizeof(xfer_ctl->baSetupData));
 483                break;
 484        }
 485        case WA_XFER_TYPE_BI:
 486                break;
 487        case WA_XFER_TYPE_ISO:
 488                printk(KERN_ERR "FIXME: ISOC not implemented\n");
 489        default:
 490                BUG();
  491        }
 492}
 493
 494/*
 495 * Callback for the OUT data phase of the segment request
 496 *
 497 * Check wa_seg_cb(); most comments also apply here because this
 498 * function does almost the same thing and they work closely
 499 * together.
 500 *
 501 * If the seg request has failed but this DTO phase has succeeded,
 502 * wa_seg_cb() has already failed the segment and moved the
 503 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 504 * effectively do nothing.
 505 */
 506static void wa_seg_dto_cb(struct urb *urb)
 507{
 508        struct wa_seg *seg = urb->context;
 509        struct wa_xfer *xfer = seg->xfer;
 510        struct wahc *wa;
 511        struct device *dev;
 512        struct wa_rpipe *rpipe;
 513        unsigned long flags;
 514        unsigned rpipe_ready = 0;
 515        u8 done = 0;
 516
 517        switch (urb->status) {
 518        case 0:
 519                spin_lock_irqsave(&xfer->lock, flags);
 520                wa = xfer->wa;
 521                dev = &wa->usb_iface->dev;
 522                dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
 523                        xfer, seg->index, urb->actual_length);
 524                if (seg->status < WA_SEG_PENDING)
 525                        seg->status = WA_SEG_PENDING;
 526                seg->result = urb->actual_length;
 527                spin_unlock_irqrestore(&xfer->lock, flags);
 528                break;
 529        case -ECONNRESET:       /* URB unlinked; no need to do anything */
  530        case -ENOENT:           /* as it was done by whoever unlinked us */
 531                break;
 532        default:                /* Other errors ... */
 533                spin_lock_irqsave(&xfer->lock, flags);
 534                wa = xfer->wa;
 535                dev = &wa->usb_iface->dev;
 536                rpipe = xfer->ep->hcpriv;
 537                dev_dbg(dev, "xfer %p#%u: data out error %d\n",
 538                        xfer, seg->index, urb->status);
 539                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 540                            EDC_ERROR_TIMEFRAME)){
 541                        dev_err(dev, "DTO: URB max acceptable errors "
 542                                "exceeded, resetting device\n");
 543                        wa_reset_all(wa);
 544                }
 545                if (seg->status != WA_SEG_ERROR) {
 546                        seg->status = WA_SEG_ERROR;
 547                        seg->result = urb->status;
 548                        xfer->segs_done++;
 549                        __wa_xfer_abort(xfer);
 550                        rpipe_ready = rpipe_avail_inc(rpipe);
 551                        done = __wa_xfer_is_done(xfer);
 552                }
 553                spin_unlock_irqrestore(&xfer->lock, flags);
 554                if (done)
 555                        wa_xfer_completion(xfer);
 556                if (rpipe_ready)
 557                        wa_xfer_delayed_run(rpipe);
 558        }
 559}
 560
 561/*
 562 * Callback for the segment request
 563 *
  564 * On success, transition the segment state (unless it has already
  565 * transitioned or the transfer is outbound); otherwise, take note of
  566 * the error, mark this segment done and try completion.
 567 *
  568 * Note we don't access the transfer until we are sure it hasn't been
  569 * cancelled (ECONNRESET, ENOENT), in which case seg->xfer could
  570 * already be gone.
 571 *
 572 * We have to check before setting the status to WA_SEG_PENDING
 573 * because sometimes the xfer result callback arrives before this
 574 * callback (geeeeeeze), so it might happen that we are already in
  575 * another state. As well, we only set it if the transfer is inbound;
  576 * for outbound transfers, wa_seg_dto_cb will do it when the OUT data
  577 * phase finishes.
 578 */
 579static void wa_seg_cb(struct urb *urb)
 580{
 581        struct wa_seg *seg = urb->context;
 582        struct wa_xfer *xfer = seg->xfer;
 583        struct wahc *wa;
 584        struct device *dev;
 585        struct wa_rpipe *rpipe;
 586        unsigned long flags;
 587        unsigned rpipe_ready;
 588        u8 done = 0;
 589
 590        switch (urb->status) {
 591        case 0:
 592                spin_lock_irqsave(&xfer->lock, flags);
 593                wa = xfer->wa;
 594                dev = &wa->usb_iface->dev;
 595                dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
 596                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
 597                        seg->status = WA_SEG_PENDING;
 598                spin_unlock_irqrestore(&xfer->lock, flags);
 599                break;
 600        case -ECONNRESET:       /* URB unlinked; no need to do anything */
  601        case -ENOENT:           /* as it was done by whoever unlinked us */
 602                break;
 603        default:                /* Other errors ... */
 604                spin_lock_irqsave(&xfer->lock, flags);
 605                wa = xfer->wa;
 606                dev = &wa->usb_iface->dev;
 607                rpipe = xfer->ep->hcpriv;
 608                if (printk_ratelimit())
 609                        dev_err(dev, "xfer %p#%u: request error %d\n",
 610                                xfer, seg->index, urb->status);
 611                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 612                            EDC_ERROR_TIMEFRAME)){
 613                        dev_err(dev, "DTO: URB max acceptable errors "
 614                                "exceeded, resetting device\n");
 615                        wa_reset_all(wa);
 616                }
 617                usb_unlink_urb(seg->dto_urb);
 618                seg->status = WA_SEG_ERROR;
 619                seg->result = urb->status;
 620                xfer->segs_done++;
 621                __wa_xfer_abort(xfer);
 622                rpipe_ready = rpipe_avail_inc(rpipe);
 623                done = __wa_xfer_is_done(xfer);
 624                spin_unlock_irqrestore(&xfer->lock, flags);
 625                if (done)
 626                        wa_xfer_completion(xfer);
 627                if (rpipe_ready)
 628                        wa_xfer_delayed_run(rpipe);
 629        }
 630}
 631
 632/* allocate an SG list to store bytes_to_transfer bytes and copy the
 633 * subset of the in_sg that matches the buffer subset
 634 * we are about to transfer. */
 635static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
 636        const unsigned int bytes_transferred,
 637        const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
 638{
 639        struct scatterlist *out_sg;
 640        unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
 641                nents;
 642        struct scatterlist *current_xfer_sg = in_sg;
 643        struct scatterlist *current_seg_sg, *last_seg_sg;
 644
 645        /* skip previously transferred pages. */
 646        while ((current_xfer_sg) &&
 647                        (bytes_processed < bytes_transferred)) {
 648                bytes_processed += current_xfer_sg->length;
 649
 650                /* advance the sg if current segment starts on or past the
 651                        next page. */
 652                if (bytes_processed <= bytes_transferred)
 653                        current_xfer_sg = sg_next(current_xfer_sg);
 654        }
 655
 656        /* the data for the current segment starts in current_xfer_sg.
 657                calculate the offset. */
 658        if (bytes_processed > bytes_transferred) {
 659                offset_into_current_page_data = current_xfer_sg->length -
 660                        (bytes_processed - bytes_transferred);
 661        }
 662
 663        /* calculate the number of pages needed by this segment. */
 664        nents = DIV_ROUND_UP((bytes_to_transfer +
 665                offset_into_current_page_data +
 666                current_xfer_sg->offset),
 667                PAGE_SIZE);
 668
 669        out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
 670        if (out_sg) {
 671                sg_init_table(out_sg, nents);
 672
 673                /* copy the portion of the incoming SG that correlates to the
 674                 * data to be transferred by this segment to the segment SG. */
 675                last_seg_sg = current_seg_sg = out_sg;
 676                bytes_processed = 0;
 677
 678                /* reset nents and calculate the actual number of sg entries
 679                        needed. */
 680                nents = 0;
 681                while ((bytes_processed < bytes_to_transfer) &&
 682                                current_seg_sg && current_xfer_sg) {
 683                        unsigned int page_len = min((current_xfer_sg->length -
 684                                offset_into_current_page_data),
 685                                (bytes_to_transfer - bytes_processed));
 686
 687                        sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
 688                                page_len,
 689                                current_xfer_sg->offset +
 690                                offset_into_current_page_data);
 691
 692                        bytes_processed += page_len;
 693
 694                        last_seg_sg = current_seg_sg;
 695                        current_seg_sg = sg_next(current_seg_sg);
 696                        current_xfer_sg = sg_next(current_xfer_sg);
 697
 698                        /* only the first page may require additional offset. */
 699                        offset_into_current_page_data = 0;
 700                        nents++;
 701                }
 702
 703                /* update num_sgs and terminate the list since we may have
 704                 *  concatenated pages. */
 705                sg_mark_end(last_seg_sg);
 706                *out_num_sgs = nents;
 707        }
 708
 709        return out_sg;
 710}
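     /*
      * Worked example with made-up numbers: say in_sg describes three full
      * pages (3 x 4096 bytes, assuming 4 KiB pages), bytes_transferred =
      * 4096 and this segment moves bytes_to_transfer = 6000.  The skip loop
      * above stops at the second sg entry with no intra-page offset, nents
      * is first estimated as DIV_ROUND_UP(6000 + 0 + 0, PAGE_SIZE) = 2, and
      * the copy loop then emits two entries of 4096 and 1904 bytes, marking
      * the second one as the end of the list.
      */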
 711
 712/*
 713 * Allocate the segs array and initialize each of them
 714 *
 715 * The segments are freed by wa_xfer_destroy() when the xfer use count
 716 * drops to zero; however, because each segment is given the same life
 717 * cycle as the USB URB it contains, it is actually freed by
 718 * usb_put_urb() on the contained USB URB (twisted, eh?).
 719 */
 720static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 721{
 722        int result, cnt;
 723        size_t alloc_size = sizeof(*xfer->seg[0])
 724                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
 725        struct usb_device *usb_dev = xfer->wa->usb_dev;
 726        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
 727        struct wa_seg *seg;
 728        size_t buf_itr, buf_size, buf_itr_size;
 729
 730        result = -ENOMEM;
 731        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
 732        if (xfer->seg == NULL)
 733                goto error_segs_kzalloc;
 734        buf_itr = 0;
 735        buf_size = xfer->urb->transfer_buffer_length;
 736        for (cnt = 0; cnt < xfer->segs; cnt++) {
 737                seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
 738                if (seg == NULL)
 739                        goto error_seg_kmalloc;
 740                wa_seg_init(seg);
 741                seg->xfer = xfer;
 742                seg->index = cnt;
 743                usb_fill_bulk_urb(&seg->urb, usb_dev,
 744                                  usb_sndbulkpipe(usb_dev,
 745                                                  dto_epd->bEndpointAddress),
 746                                  &seg->xfer_hdr, xfer_hdr_size,
 747                                  wa_seg_cb, seg);
 748                buf_itr_size = min(buf_size, xfer->seg_size);
 749                if (xfer->is_inbound == 0 && buf_size > 0) {
 750                        /* outbound data. */
 751                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
 752                        if (seg->dto_urb == NULL)
 753                                goto error_dto_alloc;
 754                        usb_fill_bulk_urb(
 755                                seg->dto_urb, usb_dev,
 756                                usb_sndbulkpipe(usb_dev,
 757                                                dto_epd->bEndpointAddress),
 758                                NULL, 0, wa_seg_dto_cb, seg);
 759                        if (xfer->is_dma) {
 760                                seg->dto_urb->transfer_dma =
 761                                        xfer->urb->transfer_dma + buf_itr;
 762                                seg->dto_urb->transfer_flags |=
 763                                        URB_NO_TRANSFER_DMA_MAP;
 764                                seg->dto_urb->transfer_buffer = NULL;
 765                                seg->dto_urb->sg = NULL;
 766                                seg->dto_urb->num_sgs = 0;
 767                        } else {
 768                                /* do buffer or SG processing. */
 769                                seg->dto_urb->transfer_flags &=
 770                                        ~URB_NO_TRANSFER_DMA_MAP;
 771                                /* this should always be 0 before a resubmit. */
 772                                seg->dto_urb->num_mapped_sgs = 0;
 773
 774                                if (xfer->urb->transfer_buffer) {
 775                                        seg->dto_urb->transfer_buffer =
 776                                                xfer->urb->transfer_buffer +
 777                                                buf_itr;
 778                                        seg->dto_urb->sg = NULL;
 779                                        seg->dto_urb->num_sgs = 0;
 780                                } else {
 781                                        /* allocate an SG list to store seg_size
 782                                            bytes and copy the subset of the
 783                                            xfer->urb->sg that matches the
 784                                            buffer subset we are about to read.
 785                                        */
 786                                        seg->dto_urb->sg =
 787                                                wa_xfer_create_subset_sg(
 788                                                xfer->urb->sg,
 789                                                buf_itr, buf_itr_size,
 790                                                &(seg->dto_urb->num_sgs));
 791
 792                                        if (!(seg->dto_urb->sg)) {
 793                                                seg->dto_urb->num_sgs   = 0;
 794                                                goto error_sg_alloc;
 795                                        }
 796
 797                                        seg->dto_urb->transfer_buffer = NULL;
 798                                }
 799                        }
 800                        seg->dto_urb->transfer_buffer_length = buf_itr_size;
 801                }
 802                seg->status = WA_SEG_READY;
 803                buf_itr += buf_itr_size;
 804                buf_size -= buf_itr_size;
 805        }
 806        return 0;
 807
 808error_sg_alloc:
 809        usb_free_urb(xfer->seg[cnt]->dto_urb);
 810error_dto_alloc:
 811        kfree(xfer->seg[cnt]);
 812        cnt--;
 813error_seg_kmalloc:
  814        /* use the fact that cnt is left at where it failed */
  815        for (; cnt >= 0; cnt--) {
  816                if (xfer->seg[cnt] && xfer->is_inbound == 0) {
  817                        kfree(xfer->seg[cnt]->dto_urb->sg);
  818                        usb_free_urb(xfer->seg[cnt]->dto_urb);
  819                }
 820                kfree(xfer->seg[cnt]);
 821        }
 822error_segs_kzalloc:
 823        return result;
 824}
 825
 826/*
 827 * Allocates all the stuff needed to submit a transfer
 828 *
  829 * Breaks the whole data buffer into a list of segments; each one has a
 830 * structure allocated to it and linked in xfer->seg[index]
 831 *
 832 * FIXME: merge setup_segs() and the last part of this function, no
 833 *        need to do two for loops when we could run everything in a
 834 *        single one
 835 */
 836static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
 837{
 838        int result;
 839        struct device *dev = &xfer->wa->usb_iface->dev;
 840        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
 841        size_t xfer_hdr_size, cnt, transfer_size;
 842        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
 843
 844        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
 845        if (result < 0)
 846                goto error_setup_sizes;
 847        xfer_hdr_size = result;
 848        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
 849        if (result < 0) {
 850                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
 851                        xfer, xfer->segs, result);
 852                goto error_setup_segs;
 853        }
 854        /* Fill the first header */
 855        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
 856        wa_xfer_id_init(xfer);
 857        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
 858
  859        /* Fill remaining headers */
 860        xfer_hdr = xfer_hdr0;
 861        transfer_size = urb->transfer_buffer_length;
  862        xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
  863                cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
 864        transfer_size -=  xfer->seg_size;
 865        for (cnt = 1; cnt < xfer->segs; cnt++) {
 866                xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
 867                memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
 868                xfer_hdr->bTransferSegment = cnt;
 869                xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
 870                        cpu_to_le32(xfer->seg_size)
 871                        : cpu_to_le32(transfer_size);
 872                xfer->seg[cnt]->status = WA_SEG_READY;
 873                transfer_size -=  xfer->seg_size;
 874        }
 875        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
 876        result = 0;
 877error_setup_segs:
 878error_setup_sizes:
 879        return result;
 880}
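     /*
      * Continuing the illustrative numbers from __wa_xfer_setup_sizes():
      * with seg_size = 8192 and a 20000 byte buffer, the loop above fills
      * dwTransferLength with 8192, 8192 and 3616 for segments 0, 1 and 2,
      * and segment 2 additionally gets bTransferSegment ORed with 0x80 to
      * flag it as the last segment of the transfer.
      */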
 881
 882/*
  883 * Submit a transfer segment's request URB (and its DTO URB if present).
  884 *
 885 * rpipe->seg_lock is held!
 886 */
 887static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
 888                           struct wa_seg *seg)
 889{
 890        int result;
 891        result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
 892        if (result < 0) {
 893                printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
 894                       xfer, seg->index, result);
 895                goto error_seg_submit;
 896        }
 897        if (seg->dto_urb) {
 898                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
 899                if (result < 0) {
 900                        printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
 901                               xfer, seg->index, result);
 902                        goto error_dto_submit;
 903                }
 904        }
 905        seg->status = WA_SEG_SUBMITTED;
 906        rpipe_avail_dec(rpipe);
 907        return 0;
 908
 909error_dto_submit:
 910        usb_unlink_urb(&seg->urb);
 911error_seg_submit:
 912        seg->status = WA_SEG_ERROR;
 913        seg->result = result;
 914        return result;
 915}
 916
 917/*
  918 * Execute more queued request segments, up to the maximum concurrently allowed
  919 *
  920 * The ugly unlock/lock sequence on the error path is needed as the
  921 * xfer->lock normally nests the seg_lock and not vice versa.
 922 *
 923 */
 924static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
 925{
 926        int result;
 927        struct device *dev = &rpipe->wa->usb_iface->dev;
 928        struct wa_seg *seg;
 929        struct wa_xfer *xfer;
 930        unsigned long flags;
 931
 932        spin_lock_irqsave(&rpipe->seg_lock, flags);
 933        while (atomic_read(&rpipe->segs_available) > 0
 934              && !list_empty(&rpipe->seg_list)) {
 935                seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
 936                                 list_node);
 937                list_del(&seg->list_node);
 938                xfer = seg->xfer;
 939                result = __wa_seg_submit(rpipe, xfer, seg);
 940                dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
 941                        xfer, seg->index, atomic_read(&rpipe->segs_available), result);
 942                if (unlikely(result < 0)) {
 943                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 944                        spin_lock_irqsave(&xfer->lock, flags);
 945                        __wa_xfer_abort(xfer);
 946                        xfer->segs_done++;
 947                        spin_unlock_irqrestore(&xfer->lock, flags);
 948                        spin_lock_irqsave(&rpipe->seg_lock, flags);
 949                }
 950        }
 951        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 952}
 953
 954/*
 955 *
 956 * xfer->lock is taken
 957 *
  958 * If a segment submission fails, we just stop submitting and return the
  959 * error; wa_urb_enqueue_b() will execute the completion path
 960 */
 961static int __wa_xfer_submit(struct wa_xfer *xfer)
 962{
 963        int result;
 964        struct wahc *wa = xfer->wa;
 965        struct device *dev = &wa->usb_iface->dev;
 966        unsigned cnt;
 967        struct wa_seg *seg;
 968        unsigned long flags;
 969        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 970        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
 971        u8 available;
 972        u8 empty;
 973
 974        spin_lock_irqsave(&wa->xfer_list_lock, flags);
 975        list_add_tail(&xfer->list_node, &wa->xfer_list);
 976        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 977
 978        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
 979        result = 0;
 980        spin_lock_irqsave(&rpipe->seg_lock, flags);
 981        for (cnt = 0; cnt < xfer->segs; cnt++) {
 982                available = atomic_read(&rpipe->segs_available);
 983                empty = list_empty(&rpipe->seg_list);
 984                seg = xfer->seg[cnt];
 985                dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
 986                        xfer, cnt, available, empty,
 987                        available == 0 || !empty ? "delayed" : "submitted");
 988                if (available == 0 || !empty) {
 989                        dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
 990                        seg->status = WA_SEG_DELAYED;
 991                        list_add_tail(&seg->list_node, &rpipe->seg_list);
 992                } else {
 993                        result = __wa_seg_submit(rpipe, xfer, seg);
 994                        if (result < 0) {
 995                                __wa_xfer_abort(xfer);
 996                                goto error_seg_submit;
 997                        }
 998                }
 999                xfer->segs_submitted++;
1000        }
1001error_seg_submit:
1002        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1003        return result;
1004}
1005
1006/*
 1007 * Second part of the URB/transfer enqueueing process
1008 *
1009 * Assumes this comes from wa_urb_enqueue() [maybe through
1010 * wa_urb_enqueue_run()]. At this point:
1011 *
1012 * xfer->wa     filled and refcounted
1013 * xfer->ep     filled with rpipe refcounted if
1014 *              delayed == 0
1015 * xfer->urb    filled and refcounted (this is the case when called
1016 *              from wa_urb_enqueue() as we come from usb_submit_urb()
1017 *              and when called by wa_urb_enqueue_run(), as we took an
1018 *              extra ref dropped by _run() after we return).
1019 * xfer->gfp    filled
1020 *
1021 * If we fail at __wa_xfer_submit(), then we just check if we are done
1022 * and if so, we run the completion procedure. However, if we are not
1023 * yet done, we do nothing and wait for the completion handlers from
1024 * the submitted URBs or from the xfer-result path to kick in. If xfer
1025 * result never kicks in, the xfer will timeout from the USB code and
1026 * dequeue() will be called.
1027 */
1028static void wa_urb_enqueue_b(struct wa_xfer *xfer)
1029{
1030        int result;
1031        unsigned long flags;
1032        struct urb *urb = xfer->urb;
1033        struct wahc *wa = xfer->wa;
1034        struct wusbhc *wusbhc = wa->wusb;
1035        struct wusb_dev *wusb_dev;
1036        unsigned done;
1037
1038        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1039        if (result < 0)
1040                goto error_rpipe_get;
1041        result = -ENODEV;
1042        /* FIXME: segmentation broken -- kills DWA */
1043        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
1044        if (urb->dev == NULL) {
1045                mutex_unlock(&wusbhc->mutex);
1046                goto error_dev_gone;
1047        }
1048        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1049        if (wusb_dev == NULL) {
1050                mutex_unlock(&wusbhc->mutex);
1051                goto error_dev_gone;
1052        }
1053        mutex_unlock(&wusbhc->mutex);
1054
1055        spin_lock_irqsave(&xfer->lock, flags);
1056        xfer->wusb_dev = wusb_dev;
1057        result = urb->status;
1058        if (urb->status != -EINPROGRESS)
1059                goto error_dequeued;
1060
1061        result = __wa_xfer_setup(xfer, urb);
1062        if (result < 0)
1063                goto error_xfer_setup;
1064        result = __wa_xfer_submit(xfer);
1065        if (result < 0)
1066                goto error_xfer_submit;
1067        spin_unlock_irqrestore(&xfer->lock, flags);
1068        return;
1069
 1070        /* this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
 1071         * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
 1072         * up / undo what setup() did.
1073         */
1074error_xfer_setup:
1075error_dequeued:
1076        spin_unlock_irqrestore(&xfer->lock, flags);
1077        /* FIXME: segmentation broken, kills DWA */
1078        if (wusb_dev)
1079                wusb_dev_put(wusb_dev);
1080error_dev_gone:
1081        rpipe_put(xfer->ep->hcpriv);
1082error_rpipe_get:
1083        xfer->result = result;
1084        wa_xfer_giveback(xfer);
1085        return;
1086
1087error_xfer_submit:
1088        done = __wa_xfer_is_done(xfer);
1089        xfer->result = result;
1090        spin_unlock_irqrestore(&xfer->lock, flags);
1091        if (done)
1092                wa_xfer_completion(xfer);
1093}
1094
1095/*
1096 * Execute the delayed transfers in the Wire Adapter @wa
1097 *
1098 * We need to be careful here, as dequeue() could be called in the
 1099 * middle.  That's why the list manipulation is done while holding the
 1100 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1101 * and then checks the list -- so as we would be acquiring in inverse
1102 * order, we move the delayed list to a separate list while locked and then
1103 * submit them without the list lock held.
1104 */
1105void wa_urb_enqueue_run(struct work_struct *ws)
1106{
1107        struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1108        struct wa_xfer *xfer, *next;
1109        struct urb *urb;
1110        LIST_HEAD(tmp_list);
1111
 1112        /* Splice wa->xfer_delayed_list onto a temporary list while holding the lock */
1113        spin_lock_irq(&wa->xfer_list_lock);
1114        list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1115                        wa->xfer_delayed_list.prev);
1116        spin_unlock_irq(&wa->xfer_list_lock);
1117
1118        /*
1119         * enqueue from temp list without list lock held since wa_urb_enqueue_b
1120         * can take xfer->lock as well as lock mutexes.
1121         */
1122        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1123                list_del_init(&xfer->list_node);
1124
1125                urb = xfer->urb;
1126                wa_urb_enqueue_b(xfer);
1127                usb_put_urb(urb);       /* taken when queuing */
1128        }
1129}
1130EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1131
1132/*
1133 * Process the errored transfers on the Wire Adapter outside of interrupt.
1134 */
1135void wa_process_errored_transfers_run(struct work_struct *ws)
1136{
1137        struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1138        struct wa_xfer *xfer, *next;
1139        LIST_HEAD(tmp_list);
1140
1141        pr_info("%s: Run delayed STALL processing.\n", __func__);
1142
 1143        /* Splice wa->xfer_errored_list onto a temporary list while holding the lock */
1144        spin_lock_irq(&wa->xfer_list_lock);
1145        list_cut_position(&tmp_list, &wa->xfer_errored_list,
1146                        wa->xfer_errored_list.prev);
1147        spin_unlock_irq(&wa->xfer_list_lock);
1148
1149        /*
1150         * run rpipe_clear_feature_stalled from temp list without list lock
1151         * held.
1152         */
1153        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1154                struct usb_host_endpoint *ep;
1155                unsigned long flags;
1156                struct wa_rpipe *rpipe;
1157
1158                spin_lock_irqsave(&xfer->lock, flags);
1159                ep = xfer->ep;
1160                rpipe = ep->hcpriv;
1161                spin_unlock_irqrestore(&xfer->lock, flags);
1162
1163                /* clear RPIPE feature stalled without holding a lock. */
1164                rpipe_clear_feature_stalled(wa, ep);
1165
1166                /* complete the xfer. This removes it from the tmp list. */
1167                wa_xfer_completion(xfer);
1168
1169                /* check for work. */
1170                wa_xfer_delayed_run(rpipe);
1171        }
1172}
1173EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1174
1175/*
1176 * Submit a transfer to the Wire Adapter in a delayed way
1177 *
 1178 * The process of enqueuing may sleep [see wa_urb_enqueue_b(), for the
 1179 * rpipe_get() and the mutex_lock()]. If we are in an atomic section, we
 1180 * defer the wa_urb_enqueue_b() call to a workqueue; else we call it directly.
1181 *
1182 * @urb: We own a reference to it done by the HCI Linux USB stack that
1183 *       will be given up by calling usb_hcd_giveback_urb() or by
1184 *       returning error from this function -> ergo we don't have to
1185 *       refcount it.
1186 */
1187int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1188                   struct urb *urb, gfp_t gfp)
1189{
1190        int result;
1191        struct device *dev = &wa->usb_iface->dev;
1192        struct wa_xfer *xfer;
1193        unsigned long my_flags;
1194        unsigned cant_sleep = irqs_disabled() | in_atomic();
1195
1196        if ((urb->transfer_buffer == NULL)
1197            && (urb->sg == NULL)
1198            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1199            && urb->transfer_buffer_length != 0) {
1200                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1201                dump_stack();
1202        }
1203
1204        result = -ENOMEM;
1205        xfer = kzalloc(sizeof(*xfer), gfp);
1206        if (xfer == NULL)
1207                goto error_kmalloc;
1208
1209        result = -ENOENT;
1210        if (urb->status != -EINPROGRESS)        /* cancelled */
1211                goto error_dequeued;            /* before starting? */
1212        wa_xfer_init(xfer);
1213        xfer->wa = wa_get(wa);
1214        xfer->urb = urb;
1215        xfer->gfp = gfp;
1216        xfer->ep = ep;
1217        urb->hcpriv = xfer;
1218
1219        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1220                xfer, urb, urb->pipe, urb->transfer_buffer_length,
1221                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1222                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1223                cant_sleep ? "deferred" : "inline");
1224
1225        if (cant_sleep) {
1226                usb_get_urb(urb);
1227                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1228                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1229                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1230                queue_work(wusbd, &wa->xfer_enqueue_work);
1231        } else {
1232                wa_urb_enqueue_b(xfer);
1233        }
1234        return 0;
1235
1236error_dequeued:
1237        kfree(xfer);
1238error_kmalloc:
1239        return result;
1240}
1241EXPORT_SYMBOL_GPL(wa_urb_enqueue);
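     /*
      * Sketch of how a host controller driver feeds URBs to wa_urb_enqueue()
      * from its hc_driver->urb_enqueue op.  This wrapper is hypothetical and
      * for illustration only (the real HWA glue lives in hwa-hc.c):
      *
      *   static int example_op_urb_enqueue(struct usb_hcd *usb_hcd,
      *                                     struct urb *urb, gfp_t gfp)
      *   {
      *           struct wahc *wa = ...;  // from the hcd's private data
      *
      *           return wa_urb_enqueue(wa, urb->ep, urb, gfp);
      *   }
      */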
1242
1243/*
 1244 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1245 * handler] is called.
1246 *
 1247 * Once a transfer has gone successfully through wa_urb_enqueue(), it
 1248 * needs to be dequeued with completion calling; when stuck in the
 1249 * delayed list or before __wa_xfer_setup() is called, we need to do completion.
1250 *
 1251 *  not setup  If there is no hcpriv yet, that means that enqueue
1252 *             still had no time to set the xfer up. Because
1253 *             urb->status should be other than -EINPROGRESS,
1254 *             enqueue() will catch that and bail out.
1255 *
1256 * If the transfer has gone through setup, we just need to clean it
1257 * up. If it has gone through submit(), we have to abort it [with an
1258 * asynch request] and then make sure we cancel each segment.
1259 *
1260 */
1261int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1262{
1263        unsigned long flags, flags2;
1264        struct wa_xfer *xfer;
1265        struct wa_seg *seg;
1266        struct wa_rpipe *rpipe;
1267        unsigned cnt;
1268        unsigned rpipe_ready = 0;
1269
1270        xfer = urb->hcpriv;
1271        if (xfer == NULL) {
1272                /*
1273                 * Nothing setup yet enqueue will see urb->status !=
1274                 * -EINPROGRESS (by hcd layer) and bail out with
1275                 * error, no need to do completion
1276                 */
1277                BUG_ON(urb->status == -EINPROGRESS);
1278                goto out;
1279        }
1280        spin_lock_irqsave(&xfer->lock, flags);
1281        rpipe = xfer->ep->hcpriv;
1282        if (rpipe == NULL) {
1283                pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s",
1284                        __func__, wa_xfer_id(xfer),
 1285                        "Probably already aborted.\n");
1286                goto out_unlock;
1287        }
1288        /* Check the delayed list -> if there, release and complete */
1289        spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1290        if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1291                goto dequeue_delayed;
1292        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1293        if (xfer->seg == NULL)          /* still hasn't reached */
1294                goto out_unlock;        /* setup(), enqueue_b() completes */
1295        /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1296        __wa_xfer_abort(xfer);
1297        for (cnt = 0; cnt < xfer->segs; cnt++) {
1298                seg = xfer->seg[cnt];
1299                switch (seg->status) {
1300                case WA_SEG_NOTREADY:
1301                case WA_SEG_READY:
1302                        printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1303                               xfer, cnt, seg->status);
1304                        WARN_ON(1);
1305                        break;
1306                case WA_SEG_DELAYED:
1307                        seg->status = WA_SEG_ABORTED;
1308                        spin_lock_irqsave(&rpipe->seg_lock, flags2);
1309                        list_del(&seg->list_node);
1310                        xfer->segs_done++;
1311                        rpipe_ready = rpipe_avail_inc(rpipe);
1312                        spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1313                        break;
1314                case WA_SEG_SUBMITTED:
1315                        seg->status = WA_SEG_ABORTED;
1316                        usb_unlink_urb(&seg->urb);
1317                        if (xfer->is_inbound == 0)
1318                                usb_unlink_urb(seg->dto_urb);
1319                        xfer->segs_done++;
1320                        rpipe_ready = rpipe_avail_inc(rpipe);
1321                        break;
1322                case WA_SEG_PENDING:
1323                        seg->status = WA_SEG_ABORTED;
1324                        xfer->segs_done++;
1325                        rpipe_ready = rpipe_avail_inc(rpipe);
1326                        break;
1327                case WA_SEG_DTI_PENDING:
1328                        usb_unlink_urb(wa->dti_urb);
1329                        seg->status = WA_SEG_ABORTED;
1330                        xfer->segs_done++;
1331                        rpipe_ready = rpipe_avail_inc(rpipe);
1332                        break;
1333                case WA_SEG_DONE:
1334                case WA_SEG_ERROR:
1335                case WA_SEG_ABORTED:
1336                        break;
1337                }
1338        }
1339        xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
1340        __wa_xfer_is_done(xfer);
1341        spin_unlock_irqrestore(&xfer->lock, flags);
1342        wa_xfer_completion(xfer);
1343        if (rpipe_ready)
1344                wa_xfer_delayed_run(rpipe);
1345        return 0;
1346
1347out_unlock:
1348        spin_unlock_irqrestore(&xfer->lock, flags);
1349out:
1350        return 0;
1351
1352dequeue_delayed:
1353        list_del_init(&xfer->list_node);
1354        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1355        xfer->result = urb->status;
1356        spin_unlock_irqrestore(&xfer->lock, flags);
1357        wa_xfer_giveback(xfer);
1358        usb_put_urb(urb);               /* we got a ref in enqueue() */
1359        return 0;
1360}
1361EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1362
1363/*
1364 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1365 * codes
1366 *
1367 * Positive errno values are internal inconsistencies and should be
1368 * flagged louder. Negative are to be passed up to the user in the
1369 * normal way.
1370 *
1371 * @status: USB WA status code -- high two bits are stripped.
1372 */
1373static int wa_xfer_status_to_errno(u8 status)
1374{
1375        int errno;
1376        u8 real_status = status;
1377        static const int xlat[] = {
1378                [WA_XFER_STATUS_SUCCESS] =              0,
1379                [WA_XFER_STATUS_HALTED] =               -EPIPE,
1380                [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
1381                [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
1382                [WA_XFER_RESERVED] =                    EINVAL,
1383                [WA_XFER_STATUS_NOT_FOUND] =            0,
1384                [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1385                [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
1386                [WA_XFER_STATUS_ABORTED] =              -EINTR,
1387                [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
1388                [WA_XFER_INVALID_FORMAT] =              EINVAL,
1389                [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
1390                [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
1391        };
1392        status &= 0x3f;
1393
1394        if (status == 0)
1395                return 0;
1396        if (status >= ARRAY_SIZE(xlat)) {
1397                printk_ratelimited(KERN_ERR "%s(): BUG? "
1398                               "Unknown WA transfer status 0x%02x\n",
1399                               __func__, real_status);
1400                return -EINVAL;
1401        }
1402        errno = xlat[status];
1403        if (unlikely(errno > 0)) {
1404                printk_ratelimited(KERN_ERR "%s(): BUG? "
1405                               "Inconsistent WA status: 0x%02x\n",
1406                               __func__, real_status);
1407                errno = -errno;
1408        }
1409        return errno;
1410}
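/*
 * Illustration only (compiled out): the sign convention of the table
 * above.  A failure the spec expects (e.g. WA_XFER_STATUS_HALTED) comes
 * back as a negative errno and is handed to the URB submitter as-is; a
 * status that should never reach us (e.g. WA_XFER_STATUS_RPIPE_NOT_READY)
 * is stored positive so it gets logged as an inconsistency before being
 * negated.
 */
#if 0
static void wa_xfer_status_to_errno_sketch(void)
{
	/* a spec-level failure: passed to the URB submitter unchanged */
	WARN_ON(wa_xfer_status_to_errno(WA_XFER_STATUS_HALTED) != -EPIPE);
	/* a "can't happen" status: logged as inconsistent, then negated */
	WARN_ON(wa_xfer_status_to_errno(WA_XFER_STATUS_RPIPE_NOT_READY)
		!= -EINVAL);
}
#endif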
1411
1412/*
1413 * Process a xfer result completion message
1414 *
1415 * inbound transfers: need to schedule a DTI read
1416 *
1417 * FIXME: this function needs to be broken up in parts
1418 */
1419static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1420{
1421        int result;
1422        struct device *dev = &wa->usb_iface->dev;
1423        unsigned long flags;
1424        u8 seg_idx;
1425        struct wa_seg *seg;
1426        struct wa_rpipe *rpipe;
1427        struct wa_xfer_result *xfer_result = wa->xfer_result;
1428        u8 done = 0;
1429        u8 usb_status;
1430        unsigned rpipe_ready = 0;
1431
1432        spin_lock_irqsave(&xfer->lock, flags);
1433        seg_idx = xfer_result->bTransferSegment & 0x7f;
1434        if (unlikely(seg_idx >= xfer->segs))
1435                goto error_bad_seg;
1436        seg = xfer->seg[seg_idx];
1437        rpipe = xfer->ep->hcpriv;
1438        usb_status = xfer_result->bTransferStatus;
1439        dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
1440                xfer, seg_idx, usb_status, seg->status);
1441        if (seg->status == WA_SEG_ABORTED
1442            || seg->status == WA_SEG_ERROR)     /* already handled */
1443                goto segment_aborted;
1444        if (seg->status == WA_SEG_SUBMITTED)    /* oops, got here */
1445                seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
1446        if (seg->status != WA_SEG_PENDING) {
1447                if (printk_ratelimit())
1448                        dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1449                                xfer, seg_idx, seg->status);
1450                seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
1451        }
1452        if (usb_status & 0x80) {
1453                seg->result = wa_xfer_status_to_errno(usb_status);
1454                dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
1455                        xfer, xfer->id, seg->index, usb_status);
1456                goto error_complete;
1457        }
1458        /* FIXME: we ignore warnings; they should be tallied for stats */
1459        if (usb_status & 0x40)          /* Warning?... */
1460                usb_status = 0;         /* ... pass */
1461        if (xfer->is_inbound) { /* IN data phase: read to buffer */
1462                seg->status = WA_SEG_DTI_PENDING;
1463                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1464                /* this should always be 0 before a resubmit. */
1465                wa->buf_in_urb->num_mapped_sgs  = 0;
1466
1467                if (xfer->is_dma) {
1468                        wa->buf_in_urb->transfer_dma =
1469                                xfer->urb->transfer_dma
1470                                + (seg_idx * xfer->seg_size);
1471                        wa->buf_in_urb->transfer_flags
1472                                |= URB_NO_TRANSFER_DMA_MAP;
1473                        wa->buf_in_urb->transfer_buffer = NULL;
1474                        wa->buf_in_urb->sg = NULL;
1475                        wa->buf_in_urb->num_sgs = 0;
1476                } else {
1477                        /* do buffer or SG processing. */
1478                        wa->buf_in_urb->transfer_flags
1479                                &= ~URB_NO_TRANSFER_DMA_MAP;
1480
1481                        if (xfer->urb->transfer_buffer) {
1482                                wa->buf_in_urb->transfer_buffer =
1483                                        xfer->urb->transfer_buffer
1484                                        + (seg_idx * xfer->seg_size);
1485                                wa->buf_in_urb->sg = NULL;
1486                                wa->buf_in_urb->num_sgs = 0;
1487                        } else {
1488                                /* Allocate an SG list to store seg_size
1489                                 * bytes and copy the subset of the
1490                                 * xfer->urb->sg that matches the buffer
1491                                 * subset we are about to read. */
1492                                wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
1493                                        xfer->urb->sg,
1494                                        seg_idx * xfer->seg_size,
1495                                        le32_to_cpu(
1496                                                xfer_result->dwTransferLength),
1497                                        &(wa->buf_in_urb->num_sgs));
1498
1499                                if (!(wa->buf_in_urb->sg)) {
1500                                        wa->buf_in_urb->num_sgs = 0;
1501                                        goto error_sg_alloc;
1502                                }
1503                                wa->buf_in_urb->transfer_buffer = NULL;
1504                        }
1505                }
1506                wa->buf_in_urb->transfer_buffer_length =
1507                        le32_to_cpu(xfer_result->dwTransferLength);
1508                wa->buf_in_urb->context = seg;
1509                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1510                if (result < 0)
1511                        goto error_submit_buf_in;
1512        } else {
1513                /* OUT data phase, complete it -- */
1514                seg->status = WA_SEG_DONE;
1515                seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1516                xfer->segs_done++;
1517                rpipe_ready = rpipe_avail_inc(rpipe);
1518                done = __wa_xfer_is_done(xfer);
1519        }
1520        spin_unlock_irqrestore(&xfer->lock, flags);
1521        if (done)
1522                wa_xfer_completion(xfer);
1523        if (rpipe_ready)
1524                wa_xfer_delayed_run(rpipe);
1525        return;
1526
1527error_submit_buf_in:
1528        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1529                dev_err(dev, "DTI: URB max acceptable errors "
1530                        "exceeded, resetting device\n");
1531                wa_reset_all(wa);
1532        }
1533        if (printk_ratelimit())
1534                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1535                        xfer, seg_idx, result);
1536        seg->result = result;
1537        kfree(wa->buf_in_urb->sg);
1538error_sg_alloc:
1539        __wa_xfer_abort(xfer);
1540error_complete:
1541        seg->status = WA_SEG_ERROR;
1542        xfer->segs_done++;
1543        rpipe_ready = rpipe_avail_inc(rpipe);
1544        done = __wa_xfer_is_done(xfer);
1545        /*
1546         * queue work item to clear STALL for control endpoints.
1547         * Otherwise, let endpoint_reset take care of it.
1548         */
1549        if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
1550                usb_endpoint_xfer_control(&xfer->ep->desc) &&
1551                done) {
1552
1553                dev_info(dev, "Control EP stall.  Queue delayed work.\n");
1554                spin_lock_irq(&wa->xfer_list_lock);
1555                /* remove xfer from xfer_list. */
1556                list_del(&xfer->list_node);
1557                /* add xfer to xfer_errored_list. */
1558                list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
1559                spin_unlock_irq(&wa->xfer_list_lock);
1560                spin_unlock_irqrestore(&xfer->lock, flags);
1561                queue_work(wusbd, &wa->xfer_error_work);
1562        } else {
1563                spin_unlock_irqrestore(&xfer->lock, flags);
1564                if (done)
1565                        wa_xfer_completion(xfer);
1566                if (rpipe_ready)
1567                        wa_xfer_delayed_run(rpipe);
1568        }
1569
1570        return;
1571
1572error_bad_seg:
1573        spin_unlock_irqrestore(&xfer->lock, flags);
1574        wa_urb_dequeue(wa, xfer->urb);
1575        if (printk_ratelimit())
1576                dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1577        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1578                dev_err(dev, "DTI: URB max acceptable errors "
1579                        "exceeded, resetting device\n");
1580                wa_reset_all(wa);
1581        }
1582        return;
1583
1584segment_aborted:
1585        /* nothing to do, as the aborter did the completion */
1586        spin_unlock_irqrestore(&xfer->lock, flags);
1587}
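/*
 * Illustration only (compiled out): the magic masks wa_xfer_result_chew()
 * applies to the result fields, given names.  These macros do not exist
 * in the driver; they merely restate what the literals above select.
 */
#if 0
#define WA_XFER_RESULT_SEG_MASK		0x7f	/* bTransferSegment & 0x7f  */
#define WA_XFER_RESULT_ERROR		0x80	/* bTransferStatus: error    */
#define WA_XFER_RESULT_WARNING		0x40	/* bTransferStatus: warning  */
#define WA_XFER_RESULT_CODE_MASK	0x3f	/* low 6 bits: status code   */
#endif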
1588
1589/*
1590 * Callback for the IN data phase
1591 *
1592 * If successful, transition state; otherwise, take note of the
1593 * error, mark this segment done and try completion.
1594 *
1595 * Note we don't touch the transfer until we are sure it hasn't been
1596 * cancelled (ECONNRESET, ENOENT), as in that case seg->xfer may
1597 * already be gone.
1598 */
1599static void wa_buf_in_cb(struct urb *urb)
1600{
1601        struct wa_seg *seg = urb->context;
1602        struct wa_xfer *xfer = seg->xfer;
1603        struct wahc *wa;
1604        struct device *dev;
1605        struct wa_rpipe *rpipe;
1606        unsigned rpipe_ready;
1607        unsigned long flags;
1608        u8 done = 0;
1609
1610        /* free the sg if it was used. */
1611        kfree(urb->sg);
1612        urb->sg = NULL;
1613
1614        switch (urb->status) {
1615        case 0:
1616                spin_lock_irqsave(&xfer->lock, flags);
1617                wa = xfer->wa;
1618                dev = &wa->usb_iface->dev;
1619                rpipe = xfer->ep->hcpriv;
1620                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1621                        xfer, seg->index, (size_t)urb->actual_length);
1622                seg->status = WA_SEG_DONE;
1623                seg->result = urb->actual_length;
1624                xfer->segs_done++;
1625                rpipe_ready = rpipe_avail_inc(rpipe);
1626                done = __wa_xfer_is_done(xfer);
1627                spin_unlock_irqrestore(&xfer->lock, flags);
1628                if (done)
1629                        wa_xfer_completion(xfer);
1630                if (rpipe_ready)
1631                        wa_xfer_delayed_run(rpipe);
1632                break;
1633        case -ECONNRESET:       /* URB unlinked; no need to do anything */
1634        case -ENOENT:           /* as it was done by whoever unlinked us */
1635                break;
1636        default:                /* Other errors ... */
1637                spin_lock_irqsave(&xfer->lock, flags);
1638                wa = xfer->wa;
1639                dev = &wa->usb_iface->dev;
1640                rpipe = xfer->ep->hcpriv;
1641                if (printk_ratelimit())
1642                        dev_err(dev, "xfer %p#%u: data in error %d\n",
1643                                xfer, seg->index, urb->status);
1644                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1645                            EDC_ERROR_TIMEFRAME)) {
1646                        dev_err(dev, "DTO: URB max acceptable errors "
1647                                "exceeded, resetting device\n");
1648                        wa_reset_all(wa);
1649                }
1650                seg->status = WA_SEG_ERROR;
1651                seg->result = urb->status;
1652                xfer->segs_done++;
1653                rpipe_ready = rpipe_avail_inc(rpipe);
1654                __wa_xfer_abort(xfer);
1655                done = __wa_xfer_is_done(xfer);
1656                spin_unlock_irqrestore(&xfer->lock, flags);
1657                if (done)
1658                        wa_xfer_completion(xfer);
1659                if (rpipe_ready)
1660                        wa_xfer_delayed_run(rpipe);
1661        }
1662}
1663
1664/*
1665 * Handle an incoming transfer result buffer
1666 *
1667 * Given a transfer result buffer, it completes the transfer (possibly
1668 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1669 * new transfer result read.
1670 *
1671 *
1672 * The xfer_result DTI URB state machine
1673 *
1674 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1675 *
1676 * We start in OFF mode, the first xfer_result notification [through
1677 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1678 * read.
1679 *
1680 * We receive a buffer -- if it is not a xfer_result, we complain and
1681 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
1682 * request accounting. If it is an IN segment, we move to RBI and post
1683 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1684 * repost the DTI-URB and move to RXR state. If there was no IN
1685 * segment, it will repost the DTI-URB.
1686 *
1687 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
1688 * errors) in the URBs.
1689 */
1690static void wa_xfer_result_cb(struct urb *urb)
1691{
1692        int result;
1693        struct wahc *wa = urb->context;
1694        struct device *dev = &wa->usb_iface->dev;
1695        struct wa_xfer_result *xfer_result;
1696        u32 xfer_id;
1697        struct wa_xfer *xfer;
1698        u8 usb_status;
1699
1700        BUG_ON(wa->dti_urb != urb);
1701        switch (wa->dti_urb->status) {
1702        case 0:
1703                /* We have a xfer result buffer; check it */
1704                dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1705                        urb->actual_length, urb->transfer_buffer);
1706                if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1707                        dev_err(dev, "DTI Error: xfer result--bad size "
1708                                "(%d bytes vs %zu needed)\n",
1709                                urb->actual_length, sizeof(*xfer_result));
1710                        break;
1711                }
1712                xfer_result = wa->xfer_result;
1713                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1714                        dev_err(dev, "DTI Error: xfer result--"
1715                                "bad header length %u\n",
1716                                xfer_result->hdr.bLength);
1717                        break;
1718                }
1719                if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1720                        dev_err(dev, "DTI Error: xfer result--"
1721                                "bad header type 0x%02x\n",
1722                                xfer_result->hdr.bNotifyType);
1723                        break;
1724                }
1725                usb_status = xfer_result->bTransferStatus & 0x3f;
1726                if (usb_status == WA_XFER_STATUS_NOT_FOUND)
1727                        /* taken care of already */
1728                        break;
1729                xfer_id = xfer_result->dwTransferID;
1730                xfer = wa_xfer_get_by_id(wa, xfer_id);
1731                if (xfer == NULL) {
1732                        /* FIXME: transaction might have been cancelled */
1733                        dev_err(dev, "DTI Error: xfer result--"
1734                                "unknown xfer 0x%08x (status 0x%02x)\n",
1735                                xfer_id, usb_status);
1736                        break;
1737                }
1738                wa_xfer_result_chew(wa, xfer);
1739                wa_xfer_put(xfer);
1740                break;
1741        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
1742        case -ESHUTDOWN:        /* going away! */
1743                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1744                goto out;
1745        default:
1746                /* Unknown error */
1747                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1748                            EDC_ERROR_TIMEFRAME)) {
1749                        dev_err(dev, "DTI: URB max acceptable errors "
1750                                "exceeded, resetting device\n");
1751                        wa_reset_all(wa);
1752                        goto out;
1753                }
1754                if (printk_ratelimit())
1755                        dev_err(dev, "DTI: URB error %d\n", urb->status);
1756                break;
1757        }
1758        /* Resubmit the DTI URB */
1759        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1760        if (result < 0) {
1761                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1762                        "resetting\n", result);
1763                wa_reset_all(wa);
1764        }
1765out:
1766        return;
1767}
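/*
 * Illustration only (compiled out): the OFF/RXR/RBI state machine from
 * the comment above is implicit -- there is no state variable, the state
 * being which URB (DTI or BUF-IN) is currently posted.  The enum below is
 * a reading aid, not something the driver defines or uses.
 */
#if 0
enum wa_dti_state_sketch {
	WA_DTI_OFF,	/* no DTI URB posted yet                          */
	WA_DTI_RXR,	/* DTI URB posted, waiting for an xfer_result     */
	WA_DTI_RBI,	/* BUF-IN URB posted, reading an IN data segment  */
};
#endif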
1768
1769/*
1770 * Transfer complete notification
1771 *
1772 * Called from the notif.c code. We get a notification on EP2 saying
1773 * that some endpoint has some transfer result data available. We are
1774 * about to read it.
1775 *
1776 * To speed things up, we always have a URB reading the DTI endpoint; we
1777 * don't really set it up and start it until the first xfer complete
1778 * notification arrives, which is what we do here.
1779 *
1780 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1781 * machine starts.
1782 *
1783 * So here we just initialize the DTI URB for reading transfer result
1784 * notifications and the buffer-in URB for reading data buffers, and
1785 * then submit the DTI URB.
1786 *
1787 * @wa shall be referenced
1788 */
1789void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1790{
1791        int result;
1792        struct device *dev = &wa->usb_iface->dev;
1793        struct wa_notif_xfer *notif_xfer;
1794        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1795
1796        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1797        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1798
1799        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1800                /* FIXME: hardcoded limitation, adapt */
1801                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1802                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1803                goto error;
1804        }
1805        if (wa->dti_urb != NULL)        /* DTI URB already started */
1806                goto out;
1807
1808        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1809        if (wa->dti_urb == NULL) {
1810                dev_err(dev, "Can't allocate DTI URB\n");
1811                goto error_dti_urb_alloc;
1812        }
1813        usb_fill_bulk_urb(
1814                wa->dti_urb, wa->usb_dev,
1815                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1816                wa->xfer_result, wa->xfer_result_size,
1817                wa_xfer_result_cb, wa);
1818
1819        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1820        if (wa->buf_in_urb == NULL) {
1821                dev_err(dev, "Can't allocate BUF-IN URB\n");
1822                goto error_buf_in_urb_alloc;
1823        }
1824        usb_fill_bulk_urb(
1825                wa->buf_in_urb, wa->usb_dev,
1826                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1827                NULL, 0, wa_buf_in_cb, wa);
1828        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1829        if (result < 0) {
1830                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1831                        "resetting\n", result);
1832                goto error_dti_urb_submit;
1833        }
1834out:
1835        return;
1836
1837error_dti_urb_submit:
1838        usb_put_urb(wa->buf_in_urb);
1839error_buf_in_urb_alloc:
1840        usb_put_urb(wa->dti_urb);
1841        wa->dti_urb = NULL;
1842error_dti_urb_alloc:
1843error:
1844        wa_reset_all(wa);
1845}
1846