linux/drivers/usb/wusbcore/wa-xfer.c
   1/*
   2 * WUSB Wire Adapter
   3 * Data transfer and URB enqueuing
   4 *
   5 * Copyright (C) 2005-2006 Intel Corporation
   6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License version
  10 * 2 as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  20 * 02110-1301, USA.
  21 *
  22 *
   23 * How transfers work: get a buffer, break it up into segments (segment
   24 * size is a multiple of the maxpacket size). For each segment, issue a
   25 * segment request (struct wa_xfer_*) over the DTO endpoint, followed by
   26 * the segment's data if the transfer is outbound (nothing for inbound).
  27 *
  28 * For each submitted segment request, a notification will come over
   29 * the NEP endpoint and a transfer result (struct wa_xfer_result) will
  30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
  31 * data coming (inbound transfer), schedule a read and handle it.
  32 *
  33 * Sounds simple, it is a pain to implement.
  34 *
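 * A worked example (illustrative numbers only): a 12 KiB bulk OUT URB on
 * an rpipe whose segment size works out to 4 KiB is split into three
 * wa_seg's; each segment gets its request header submitted over the DTO
 * endpoint, immediately followed by its 4 KiB slice of the data buffer,
 * and each completion is reported back through a transfer result read
 * from the DTI endpoint. The URB is given back once all three segments
 * are accounted for.
 *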
  35 *
  36 * ENTRY POINTS
  37 *
  38 *   FIXME
  39 *
  40 * LIFE CYCLE / STATE DIAGRAM
  41 *
  42 *   FIXME
  43 *
  44 * THIS CODE IS DISGUSTING
  45 *
   46 *   Warned you are; it's my second try and I'm still not happy with it.
  47 *
  48 * NOTES:
  49 *
  50 *   - No iso
  51 *
  52 *   - Supports DMA xfers, control, bulk and maybe interrupt
  53 *
  54 *   - Does not recycle unused rpipes
  55 *
  56 *     An rpipe is assigned to an endpoint the first time it is used,
  57 *     and then it's there, assigned, until the endpoint is disabled
   58 *     (destroyed) [{h,d}wahc_op_ep_disable()]. The assignment of the
  59 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
  60 *     (should be a mutex).
  61 *
   62 *     Two ways it could be done (a sketch of the first one follows below):
  63 *
  64 *     (a) set up a timer every time an rpipe's use count drops to 1
  65 *         (which means unused) or when a transfer ends. Reset the
   66 *         timer when an xfer is queued. If the timer expires, release
  67 *         the rpipe [see rpipe_ep_disable()].
  68 *
   69 *     (b) when looking for a free rpipe to attach [rpipe_get_by_ep()]
   70 *         and none is found, go over the list, check each rpipe's
   71 *         endpoint and activity record, and take one that has been
   72 *         idle (no last-xfer-done timestamp in the last x seconds).
  73 *
   74 *     However, because we have a set of limited resources
   75 *     (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
   76 *     blocks-per-rpipe, rpipes-per-host), in the end we are going to
   77 *     have to rebuild all of this around a scheduler: keep a list of
   78 *     transactions to do and schedule them based on the availability
   79 *     of the different required components (blocks, rpipes, segment
   80 *     slots, etc). Painful.
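 *
 *     A minimal sketch of (a), assuming a hypothetical idle timer and
 *     timeout added to struct wa_rpipe (none of this exists in the
 *     driver today):
 *
 *         // when the use count drops to 1 (unused) or a transfer ends
 *         mod_timer(&rpipe->idle_timer, jiffies + RPIPE_IDLE_TIMEOUT);
 *         // when an xfer is queued on the endpoint
 *         del_timer(&rpipe->idle_timer);
 *         // if the timer fires, release the rpipe as rpipe_ep_disable()
 *         // does, under wa->rpipe_sem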
  81 */
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/slab.h>
  85#include <linux/hash.h>
  86#include <linux/ratelimit.h>
  87#include <linux/export.h>
  88#include <linux/scatterlist.h>
  89
  90#include "wa-hc.h"
  91#include "wusbhc.h"
  92
  93enum {
  94        /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
  95        WA_SEGS_MAX = 128,
  96};
  97
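/*
 * Approximate segment life cycle, as inferred from the code below:
 *
 *   NOTREADY -> READY        allocated and filled in by __wa_xfer_setup()
 *   READY -> SUBMITTED       request/data URBs submitted [__wa_seg_submit()]
 *   READY -> DELAYED         no rpipe slot (or DTO busy); parked on
 *                            rpipe->seg_list until __wa_xfer_delayed_run()
 *   SUBMITTED -> PENDING     request/data URBs completed, waiting for the
 *                            transfer result on the DTI endpoint
 *   PENDING -> DTI_PENDING   inbound data still to be read over DTI
 *   any -> DONE/ERROR/ABORTED  terminal; counted in xfer->segs_done and
 *                            checked by __wa_xfer_is_done()
 */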
  98enum wa_seg_status {
  99        WA_SEG_NOTREADY,
 100        WA_SEG_READY,
 101        WA_SEG_DELAYED,
 102        WA_SEG_SUBMITTED,
 103        WA_SEG_PENDING,
 104        WA_SEG_DTI_PENDING,
 105        WA_SEG_DONE,
 106        WA_SEG_ERROR,
 107        WA_SEG_ABORTED,
 108};
 109
 110static void wa_xfer_delayed_run(struct wa_rpipe *);
 111static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
 112
 113/*
 114 * Life cycle governed by 'struct urb' (the refcount of the struct is
 115 * that of the 'struct urb' and usb_free_urb() would free the whole
 116 * struct).
 117 */
 118struct wa_seg {
 119        struct urb tr_urb;              /* transfer request urb. */
 120        struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
 121        struct urb *dto_urb;            /* for data output. */
 122        struct list_head list_node;     /* for rpipe->req_list */
        struct wa_xfer *xfer;           /* xfer this segment belongs to */
 124        u8 index;                       /* which segment we are */
 125        int isoc_frame_count;   /* number of isoc frames in this segment. */
 126        int isoc_frame_offset;  /* starting frame offset in the xfer URB. */
 127        int isoc_size;  /* size of all isoc frames sent by this seg. */
 128        enum wa_seg_status status;
 129        ssize_t result;                 /* bytes xfered or error */
 130        struct wa_xfer_hdr xfer_hdr;
 131};
 132
 133static inline void wa_seg_init(struct wa_seg *seg)
 134{
 135        usb_init_urb(&seg->tr_urb);
 136
 137        /* set the remaining memory to 0. */
 138        memset(((void *)seg) + sizeof(seg->tr_urb), 0,
 139                sizeof(*seg) - sizeof(seg->tr_urb));
 140}
 141
 142/*
 143 * Protected by xfer->lock
 144 *
 145 */
 146struct wa_xfer {
 147        struct kref refcnt;
 148        struct list_head list_node;
 149        spinlock_t lock;
 150        u32 id;
 151
 152        struct wahc *wa;                /* Wire adapter we are plugged to */
 153        struct usb_host_endpoint *ep;
 154        struct urb *urb;                /* URB we are transferring for */
 155        struct wa_seg **seg;            /* transfer segments */
 156        u8 segs, segs_submitted, segs_done;
 157        unsigned is_inbound:1;
 158        unsigned is_dma:1;
 159        size_t seg_size;
 160        int result;
 161        /* Isoc frame that the current transfer buffer corresponds to. */
 162        int dto_isoc_frame_index;
 163
 164        gfp_t gfp;                      /* allocation mask */
 165
 166        struct wusb_dev *wusb_dev;      /* for activity timestamps */
 167};
 168
 169static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
 170        struct wa_seg *seg, int curr_iso_frame);
 171
 172static inline void wa_xfer_init(struct wa_xfer *xfer)
 173{
 174        kref_init(&xfer->refcnt);
 175        INIT_LIST_HEAD(&xfer->list_node);
 176        spin_lock_init(&xfer->lock);
 177}
 178
 179/*
 180 * Destroy a transfer structure
 181 *
 182 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 183 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 184 */
 185static void wa_xfer_destroy(struct kref *_xfer)
 186{
 187        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
 188        if (xfer->seg) {
 189                unsigned cnt;
 190                for (cnt = 0; cnt < xfer->segs; cnt++) {
 191                        struct wa_seg *seg = xfer->seg[cnt];
 192                        if (seg) {
 193                                usb_free_urb(seg->isoc_pack_desc_urb);
 194                                if (seg->dto_urb) {
 195                                        kfree(seg->dto_urb->sg);
 196                                        usb_free_urb(seg->dto_urb);
 197                                }
 198                                usb_free_urb(&seg->tr_urb);
 199                        }
 200                }
 201                kfree(xfer->seg);
 202        }
 203        kfree(xfer);
 204}
 205
 206static void wa_xfer_get(struct wa_xfer *xfer)
 207{
 208        kref_get(&xfer->refcnt);
 209}
 210
 211static void wa_xfer_put(struct wa_xfer *xfer)
 212{
 213        kref_put(&xfer->refcnt, wa_xfer_destroy);
 214}
 215
 216/*
 217 * Try to get exclusive access to the DTO endpoint resource.  Return true
 218 * if successful.
 219 */
 220static inline int __wa_dto_try_get(struct wahc *wa)
 221{
 222        return (test_and_set_bit(0, &wa->dto_in_use) == 0);
 223}
 224
 225/* Release the DTO endpoint resource. */
 226static inline void __wa_dto_put(struct wahc *wa)
 227{
 228        clear_bit_unlock(0, &wa->dto_in_use);
 229}
 230
 231/* Service RPIPEs that are waiting on the DTO resource. */
 232static void wa_check_for_delayed_rpipes(struct wahc *wa)
 233{
 234        unsigned long flags;
 235        int dto_waiting = 0;
 236        struct wa_rpipe *rpipe;
 237
 238        spin_lock_irqsave(&wa->rpipe_lock, flags);
 239        while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
 240                rpipe = list_first_entry(&wa->rpipe_delayed_list,
 241                                struct wa_rpipe, list_node);
 242                __wa_xfer_delayed_run(rpipe, &dto_waiting);
 243                /* remove this RPIPE from the list if it is not waiting. */
 244                if (!dto_waiting) {
 245                        pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
 246                                __func__,
 247                                le16_to_cpu(rpipe->descr.wRPipeIndex));
 248                        list_del_init(&rpipe->list_node);
 249                }
 250        }
 251        spin_unlock_irqrestore(&wa->rpipe_lock, flags);
 252}
 253
 254/* add this RPIPE to the end of the delayed RPIPE list. */
 255static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
 256{
 257        unsigned long flags;
 258
 259        spin_lock_irqsave(&wa->rpipe_lock, flags);
 260        /* add rpipe to the list if it is not already on it. */
 261        if (list_empty(&rpipe->list_node)) {
 262                pr_debug("%s: adding RPIPE %d to the delayed list.\n",
 263                        __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
 264                list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
 265        }
 266        spin_unlock_irqrestore(&wa->rpipe_lock, flags);
 267}
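
/*
 * Typical DTO handshake, as used by __wa_xfer_delayed_run() and
 * wa_seg_dto_cb() further down (sketch only):
 *
 *         if (__wa_dto_try_get(wa)) {
 *                 ... submit the DTO data URB(s) for this rpipe ...
 *                 __wa_dto_put(wa);
 *                 wa_check_for_delayed_rpipes(wa);
 *         } else {
 *                 wa_add_delayed_rpipe(wa, rpipe);
 *         }
 */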
 268
 269/*
 270 * xfer is referenced
 271 *
 272 * xfer->lock has to be unlocked
 273 *
 274 * We take xfer->lock for setting the result; this is a barrier
 275 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 276 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 277 * reference to the transfer.
 278 */
 279static void wa_xfer_giveback(struct wa_xfer *xfer)
 280{
 281        unsigned long flags;
 282
 283        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
 284        list_del_init(&xfer->list_node);
 285        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
 286        /* FIXME: segmentation broken -- kills DWA */
 287        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
 288        wa_put(xfer->wa);
 289        wa_xfer_put(xfer);
 290}
 291
 292/*
 293 * xfer is referenced
 294 *
 295 * xfer->lock has to be unlocked
 296 */
 297static void wa_xfer_completion(struct wa_xfer *xfer)
 298{
 299        if (xfer->wusb_dev)
 300                wusb_dev_put(xfer->wusb_dev);
 301        rpipe_put(xfer->ep->hcpriv);
 302        wa_xfer_giveback(xfer);
 303}
 304
 305/*
 306 * Initialize a transfer's ID
 307 *
 308 * We need to use a sequential number; if we use the pointer or the
 309 * hash of the pointer, it can repeat over sequential transfers and
 310 * then it will confuse the HWA....wonder why in hell they put a 32
 311 * bit handle in there then.
 312 */
 313static void wa_xfer_id_init(struct wa_xfer *xfer)
 314{
 315        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
 316}
 317
 318/* Return the xfer's ID. */
 319static inline u32 wa_xfer_id(struct wa_xfer *xfer)
 320{
 321        return xfer->id;
 322}
 323
 324/* Return the xfer's ID in transport format (little endian). */
 325static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
 326{
 327        return cpu_to_le32(xfer->id);
 328}
 329
 330/*
 331 * If transfer is done, wrap it up and return true
 332 *
 333 * xfer->lock has to be locked
 334 */
 335static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
 336{
 337        struct device *dev = &xfer->wa->usb_iface->dev;
 338        unsigned result, cnt;
 339        struct wa_seg *seg;
 340        struct urb *urb = xfer->urb;
 341        unsigned found_short = 0;
 342
 343        result = xfer->segs_done == xfer->segs_submitted;
 344        if (result == 0)
 345                goto out;
 346        urb->actual_length = 0;
 347        for (cnt = 0; cnt < xfer->segs; cnt++) {
 348                seg = xfer->seg[cnt];
 349                switch (seg->status) {
 350                case WA_SEG_DONE:
 351                        if (found_short && seg->result > 0) {
 352                                dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
 353                                        xfer, wa_xfer_id(xfer), cnt,
 354                                        seg->result);
 355                                urb->status = -EINVAL;
 356                                goto out;
 357                        }
 358                        urb->actual_length += seg->result;
 359                        if (!(usb_pipeisoc(xfer->urb->pipe))
 360                                && seg->result < xfer->seg_size
 361                            && cnt != xfer->segs-1)
 362                                found_short = 1;
 363                        dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
 364                                "result %zu urb->actual_length %d\n",
 365                                xfer, wa_xfer_id(xfer), seg->index, found_short,
 366                                seg->result, urb->actual_length);
 367                        break;
 368                case WA_SEG_ERROR:
 369                        xfer->result = seg->result;
 370                        dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n",
 371                                xfer, wa_xfer_id(xfer), seg->index, seg->result,
 372                                seg->result);
 373                        goto out;
 374                case WA_SEG_ABORTED:
 375                        dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
 376                                xfer, wa_xfer_id(xfer), seg->index,
 377                                urb->status);
 378                        xfer->result = urb->status;
 379                        goto out;
 380                default:
 381                        dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
 382                                 xfer, wa_xfer_id(xfer), cnt, seg->status);
 383                        xfer->result = -EINVAL;
 384                        goto out;
 385                }
 386        }
 387        xfer->result = 0;
 388out:
 389        return result;
 390}
 391
  392/*
  393 * Search the WA's transfer list for the transfer with a given ID
  394 *
  395 * The ID is the sequential number assigned by wa_xfer_id_init(); a
  396 * reference is taken on the transfer before it is returned.
  397 *
  398 * @returns NULL if not found.
  399 */
 400static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
 401{
 402        unsigned long flags;
 403        struct wa_xfer *xfer_itr;
 404        spin_lock_irqsave(&wa->xfer_list_lock, flags);
 405        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
 406                if (id == xfer_itr->id) {
 407                        wa_xfer_get(xfer_itr);
 408                        goto out;
 409                }
 410        }
 411        xfer_itr = NULL;
 412out:
 413        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 414        return xfer_itr;
 415}
 416
 417struct wa_xfer_abort_buffer {
 418        struct urb urb;
 419        struct wa_xfer_abort cmd;
 420};
 421
 422static void __wa_xfer_abort_cb(struct urb *urb)
 423{
 424        struct wa_xfer_abort_buffer *b = urb->context;
 425        usb_put_urb(&b->urb);
 426}
 427
 428/*
 429 * Aborts an ongoing transaction
 430 *
 431 * Assumes the transfer is referenced and locked and in a submitted
 432 * state (mainly that there is an endpoint/rpipe assigned).
 433 *
  434 * The callback (see above) does nothing but free up the data by
  435 * putting the URB. Because the URB is allocated at the head of the
  436 * struct, the whole space we allocated is kfreed.
 437 */
 438static int __wa_xfer_abort(struct wa_xfer *xfer)
 439{
 440        int result = -ENOMEM;
 441        struct device *dev = &xfer->wa->usb_iface->dev;
 442        struct wa_xfer_abort_buffer *b;
 443        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 444
 445        b = kmalloc(sizeof(*b), GFP_ATOMIC);
 446        if (b == NULL)
 447                goto error_kmalloc;
 448        b->cmd.bLength =  sizeof(b->cmd);
 449        b->cmd.bRequestType = WA_XFER_ABORT;
 450        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
 451        b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
 452
 453        usb_init_urb(&b->urb);
 454        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
 455                usb_sndbulkpipe(xfer->wa->usb_dev,
 456                                xfer->wa->dto_epd->bEndpointAddress),
 457                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
 458        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
 459        if (result < 0)
 460                goto error_submit;
 461        return result;                          /* callback frees! */
 462
 463
 464error_submit:
 465        if (printk_ratelimit())
 466                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
 467                        xfer, result);
 468        kfree(b);
 469error_kmalloc:
 470        return result;
 471
 472}
 473
 474/*
 475 * Calculate the number of isoc frames starting from isoc_frame_offset
  476 * that will fit in a transfer segment.
 477 */
 478static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
 479        int isoc_frame_offset, int *total_size)
 480{
 481        int segment_size = 0, frame_count = 0;
 482        int index = isoc_frame_offset;
 483        struct usb_iso_packet_descriptor *iso_frame_desc =
 484                xfer->urb->iso_frame_desc;
 485
 486        while ((index < xfer->urb->number_of_packets)
 487                && ((segment_size + iso_frame_desc[index].length)
 488                                <= xfer->seg_size)) {
 489                /*
 490                 * For Alereon HWA devices, only include an isoc frame in a
 491                 * segment if it is physically contiguous with the previous
 492                 * frame.  This is required because those devices expect
 493                 * the isoc frames to be sent as a single USB transaction as
 494                 * opposed to one transaction per frame with standard HWA.
 495                 */
 496                if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
 497                        && (index > isoc_frame_offset)
 498                        && ((iso_frame_desc[index - 1].offset +
 499                                iso_frame_desc[index - 1].length) !=
 500                                iso_frame_desc[index].offset))
 501                        break;
 502
 503                /* this frame fits. count it. */
 504                ++frame_count;
 505                segment_size += iso_frame_desc[index].length;
 506
 507                /* move to the next isoc frame. */
 508                ++index;
 509        }
 510
 511        *total_size = segment_size;
 512        return frame_count;
 513}
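
/*
 * Callers split an isoc URB into segments by walking all of its frames
 * with the helper above; roughly (see __wa_xfer_setup_sizes() and
 * __wa_xfer_setup_segs() below):
 *
 *         index = 0;
 *         while (index < urb->number_of_packets)
 *                 index += __wa_seg_calculate_isoc_frame_count(xfer,
 *                                 index, &seg_size);
 */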
 514
  515/*
  516 * Compute the transfer type, segment size and segment count for an xfer
  517 * @returns < 0 on error, transfer segment request size if ok
  518 */
 519static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 520                                     enum wa_xfer_type *pxfer_type)
 521{
 522        ssize_t result;
 523        struct device *dev = &xfer->wa->usb_iface->dev;
 524        size_t maxpktsize;
 525        struct urb *urb = xfer->urb;
 526        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 527
 528        switch (rpipe->descr.bmAttribute & 0x3) {
 529        case USB_ENDPOINT_XFER_CONTROL:
 530                *pxfer_type = WA_XFER_TYPE_CTL;
 531                result = sizeof(struct wa_xfer_ctl);
 532                break;
 533        case USB_ENDPOINT_XFER_INT:
 534        case USB_ENDPOINT_XFER_BULK:
 535                *pxfer_type = WA_XFER_TYPE_BI;
 536                result = sizeof(struct wa_xfer_bi);
 537                break;
 538        case USB_ENDPOINT_XFER_ISOC:
 539                if (usb_pipeout(urb->pipe)) {
 540                        *pxfer_type = WA_XFER_TYPE_ISO;
 541                        result = sizeof(struct wa_xfer_hwaiso);
 542                } else {
 543                        dev_err(dev, "FIXME: ISOC IN not implemented\n");
 544                        result = -ENOSYS;
 545                        goto error;
 546                }
 547                break;
 548        default:
 549                /* never happens */
 550                BUG();
 551                result = -EINVAL;       /* shut gcc up */
 552        }
 553        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
 554        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
 555
 556        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
 557        if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
 558                int index = 0;
 559
 560                xfer->seg_size = maxpktsize;
 561                xfer->segs = 0;
 562                /*
 563                 * loop over urb->number_of_packets to determine how many
 564                 * xfer segments will be needed to send the isoc frames.
 565                 */
 566                while (index < urb->number_of_packets) {
 567                        int seg_size; /* don't care. */
 568                        index += __wa_seg_calculate_isoc_frame_count(xfer,
 569                                        index, &seg_size);
 570                        ++xfer->segs;
 571                }
 572        } else {
 573                xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
 574                        * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
 575                /* Compute the segment size and make sure it is a multiple of
 576                 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
 577                 * a check (FIXME) */
 578                if (xfer->seg_size < maxpktsize) {
 579                        dev_err(dev,
 580                                "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
 581                                xfer->seg_size, maxpktsize);
 582                        result = -EINVAL;
 583                        goto error;
 584                }
 585                xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
 586                xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
 587                                                xfer->seg_size);
 588                if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
 589                        xfer->segs = 1;
 590        }
 591
 592        if (xfer->segs > WA_SEGS_MAX) {
  593                dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
  594                        xfer->segs,
  595                        WA_SEGS_MAX);
 596                result = -EINVAL;
 597                goto error;
 598        }
 599error:
 600        return result;
 601}
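
/*
 * Example of the non-isoc sizing above, with made-up descriptor values:
 * wBlocks = 16 and bRPipeBlockSize = 9 give a raw segment size of
 * 16 << (9 - 1) = 4096 bytes, which is then rounded down to a multiple
 * of wMaxPacketSize; a 10000 byte URB then needs
 * DIV_ROUND_UP(10000, 4096) = 3 segments.
 */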
 602
 603static void __wa_setup_isoc_packet_descr(
 604                struct wa_xfer_packet_info_hwaiso *packet_desc,
 605                struct wa_xfer *xfer,
 606                struct wa_seg *seg) {
 607        struct usb_iso_packet_descriptor *iso_frame_desc =
 608                xfer->urb->iso_frame_desc;
 609        int frame_index;
 610
 611        /* populate isoc packet descriptor. */
 612        packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
 613        packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
 614                (sizeof(packet_desc->PacketLength[0]) *
 615                        seg->isoc_frame_count));
 616        for (frame_index = 0; frame_index < seg->isoc_frame_count;
 617                ++frame_index) {
 618                int offset_index = frame_index + seg->isoc_frame_offset;
 619                packet_desc->PacketLength[frame_index] =
 620                        cpu_to_le16(iso_frame_desc[offset_index].length);
 621        }
 622}
 623
 624
 625/* Fill in the common request header and xfer-type specific data. */
 626static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
 627                                 struct wa_xfer_hdr *xfer_hdr0,
 628                                 enum wa_xfer_type xfer_type,
 629                                 size_t xfer_hdr_size)
 630{
 631        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 632        struct wa_seg *seg = xfer->seg[0];
 633
 634        xfer_hdr0 = &seg->xfer_hdr;
 635        xfer_hdr0->bLength = xfer_hdr_size;
 636        xfer_hdr0->bRequestType = xfer_type;
 637        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
 638        xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
 639        xfer_hdr0->bTransferSegment = 0;
 640        switch (xfer_type) {
 641        case WA_XFER_TYPE_CTL: {
 642                struct wa_xfer_ctl *xfer_ctl =
 643                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
 644                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
 645                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
 646                       sizeof(xfer_ctl->baSetupData));
 647                break;
 648        }
 649        case WA_XFER_TYPE_BI:
 650                break;
 651        case WA_XFER_TYPE_ISO: {
 652                struct wa_xfer_hwaiso *xfer_iso =
 653                        container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
 654                struct wa_xfer_packet_info_hwaiso *packet_desc =
 655                        ((void *)xfer_iso) + xfer_hdr_size;
 656
 657                /* populate the isoc section of the transfer request. */
 658                xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
 659                /* populate isoc packet descriptor. */
 660                __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
 661                break;
 662        }
 663        default:
 664                BUG();
  665        }
 666}
 667
 668/*
 669 * Callback for the OUT data phase of the segment request
 670 *
 671 * Check wa_seg_tr_cb(); most comments also apply here because this
 672 * function does almost the same thing and they work closely
 673 * together.
 674 *
 675 * If the seg request has failed but this DTO phase has succeeded,
 676 * wa_seg_tr_cb() has already failed the segment and moved the
 677 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 678 * effectively do nothing.
 679 */
 680static void wa_seg_dto_cb(struct urb *urb)
 681{
 682        struct wa_seg *seg = urb->context;
 683        struct wa_xfer *xfer = seg->xfer;
 684        struct wahc *wa;
 685        struct device *dev;
 686        struct wa_rpipe *rpipe;
 687        unsigned long flags;
 688        unsigned rpipe_ready = 0;
 689        int data_send_done = 1, release_dto = 0, holding_dto = 0;
 690        u8 done = 0;
 691        int result;
 692
 693        /* free the sg if it was used. */
 694        kfree(urb->sg);
 695        urb->sg = NULL;
 696
 697        spin_lock_irqsave(&xfer->lock, flags);
 698        wa = xfer->wa;
 699        dev = &wa->usb_iface->dev;
 700        if (usb_pipeisoc(xfer->urb->pipe)) {
 701                /* Alereon HWA sends all isoc frames in a single transfer. */
 702                if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
 703                        xfer->dto_isoc_frame_index += seg->isoc_frame_count;
 704                else
 705                        xfer->dto_isoc_frame_index += 1;
 706                if (xfer->dto_isoc_frame_index < seg->isoc_frame_count) {
 707                        data_send_done = 0;
 708                        holding_dto = 1; /* checked in error cases. */
 709                        /*
 710                         * if this is the last isoc frame of the segment, we
 711                         * can release DTO after sending this frame.
 712                         */
 713                        if ((xfer->dto_isoc_frame_index + 1) >=
 714                                seg->isoc_frame_count)
 715                                release_dto = 1;
 716                }
 717                dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
 718                        wa_xfer_id(xfer), seg->index,
 719                        xfer->dto_isoc_frame_index, holding_dto, release_dto);
 720        }
 721        spin_unlock_irqrestore(&xfer->lock, flags);
 722
 723        switch (urb->status) {
 724        case 0:
 725                spin_lock_irqsave(&xfer->lock, flags);
 726                seg->result += urb->actual_length;
 727                if (data_send_done) {
 728                        dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
 729                                wa_xfer_id(xfer), seg->index, seg->result);
 730                        if (seg->status < WA_SEG_PENDING)
 731                                seg->status = WA_SEG_PENDING;
 732                } else {
 733                        /* should only hit this for isoc xfers. */
 734                        /*
 735                         * Populate the dto URB with the next isoc frame buffer,
 736                         * send the URB and release DTO if we no longer need it.
 737                         */
 738                         __wa_populate_dto_urb_isoc(xfer, seg,
 739                                seg->isoc_frame_offset +
 740                                xfer->dto_isoc_frame_index);
 741
 742                        /* resubmit the URB with the next isoc frame. */
 743                        result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
 744                        if (result < 0) {
 745                                dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
 746                                       wa_xfer_id(xfer), seg->index, result);
 747                                spin_unlock_irqrestore(&xfer->lock, flags);
 748                                goto error_dto_submit;
 749                        }
 750                }
 751                spin_unlock_irqrestore(&xfer->lock, flags);
 752                if (release_dto) {
 753                        __wa_dto_put(wa);
 754                        wa_check_for_delayed_rpipes(wa);
 755                }
 756                break;
 757        case -ECONNRESET:       /* URB unlinked; no need to do anything */
  758        case -ENOENT:           /* as it was done by whoever unlinked us */
 759                if (holding_dto) {
 760                        __wa_dto_put(wa);
 761                        wa_check_for_delayed_rpipes(wa);
 762                }
 763                break;
 764        default:                /* Other errors ... */
 765                dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
 766                        wa_xfer_id(xfer), seg->index, urb->status);
 767                goto error_default;
 768        }
 769
 770        return;
 771
 772error_dto_submit:
 773error_default:
 774        spin_lock_irqsave(&xfer->lock, flags);
 775        rpipe = xfer->ep->hcpriv;
 776        if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  777                    EDC_ERROR_TIMEFRAME)) {
 778                dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
 779                wa_reset_all(wa);
 780        }
 781        if (seg->status != WA_SEG_ERROR) {
 782                seg->status = WA_SEG_ERROR;
 783                seg->result = urb->status;
 784                xfer->segs_done++;
 785                __wa_xfer_abort(xfer);
 786                rpipe_ready = rpipe_avail_inc(rpipe);
 787                done = __wa_xfer_is_done(xfer);
 788        }
 789        spin_unlock_irqrestore(&xfer->lock, flags);
 790        if (holding_dto) {
 791                __wa_dto_put(wa);
 792                wa_check_for_delayed_rpipes(wa);
 793        }
 794        if (done)
 795                wa_xfer_completion(xfer);
 796        if (rpipe_ready)
 797                wa_xfer_delayed_run(rpipe);
 798
 799}
 800
 801/*
 802 * Callback for the isoc packet descriptor phase of the segment request
 803 *
 804 * Check wa_seg_tr_cb(); most comments also apply here because this
 805 * function does almost the same thing and they work closely
 806 * together.
 807 *
 808 * If the seg request has failed but this phase has succeeded,
 809 * wa_seg_tr_cb() has already failed the segment and moved the
 810 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 811 * effectively do nothing.
 812 */
 813static void wa_seg_iso_pack_desc_cb(struct urb *urb)
 814{
 815        struct wa_seg *seg = urb->context;
 816        struct wa_xfer *xfer = seg->xfer;
 817        struct wahc *wa;
 818        struct device *dev;
 819        struct wa_rpipe *rpipe;
 820        unsigned long flags;
 821        unsigned rpipe_ready = 0;
 822        u8 done = 0;
 823
 824        switch (urb->status) {
 825        case 0:
 826                spin_lock_irqsave(&xfer->lock, flags);
 827                wa = xfer->wa;
 828                dev = &wa->usb_iface->dev;
 829                dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
 830                        wa_xfer_id(xfer), seg->index);
 831                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
 832                        seg->status = WA_SEG_PENDING;
 833                spin_unlock_irqrestore(&xfer->lock, flags);
 834                break;
 835        case -ECONNRESET:       /* URB unlinked; no need to do anything */
  836        case -ENOENT:           /* as it was done by whoever unlinked us */
 837                break;
 838        default:                /* Other errors ... */
 839                spin_lock_irqsave(&xfer->lock, flags);
 840                wa = xfer->wa;
 841                dev = &wa->usb_iface->dev;
 842                rpipe = xfer->ep->hcpriv;
 843                pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
 844                                wa_xfer_id(xfer), seg->index, urb->status);
 845                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  846                            EDC_ERROR_TIMEFRAME)) {
 847                        dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
 848                        wa_reset_all(wa);
 849                }
 850                if (seg->status != WA_SEG_ERROR) {
 851                        usb_unlink_urb(seg->dto_urb);
 852                        seg->status = WA_SEG_ERROR;
 853                        seg->result = urb->status;
 854                        xfer->segs_done++;
 855                        __wa_xfer_abort(xfer);
 856                        rpipe_ready = rpipe_avail_inc(rpipe);
 857                        done = __wa_xfer_is_done(xfer);
 858                }
 859                spin_unlock_irqrestore(&xfer->lock, flags);
 860                if (done)
 861                        wa_xfer_completion(xfer);
 862                if (rpipe_ready)
 863                        wa_xfer_delayed_run(rpipe);
 864        }
 865}
 866
 867/*
 868 * Callback for the segment request
 869 *
 870 * If successful transition state (unless already transitioned or
 871 * outbound transfer); otherwise, take a note of the error, mark this
 872 * segment done and try completion.
 873 *
  874 * Note we don't access the xfer or seg memory until we are sure the
  875 * transfer hasn't been cancelled (ECONNRESET, ENOENT), which could
  876 * mean that seg->xfer is already gone.
 877 *
 878 * We have to check before setting the status to WA_SEG_PENDING
 879 * because sometimes the xfer result callback arrives before this
 880 * callback (geeeeeeze), so it might happen that we are already in
 881 * another state. As well, we don't set it if the transfer is not inbound,
 882 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 883 * finishes.
 884 */
 885static void wa_seg_tr_cb(struct urb *urb)
 886{
 887        struct wa_seg *seg = urb->context;
 888        struct wa_xfer *xfer = seg->xfer;
 889        struct wahc *wa;
 890        struct device *dev;
 891        struct wa_rpipe *rpipe;
 892        unsigned long flags;
 893        unsigned rpipe_ready;
 894        u8 done = 0;
 895
 896        switch (urb->status) {
 897        case 0:
 898                spin_lock_irqsave(&xfer->lock, flags);
 899                wa = xfer->wa;
 900                dev = &wa->usb_iface->dev;
 901                dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
 902                        xfer, wa_xfer_id(xfer), seg->index);
 903                if (xfer->is_inbound &&
 904                        seg->status < WA_SEG_PENDING &&
 905                        !(usb_pipeisoc(xfer->urb->pipe)))
 906                        seg->status = WA_SEG_PENDING;
 907                spin_unlock_irqrestore(&xfer->lock, flags);
 908                break;
 909        case -ECONNRESET:       /* URB unlinked; no need to do anything */
  910        case -ENOENT:           /* as it was done by whoever unlinked us */
 911                break;
 912        default:                /* Other errors ... */
 913                spin_lock_irqsave(&xfer->lock, flags);
 914                wa = xfer->wa;
 915                dev = &wa->usb_iface->dev;
 916                rpipe = xfer->ep->hcpriv;
 917                if (printk_ratelimit())
 918                        dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
 919                                xfer, wa_xfer_id(xfer), seg->index,
 920                                urb->status);
 921                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  922                            EDC_ERROR_TIMEFRAME)) {
  923                        dev_err(dev,
  924                                "DTO: URB max acceptable errors exceeded, resetting device\n");
 925                        wa_reset_all(wa);
 926                }
 927                usb_unlink_urb(seg->isoc_pack_desc_urb);
 928                usb_unlink_urb(seg->dto_urb);
 929                seg->status = WA_SEG_ERROR;
 930                seg->result = urb->status;
 931                xfer->segs_done++;
 932                __wa_xfer_abort(xfer);
 933                rpipe_ready = rpipe_avail_inc(rpipe);
 934                done = __wa_xfer_is_done(xfer);
 935                spin_unlock_irqrestore(&xfer->lock, flags);
 936                if (done)
 937                        wa_xfer_completion(xfer);
 938                if (rpipe_ready)
 939                        wa_xfer_delayed_run(rpipe);
 940        }
 941}
 942
 943/*
 944 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 945 * subset of the in_sg that matches the buffer subset
 946 * we are about to transfer.
 947 */
 948static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
 949        const unsigned int bytes_transferred,
 950        const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
 951{
 952        struct scatterlist *out_sg;
 953        unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
 954                nents;
 955        struct scatterlist *current_xfer_sg = in_sg;
 956        struct scatterlist *current_seg_sg, *last_seg_sg;
 957
 958        /* skip previously transferred pages. */
 959        while ((current_xfer_sg) &&
 960                        (bytes_processed < bytes_transferred)) {
 961                bytes_processed += current_xfer_sg->length;
 962
  963                /* advance the sg if the previously transferred bytes
  964                        completely consume this entry. */
 965                if (bytes_processed <= bytes_transferred)
 966                        current_xfer_sg = sg_next(current_xfer_sg);
 967        }
 968
 969        /* the data for the current segment starts in current_xfer_sg.
 970                calculate the offset. */
 971        if (bytes_processed > bytes_transferred) {
 972                offset_into_current_page_data = current_xfer_sg->length -
 973                        (bytes_processed - bytes_transferred);
 974        }
 975
 976        /* calculate the number of pages needed by this segment. */
 977        nents = DIV_ROUND_UP((bytes_to_transfer +
 978                offset_into_current_page_data +
 979                current_xfer_sg->offset),
 980                PAGE_SIZE);
 981
 982        out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
 983        if (out_sg) {
 984                sg_init_table(out_sg, nents);
 985
 986                /* copy the portion of the incoming SG that correlates to the
 987                 * data to be transferred by this segment to the segment SG. */
 988                last_seg_sg = current_seg_sg = out_sg;
 989                bytes_processed = 0;
 990
 991                /* reset nents and calculate the actual number of sg entries
 992                        needed. */
 993                nents = 0;
 994                while ((bytes_processed < bytes_to_transfer) &&
 995                                current_seg_sg && current_xfer_sg) {
 996                        unsigned int page_len = min((current_xfer_sg->length -
 997                                offset_into_current_page_data),
 998                                (bytes_to_transfer - bytes_processed));
 999
1000                        sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
1001                                page_len,
1002                                current_xfer_sg->offset +
1003                                offset_into_current_page_data);
1004
1005                        bytes_processed += page_len;
1006
1007                        last_seg_sg = current_seg_sg;
1008                        current_seg_sg = sg_next(current_seg_sg);
1009                        current_xfer_sg = sg_next(current_xfer_sg);
1010
1011                        /* only the first page may require additional offset. */
1012                        offset_into_current_page_data = 0;
1013                        nents++;
1014                }
1015
1016                /* update num_sgs and terminate the list since we may have
1017                 *  concatenated pages. */
1018                sg_mark_end(last_seg_sg);
1019                *out_num_sgs = nents;
1020        }
1021
1022        return out_sg;
1023}
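
/*
 * Ownership note: the scatterlist returned above is kmalloc'ed for the
 * segment's dto_urb (see __wa_populate_dto_urb()) and is released with a
 * plain kfree(urb->sg), as done in wa_seg_dto_cb() and wa_xfer_destroy().
 */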
1024
1025/*
1026 * Populate DMA buffer info for the isoc dto urb.
1027 */
1028static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
1029        struct wa_seg *seg, int curr_iso_frame)
1030{
1031        seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1032        seg->dto_urb->sg = NULL;
1033        seg->dto_urb->num_sgs = 0;
1034        /* dto urb buffer address pulled from iso_frame_desc. */
1035        seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
1036                xfer->urb->iso_frame_desc[curr_iso_frame].offset;
1037        /* The Alereon HWA sends a single URB with all isoc segs. */
1038        if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
1039                seg->dto_urb->transfer_buffer_length = seg->isoc_size;
1040        else
1041                seg->dto_urb->transfer_buffer_length =
1042                        xfer->urb->iso_frame_desc[curr_iso_frame].length;
1043}
1044
1045/*
1046 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
1047 */
1048static int __wa_populate_dto_urb(struct wa_xfer *xfer,
1049        struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
1050{
1051        int result = 0;
1052
1053        if (xfer->is_dma) {
1054                seg->dto_urb->transfer_dma =
1055                        xfer->urb->transfer_dma + buf_itr_offset;
1056                seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1057                seg->dto_urb->sg = NULL;
1058                seg->dto_urb->num_sgs = 0;
1059        } else {
1060                /* do buffer or SG processing. */
1061                seg->dto_urb->transfer_flags &=
1062                        ~URB_NO_TRANSFER_DMA_MAP;
1063                /* this should always be 0 before a resubmit. */
1064                seg->dto_urb->num_mapped_sgs = 0;
1065
1066                if (xfer->urb->transfer_buffer) {
1067                        seg->dto_urb->transfer_buffer =
1068                                xfer->urb->transfer_buffer +
1069                                buf_itr_offset;
1070                        seg->dto_urb->sg = NULL;
1071                        seg->dto_urb->num_sgs = 0;
1072                } else {
1073                        seg->dto_urb->transfer_buffer = NULL;
1074
1075                        /*
1076                         * allocate an SG list to store seg_size bytes
1077                         * and copy the subset of the xfer->urb->sg that
1078                         * matches the buffer subset we are about to
1079                         * read.
1080                         */
1081                        seg->dto_urb->sg = wa_xfer_create_subset_sg(
1082                                xfer->urb->sg,
1083                                buf_itr_offset, buf_itr_size,
1084                                &(seg->dto_urb->num_sgs));
1085                        if (!(seg->dto_urb->sg))
1086                                result = -ENOMEM;
1087                }
1088        }
1089        seg->dto_urb->transfer_buffer_length = buf_itr_size;
1090
1091        return result;
1092}
1093
1094/*
1095 * Allocate the segs array and initialize each of them
1096 *
1097 * The segments are freed by wa_xfer_destroy() when the xfer use count
1098 * drops to zero; however, because each segment is given the same life
1099 * cycle as the USB URB it contains, it is actually freed by
1100 * usb_put_urb() on the contained USB URB (twisted, eh?).
1101 */
1102static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
1103{
1104        int result, cnt, iso_frame_offset;
1105        size_t alloc_size = sizeof(*xfer->seg[0])
1106                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
1107        struct usb_device *usb_dev = xfer->wa->usb_dev;
1108        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
1109        struct wa_seg *seg;
1110        size_t buf_itr, buf_size, buf_itr_size;
1111        int xfer_isoc_frame_offset = 0;
1112
1113        result = -ENOMEM;
1114        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
1115        if (xfer->seg == NULL)
1116                goto error_segs_kzalloc;
1117        buf_itr = 0;
1118        buf_size = xfer->urb->transfer_buffer_length;
1119        iso_frame_offset = 0;
1120        for (cnt = 0; cnt < xfer->segs; cnt++) {
1121                size_t iso_pkt_descr_size = 0;
1122                int seg_isoc_frame_count = 0, seg_isoc_size = 0;
1123
1124                if (usb_pipeisoc(xfer->urb->pipe)) {
1125                        seg_isoc_frame_count =
1126                                __wa_seg_calculate_isoc_frame_count(xfer,
1127                                        xfer_isoc_frame_offset, &seg_isoc_size);
1128
1129                        iso_pkt_descr_size =
1130                                sizeof(struct wa_xfer_packet_info_hwaiso) +
1131                                (seg_isoc_frame_count * sizeof(__le16));
1132                }
1133                seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
1134                                                GFP_ATOMIC);
1135                if (seg == NULL)
1136                        goto error_seg_kmalloc;
1137                wa_seg_init(seg);
1138                seg->xfer = xfer;
1139                seg->index = cnt;
1140                seg->isoc_frame_count = seg_isoc_frame_count;
1141                seg->isoc_frame_offset = xfer_isoc_frame_offset;
1142                seg->isoc_size = seg_isoc_size;
1143                usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
1144                                  usb_sndbulkpipe(usb_dev,
1145                                                  dto_epd->bEndpointAddress),
1146                                  &seg->xfer_hdr, xfer_hdr_size,
1147                                  wa_seg_tr_cb, seg);
1148                buf_itr_size = min(buf_size, xfer->seg_size);
1149                if (xfer->is_inbound == 0 && buf_size > 0) {
1150                        /* outbound data. */
1151                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
1152                        if (seg->dto_urb == NULL)
1153                                goto error_dto_alloc;
1154                        usb_fill_bulk_urb(
1155                                seg->dto_urb, usb_dev,
1156                                usb_sndbulkpipe(usb_dev,
1157                                                dto_epd->bEndpointAddress),
1158                                NULL, 0, wa_seg_dto_cb, seg);
1159
1160                        if (usb_pipeisoc(xfer->urb->pipe)) {
1161                                /* iso packet descriptor. */
1162                                seg->isoc_pack_desc_urb =
1163                                                usb_alloc_urb(0, GFP_ATOMIC);
1164                                if (seg->isoc_pack_desc_urb == NULL)
1165                                        goto error_iso_pack_desc_alloc;
1166                                /*
1167                                 * The buffer for the isoc packet descriptor
1168                                 * after the transfer request header in the
1169                                 * segment object memory buffer.
1170                                 */
1171                                usb_fill_bulk_urb(
1172                                        seg->isoc_pack_desc_urb, usb_dev,
1173                                        usb_sndbulkpipe(usb_dev,
1174                                                dto_epd->bEndpointAddress),
1175                                        (void *)(&seg->xfer_hdr) +
1176                                                xfer_hdr_size,
1177                                        iso_pkt_descr_size,
1178                                        wa_seg_iso_pack_desc_cb, seg);
1179
1180                                /*
1181                                 * Fill in the xfer buffer information for the
1182                                 * first isoc frame.  Subsequent frames in this
1183                                 * segment will be filled in and sent from the
1184                                 * DTO completion routine, if needed.
1185                                 */
1186                                __wa_populate_dto_urb_isoc(xfer, seg,
1187                                        xfer_isoc_frame_offset);
1188                                /* adjust starting frame offset for next seg. */
1189                                xfer_isoc_frame_offset += seg_isoc_frame_count;
1190                        } else {
1191                                /* fill in the xfer buffer information. */
1192                                result = __wa_populate_dto_urb(xfer, seg,
1193                                                        buf_itr, buf_itr_size);
1194                                if (result < 0)
1195                                        goto error_seg_outbound_populate;
1196
1197                                buf_itr += buf_itr_size;
1198                                buf_size -= buf_itr_size;
1199                        }
1200                }
1201                seg->status = WA_SEG_READY;
1202        }
1203        return 0;
1204
1205        /*
1206         * Free the memory for the current segment which failed to init.
 1207         * Use the fact that cnt is left where it failed.  The remaining
1208         * segments will be cleaned up by wa_xfer_destroy.
1209         */
1210error_iso_pack_desc_alloc:
1211error_seg_outbound_populate:
1212        usb_free_urb(xfer->seg[cnt]->dto_urb);
1213error_dto_alloc:
1214        kfree(xfer->seg[cnt]);
1215        xfer->seg[cnt] = NULL;
1216error_seg_kmalloc:
1217error_segs_kzalloc:
1218        return result;
1219}
1220
1221/*
1222 * Allocates all the stuff needed to submit a transfer
1223 *
1224 * Breaks the whole data buffer in a list of segments, each one has a
1225 * structure allocated to it and linked in xfer->seg[index]
1226 *
1227 * FIXME: merge setup_segs() and the last part of this function, no
1228 *        need to do two for loops when we could run everything in a
1229 *        single one
1230 */
1231static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
1232{
1233        int result;
1234        struct device *dev = &xfer->wa->usb_iface->dev;
1235        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
1236        size_t xfer_hdr_size, cnt, transfer_size;
1237        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
1238
1239        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
1240        if (result < 0)
1241                goto error_setup_sizes;
1242        xfer_hdr_size = result;
1243        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
1244        if (result < 0) {
1245                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
1246                        xfer, xfer->segs, result);
1247                goto error_setup_segs;
1248        }
1249        /* Fill the first header */
1250        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
1251        wa_xfer_id_init(xfer);
1252        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
1253
1254        /* Fill remaining headers */
1255        xfer_hdr = xfer_hdr0;
1256        if (xfer_type == WA_XFER_TYPE_ISO) {
1257                xfer_hdr0->dwTransferLength =
1258                        cpu_to_le32(xfer->seg[0]->isoc_size);
1259                for (cnt = 1; cnt < xfer->segs; cnt++) {
1260                        struct wa_xfer_packet_info_hwaiso *packet_desc;
1261                        struct wa_seg *seg = xfer->seg[cnt];
1262
1263                        xfer_hdr = &seg->xfer_hdr;
1264                        packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
1265                        /*
1266                         * Copy values from the 0th header. Segment specific
1267                         * values are set below.
1268                         */
1269                        memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1270                        xfer_hdr->bTransferSegment = cnt;
1271                        xfer_hdr->dwTransferLength =
1272                                cpu_to_le32(seg->isoc_size);
1273                        __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
1274                        seg->status = WA_SEG_READY;
1275                }
1276        } else {
1277                transfer_size = urb->transfer_buffer_length;
1278                xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1279                        cpu_to_le32(xfer->seg_size) :
1280                        cpu_to_le32(transfer_size);
1281                transfer_size -=  xfer->seg_size;
1282                for (cnt = 1; cnt < xfer->segs; cnt++) {
1283                        xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1284                        memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1285                        xfer_hdr->bTransferSegment = cnt;
1286                        xfer_hdr->dwTransferLength =
1287                                transfer_size > xfer->seg_size ?
1288                                        cpu_to_le32(xfer->seg_size)
1289                                        : cpu_to_le32(transfer_size);
1290                        xfer->seg[cnt]->status = WA_SEG_READY;
1291                        transfer_size -=  xfer->seg_size;
1292                }
1293        }
1294        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
1295        result = 0;
1296error_setup_segs:
1297error_setup_sizes:
1298        return result;
1299}
1300
1301/*
 1302 * Submit a transfer segment: the request URB, the isoc packet descriptor
 1303 * URB (if any) and the out data URB (if any).
1304 * rpipe->seg_lock is held!
1305 */
1306static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
1307                           struct wa_seg *seg, int *dto_done)
1308{
1309        int result;
1310
1311        /* default to done unless we encounter a multi-frame isoc segment. */
1312        *dto_done = 1;
1313
1314        /* submit the transfer request. */
1315        result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
1316        if (result < 0) {
1317                pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
1318                       __func__, xfer, seg->index, result);
1319                goto error_seg_submit;
1320        }
1321        /* submit the isoc packet descriptor if present. */
1322        if (seg->isoc_pack_desc_urb) {
1323                struct wahc *wa = xfer->wa;
1324
1325                result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1326                if (result < 0) {
1327                        pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1328                               __func__, xfer, seg->index, result);
1329                        goto error_iso_pack_desc_submit;
1330                }
1331                xfer->dto_isoc_frame_index = 0;
1332                /*
1333                 * If this segment contains more than one isoc frame, hold
1334                 * onto the dto resource until we send all frames.
1335                 * Only applies to non-Alereon devices.
1336                 */
1337                if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
1338                        && (seg->isoc_frame_count > 1))
1339                        *dto_done = 0;
1340        }
1341        /* submit the out data if this is an out request. */
1342        if (seg->dto_urb) {
1343                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
1344                if (result < 0) {
1345                        pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
1346                               __func__, xfer, seg->index, result);
1347                        goto error_dto_submit;
1348                }
1349        }
1350        seg->status = WA_SEG_SUBMITTED;
1351        rpipe_avail_dec(rpipe);
1352        return 0;
1353
1354error_dto_submit:
1355        usb_unlink_urb(seg->isoc_pack_desc_urb);
1356error_iso_pack_desc_submit:
1357        usb_unlink_urb(&seg->tr_urb);
1358error_seg_submit:
1359        seg->status = WA_SEG_ERROR;
1360        seg->result = result;
1361        *dto_done = 1;
1362        return result;
1363}
1364
/*
 * Execute more queued request segments until the maximum number of
 * concurrent segments allowed is reached.
 * Return true if the DTO resource was acquired and released.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock is normally taken before (nests outside) the seg_lock,
 * not vice versa.
 */
1372static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
1373{
1374        int result, dto_acquired = 0, dto_done = 0;
1375        struct device *dev = &rpipe->wa->usb_iface->dev;
1376        struct wa_seg *seg;
1377        struct wa_xfer *xfer;
1378        unsigned long flags;
1379
1380        *dto_waiting = 0;
1381
1382        spin_lock_irqsave(&rpipe->seg_lock, flags);
1383        while (atomic_read(&rpipe->segs_available) > 0
1384              && !list_empty(&rpipe->seg_list)
1385              && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
1386                seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
1387                                 list_node);
1388                list_del(&seg->list_node);
1389                xfer = seg->xfer;
1390                result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
1391                /* release the dto resource if this RPIPE is done with it. */
1392                if (dto_done)
1393                        __wa_dto_put(rpipe->wa);
1394                dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
1395                        xfer, wa_xfer_id(xfer), seg->index,
1396                        atomic_read(&rpipe->segs_available), result);
1397                if (unlikely(result < 0)) {
1398                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1399                        spin_lock_irqsave(&xfer->lock, flags);
1400                        __wa_xfer_abort(xfer);
1401                        xfer->segs_done++;
1402                        spin_unlock_irqrestore(&xfer->lock, flags);
1403                        spin_lock_irqsave(&rpipe->seg_lock, flags);
1404                }
1405        }
1406        /*
1407         * Mark this RPIPE as waiting if dto was not acquired, there are
1408         * delayed segs and no active transfers to wake us up later.
1409         */
1410        if (!dto_acquired && !list_empty(&rpipe->seg_list)
1411                && (atomic_read(&rpipe->segs_available) ==
1412                        le16_to_cpu(rpipe->descr.wRequests)))
1413                *dto_waiting = 1;
1414
1415        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1416
1417        return dto_done;
1418}
1419
1420static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
1421{
1422        int dto_waiting;
1423        int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
1424
1425        /*
1426         * If this RPIPE is waiting on the DTO resource, add it to the tail of
1427         * the waiting list.
1428         * Otherwise, if the WA DTO resource was acquired and released by
1429         *  __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
1430         * DTO and failed during that time.  Check the delayed list and process
1431         * any waiters.  Start searching from the next RPIPE index.
1432         */
1433        if (dto_waiting)
1434                wa_add_delayed_rpipe(rpipe->wa, rpipe);
1435        else if (dto_done)
1436                wa_check_for_delayed_rpipes(rpipe->wa);
1437}
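
/*
 * Typical caller pattern (as used by the completion paths further down):
 * when a segment completes and frees an RPIPE slot, note whether the
 * RPIPE became ready and, once xfer->lock has been dropped, kick any
 * delayed segments:
 *
 *	rpipe_ready = rpipe_avail_inc(rpipe);
 *	done = __wa_xfer_is_done(xfer);
 *	spin_unlock_irqrestore(&xfer->lock, flags);
 *	if (done)
 *		wa_xfer_completion(xfer);
 *	if (rpipe_ready)
 *		wa_xfer_delayed_run(rpipe);
 */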
1438
/*
 * Submit the segments of a transfer to its RPIPE, queueing as delayed
 * any segment that cannot be submitted right away.
 *
 * xfer->lock is taken by the caller.
 *
 * On a submission failure we just stop submitting and return the
 * error; wa_urb_enqueue_b() will execute the completion path.
 */
1446static int __wa_xfer_submit(struct wa_xfer *xfer)
1447{
1448        int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1449        struct wahc *wa = xfer->wa;
1450        struct device *dev = &wa->usb_iface->dev;
1451        unsigned cnt;
1452        struct wa_seg *seg;
1453        unsigned long flags;
1454        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1455        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1456        u8 available;
1457        u8 empty;
1458
1459        spin_lock_irqsave(&wa->xfer_list_lock, flags);
1460        list_add_tail(&xfer->list_node, &wa->xfer_list);
1461        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1462
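        /* sanity check: available segment slots can never exceed wRequests. */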
1463        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1464        result = 0;
1465        spin_lock_irqsave(&rpipe->seg_lock, flags);
1466        for (cnt = 0; cnt < xfer->segs; cnt++) {
1467                int delay_seg = 1;
1468
1469                available = atomic_read(&rpipe->segs_available);
1470                empty = list_empty(&rpipe->seg_list);
1471                seg = xfer->seg[cnt];
1472                if (available && empty) {
1473                        /*
1474                         * Only attempt to acquire DTO if we have a segment
1475                         * to send.
1476                         */
1477                        dto_acquired = __wa_dto_try_get(rpipe->wa);
1478                        if (dto_acquired) {
1479                                delay_seg = 0;
1480                                result = __wa_seg_submit(rpipe, xfer, seg,
1481                                                        &dto_done);
1482                                dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1483                                        xfer, wa_xfer_id(xfer), cnt, available,
1484                                        empty);
1485                                if (dto_done)
1486                                        __wa_dto_put(rpipe->wa);
1487
1488                                if (result < 0) {
1489                                        __wa_xfer_abort(xfer);
1490                                        goto error_seg_submit;
1491                                }
1492                        }
1493                }
1494
1495                if (delay_seg) {
1496                        dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1497                                xfer, wa_xfer_id(xfer), cnt, available,  empty);
1498                        seg->status = WA_SEG_DELAYED;
1499                        list_add_tail(&seg->list_node, &rpipe->seg_list);
1500                }
1501                xfer->segs_submitted++;
1502        }
1503error_seg_submit:
1504        /*
1505         * Mark this RPIPE as waiting if dto was not acquired, there are
1506         * delayed segs and no active transfers to wake us up later.
1507         */
1508        if (!dto_acquired && !list_empty(&rpipe->seg_list)
1509                && (atomic_read(&rpipe->segs_available) ==
1510                        le16_to_cpu(rpipe->descr.wRequests)))
1511                dto_waiting = 1;
1512        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1513
1514        if (dto_waiting)
1515                wa_add_delayed_rpipe(rpipe->wa, rpipe);
1516        else if (dto_done)
1517                wa_check_for_delayed_rpipes(rpipe->wa);
1518
1519        return result;
1520}
1521
1522/*
 * Second part of a URB/transfer enqueue operation
1524 *
1525 * Assumes this comes from wa_urb_enqueue() [maybe through
1526 * wa_urb_enqueue_run()]. At this point:
1527 *
1528 * xfer->wa     filled and refcounted
1529 * xfer->ep     filled with rpipe refcounted if
1530 *              delayed == 0
1531 * xfer->urb    filled and refcounted (this is the case when called
1532 *              from wa_urb_enqueue() as we come from usb_submit_urb()
1533 *              and when called by wa_urb_enqueue_run(), as we took an
1534 *              extra ref dropped by _run() after we return).
1535 * xfer->gfp    filled
1536 *
1537 * If we fail at __wa_xfer_submit(), then we just check if we are done
1538 * and if so, we run the completion procedure. However, if we are not
1539 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If the
 * xfer result never kicks in, the xfer will time out in the USB code
 * and dequeue() will be called.
1543 */
1544static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1545{
1546        int result;
1547        unsigned long flags;
1548        struct urb *urb = xfer->urb;
1549        struct wahc *wa = xfer->wa;
1550        struct wusbhc *wusbhc = wa->wusb;
1551        struct wusb_dev *wusb_dev;
1552        unsigned done;
1553
1554        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1555        if (result < 0) {
1556                pr_err("%s: error_rpipe_get\n", __func__);
1557                goto error_rpipe_get;
1558        }
1559        result = -ENODEV;
1560        /* FIXME: segmentation broken -- kills DWA */
1561        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
1562        if (urb->dev == NULL) {
1563                mutex_unlock(&wusbhc->mutex);
1564                pr_err("%s: error usb dev gone\n", __func__);
1565                goto error_dev_gone;
1566        }
1567        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1568        if (wusb_dev == NULL) {
1569                mutex_unlock(&wusbhc->mutex);
1570                pr_err("%s: error wusb dev gone\n", __func__);
1571                goto error_dev_gone;
1572        }
1573        mutex_unlock(&wusbhc->mutex);
1574
1575        spin_lock_irqsave(&xfer->lock, flags);
1576        xfer->wusb_dev = wusb_dev;
1577        result = urb->status;
1578        if (urb->status != -EINPROGRESS) {
1579                pr_err("%s: error_dequeued\n", __func__);
1580                goto error_dequeued;
1581        }
1582
1583        result = __wa_xfer_setup(xfer, urb);
1584        if (result < 0) {
1585                pr_err("%s: error_xfer_setup\n", __func__);
1586                goto error_xfer_setup;
1587        }
1588        result = __wa_xfer_submit(xfer);
1589        if (result < 0) {
1590                pr_err("%s: error_xfer_submit\n", __func__);
1591                goto error_xfer_submit;
1592        }
1593        spin_unlock_irqrestore(&xfer->lock, flags);
1594        return 0;
1595
        /*
         * This is basically wa_xfer_completion() broken up:
         * wa_xfer_giveback() does a wa_xfer_put() that will call
         * wa_xfer_destroy() and undo setup().
         */
1601error_xfer_setup:
1602error_dequeued:
1603        spin_unlock_irqrestore(&xfer->lock, flags);
1604        /* FIXME: segmentation broken, kills DWA */
1605        if (wusb_dev)
1606                wusb_dev_put(wusb_dev);
1607error_dev_gone:
1608        rpipe_put(xfer->ep->hcpriv);
1609error_rpipe_get:
1610        xfer->result = result;
1611        return result;
1612
1613error_xfer_submit:
1614        done = __wa_xfer_is_done(xfer);
1615        xfer->result = result;
1616        spin_unlock_irqrestore(&xfer->lock, flags);
1617        if (done)
1618                wa_xfer_completion(xfer);
1619        /* return success since the completion routine will run. */
1620        return 0;
1621}
1622
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. If dequeue() jumps in, it first takes xfer->lock and then
 * checks the list -- so, as we would be acquiring the locks in inverse
 * order, we move the delayed list to a separate list while holding
 * wa->xfer_list_lock and then submit the entries without the list
 * lock held.
 */
1633void wa_urb_enqueue_run(struct work_struct *ws)
1634{
1635        struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1636        struct wa_xfer *xfer, *next;
1637        struct urb *urb;
1638        LIST_HEAD(tmp_list);
1639
1640        /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1641        spin_lock_irq(&wa->xfer_list_lock);
1642        list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1643                        wa->xfer_delayed_list.prev);
1644        spin_unlock_irq(&wa->xfer_list_lock);
1645
1646        /*
1647         * enqueue from temp list without list lock held since wa_urb_enqueue_b
1648         * can take xfer->lock as well as lock mutexes.
1649         */
1650        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1651                list_del_init(&xfer->list_node);
1652
1653                urb = xfer->urb;
1654                if (wa_urb_enqueue_b(xfer) < 0)
1655                        wa_xfer_giveback(xfer);
1656                usb_put_urb(urb);       /* taken when queuing */
1657        }
1658}
1659EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1660
1661/*
1662 * Process the errored transfers on the Wire Adapter outside of interrupt.
1663 */
1664void wa_process_errored_transfers_run(struct work_struct *ws)
1665{
1666        struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1667        struct wa_xfer *xfer, *next;
1668        LIST_HEAD(tmp_list);
1669
1670        pr_info("%s: Run delayed STALL processing.\n", __func__);
1671
1672        /* Create a copy of the wa->xfer_errored_list while holding the lock */
1673        spin_lock_irq(&wa->xfer_list_lock);
1674        list_cut_position(&tmp_list, &wa->xfer_errored_list,
1675                        wa->xfer_errored_list.prev);
1676        spin_unlock_irq(&wa->xfer_list_lock);
1677
1678        /*
1679         * run rpipe_clear_feature_stalled from temp list without list lock
1680         * held.
1681         */
1682        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1683                struct usb_host_endpoint *ep;
1684                unsigned long flags;
1685                struct wa_rpipe *rpipe;
1686
1687                spin_lock_irqsave(&xfer->lock, flags);
1688                ep = xfer->ep;
1689                rpipe = ep->hcpriv;
1690                spin_unlock_irqrestore(&xfer->lock, flags);
1691
1692                /* clear RPIPE feature stalled without holding a lock. */
1693                rpipe_clear_feature_stalled(wa, ep);
1694
1695                /* complete the xfer. This removes it from the tmp list. */
1696                wa_xfer_completion(xfer);
1697
1698                /* check for work. */
1699                wa_xfer_delayed_run(rpipe);
1700        }
1701}
1702EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1703
/*
 * Submit a transfer to the Wire Adapter, possibly deferring it
 *
 * The process of enqueueing may sleep [wa_urb_enqueue_b() calls
 * rpipe_get_by_ep() and mutex_lock()]. If we are in an atomic
 * section, we defer the wa_urb_enqueue_b() call to a workqueue;
 * otherwise we call it directly.
 *
 * @urb: We own a reference to it, taken by the Linux USB HCD stack,
 *       that will be given up by calling usb_hcd_giveback_urb() or by
 *       returning an error from this function -> ergo we don't have
 *       to refcount it.
 */
1716int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1717                   struct urb *urb, gfp_t gfp)
1718{
1719        int result;
1720        struct device *dev = &wa->usb_iface->dev;
1721        struct wa_xfer *xfer;
1722        unsigned long my_flags;
1723        unsigned cant_sleep = irqs_disabled() | in_atomic();
1724
1725        if ((urb->transfer_buffer == NULL)
1726            && (urb->sg == NULL)
1727            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1728            && urb->transfer_buffer_length != 0) {
1729                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1730                dump_stack();
1731        }
1732
1733        result = -ENOMEM;
1734        xfer = kzalloc(sizeof(*xfer), gfp);
1735        if (xfer == NULL)
1736                goto error_kmalloc;
1737
1738        result = -ENOENT;
1739        if (urb->status != -EINPROGRESS)        /* cancelled */
1740                goto error_dequeued;            /* before starting? */
1741        wa_xfer_init(xfer);
1742        xfer->wa = wa_get(wa);
1743        xfer->urb = urb;
1744        xfer->gfp = gfp;
1745        xfer->ep = ep;
1746        urb->hcpriv = xfer;
1747
1748        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1749                xfer, urb, urb->pipe, urb->transfer_buffer_length,
1750                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1751                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1752                cant_sleep ? "deferred" : "inline");
1753
1754        if (cant_sleep) {
1755                usb_get_urb(urb);
1756                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1757                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1758                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1759                queue_work(wusbd, &wa->xfer_enqueue_work);
1760        } else {
1761                result = wa_urb_enqueue_b(xfer);
1762                if (result < 0) {
1763                        /*
1764                         * URB submit/enqueue failed.  Clean up, return an
1765                         * error and do not run the callback.  This avoids
1766                         * an infinite submit/complete loop.
1767                         */
1768                        dev_err(dev, "%s: URB enqueue failed: %d\n",
1769                           __func__, result);
1770                        wa_put(xfer->wa);
1771                        wa_xfer_put(xfer);
1772                        return result;
1773                }
1774        }
1775        return 0;
1776
1777error_dequeued:
1778        kfree(xfer);
1779error_kmalloc:
1780        return result;
1781}
1782EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1783
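/*
 * A minimal usage sketch (hypothetical caller, e.g. a host controller
 * driver's urb_enqueue operation; local names are illustrative only).
 * The endpoint passed is the URB's target endpoint and mem_flags
 * reflects the caller's allocation context. On error no completion
 * callback will run, so the caller just propagates the error:
 *
 *	result = wa_urb_enqueue(wa, urb->ep, urb, mem_flags);
 *	if (result < 0)
 *		return result;
 */
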
/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with the completion handler being called; when
 * stuck in the delayed list or before wa_xfer_setup() is called, we
 * need to do the completion ourselves.
 *
 *  not setup  If there is no hcpriv yet, that means that enqueue
 *             has had no time to set the xfer up yet. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * async request] and then make sure we cancel each segment.
 *
 */
1802int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1803{
1804        unsigned long flags, flags2;
1805        struct wa_xfer *xfer;
1806        struct wa_seg *seg;
1807        struct wa_rpipe *rpipe;
1808        unsigned cnt, done = 0, xfer_abort_pending;
1809        unsigned rpipe_ready = 0;
1810
1811        xfer = urb->hcpriv;
1812        if (xfer == NULL) {
                /*
                 * Nothing has been set up yet; enqueue will see
                 * urb->status != -EINPROGRESS (set by the HCD layer)
                 * and bail out with an error, so there is no need to
                 * do the completion here.
                 */
1818                BUG_ON(urb->status == -EINPROGRESS);
1819                goto out;
1820        }
1821        spin_lock_irqsave(&xfer->lock, flags);
1822        pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1823        rpipe = xfer->ep->hcpriv;
1824        if (rpipe == NULL) {
1825                pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s",
1826                        __func__, wa_xfer_id(xfer),
                        "Probably already aborted.\n");
1828                goto out_unlock;
1829        }
1830        /* Check the delayed list -> if there, release and complete */
1831        spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1832        if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1833                goto dequeue_delayed;
1834        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1835        if (xfer->seg == NULL)          /* still hasn't reached */
1836                goto out_unlock;        /* setup(), enqueue_b() completes */
1837        /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1838        xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1839        for (cnt = 0; cnt < xfer->segs; cnt++) {
1840                seg = xfer->seg[cnt];
1841                pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1842                        __func__, wa_xfer_id(xfer), cnt, seg->status);
1843                switch (seg->status) {
1844                case WA_SEG_NOTREADY:
1845                case WA_SEG_READY:
1846                        printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1847                               xfer, cnt, seg->status);
1848                        WARN_ON(1);
1849                        break;
1850                case WA_SEG_DELAYED:
1851                        /*
1852                         * delete from rpipe delayed list.  If no segments on
1853                         * this xfer have been submitted, __wa_xfer_is_done will
1854                         * trigger a giveback below.  Otherwise, the submitted
1855                         * segments will be completed in the DTI interrupt.
1856                         */
1857                        seg->status = WA_SEG_ABORTED;
1858                        spin_lock_irqsave(&rpipe->seg_lock, flags2);
1859                        list_del(&seg->list_node);
1860                        xfer->segs_done++;
1861                        spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1862                        break;
1863                case WA_SEG_DONE:
1864                case WA_SEG_ERROR:
1865                case WA_SEG_ABORTED:
1866                        break;
1867                        /*
1868                         * In the states below, the HWA device already knows
1869                         * about the transfer.  If an abort request was sent,
1870                         * allow the HWA to process it and wait for the
1871                         * results.  Otherwise, the DTI state and seg completed
1872                         * counts can get out of sync.
1873                         */
1874                case WA_SEG_SUBMITTED:
1875                case WA_SEG_PENDING:
1876                case WA_SEG_DTI_PENDING:
1877                        /*
1878                         * Check if the abort was successfully sent.  This could
1879                         * be false if the HWA has been removed but we haven't
1880                         * gotten the disconnect notification yet.
1881                         */
1882                        if (!xfer_abort_pending) {
1883                                seg->status = WA_SEG_ABORTED;
1884                                rpipe_ready = rpipe_avail_inc(rpipe);
1885                                xfer->segs_done++;
1886                        }
1887                        break;
1888                }
1889        }
1890        xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
1891        done = __wa_xfer_is_done(xfer);
1892        spin_unlock_irqrestore(&xfer->lock, flags);
1893        if (done)
1894                wa_xfer_completion(xfer);
1895        if (rpipe_ready)
1896                wa_xfer_delayed_run(rpipe);
1897        return 0;
1898
1899out_unlock:
1900        spin_unlock_irqrestore(&xfer->lock, flags);
1901out:
1902        return 0;
1903
1904dequeue_delayed:
1905        list_del_init(&xfer->list_node);
1906        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1907        xfer->result = urb->status;
1908        spin_unlock_irqrestore(&xfer->lock, flags);
1909        wa_xfer_giveback(xfer);
1910        usb_put_urb(urb);               /* we got a ref in enqueue() */
1911        return 0;
1912}
1913EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1914
1915/*
1916 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1917 * codes
1918 *
1919 * Positive errno values are internal inconsistencies and should be
1920 * flagged louder. Negative are to be passed up to the user in the
1921 * normal way.
1922 *
1923 * @status: USB WA status code -- high two bits are stripped.
1924 */
1925static int wa_xfer_status_to_errno(u8 status)
1926{
1927        int errno;
1928        u8 real_status = status;
1929        static int xlat[] = {
1930                [WA_XFER_STATUS_SUCCESS] =              0,
1931                [WA_XFER_STATUS_HALTED] =               -EPIPE,
1932                [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
1933                [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
1934                [WA_XFER_RESERVED] =                    EINVAL,
1935                [WA_XFER_STATUS_NOT_FOUND] =            0,
1936                [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1937                [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
1938                [WA_XFER_STATUS_ABORTED] =              -EINTR,
1939                [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
1940                [WA_XFER_INVALID_FORMAT] =              EINVAL,
1941                [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
1942                [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
1943        };
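        /* only the low six bits carry the status code (high bits are flags). */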
1944        status &= 0x3f;
1945
1946        if (status == 0)
1947                return 0;
1948        if (status >= ARRAY_SIZE(xlat)) {
1949                printk_ratelimited(KERN_ERR "%s(): BUG? "
1950                               "Unknown WA transfer status 0x%02x\n",
1951                               __func__, real_status);
1952                return -EINVAL;
1953        }
1954        errno = xlat[status];
1955        if (unlikely(errno > 0)) {
1956                printk_ratelimited(KERN_ERR "%s(): BUG? "
1957                               "Inconsistent WA status: 0x%02x\n",
1958                               __func__, real_status);
1959                errno = -errno;
1960        }
1961        return errno;
1962}
1963
/*
 * If a last segment flag and/or a transfer result error is encountered,
 * no other segment transfer results will be returned from the device.
 * Mark the remaining submitted or pending segments as completed so that
 * the xfer will complete cleanly.
 */
1970static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
1971                struct wa_seg *incoming_seg)
1972{
1973        int index;
1974        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1975
1976        for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
1977                index++) {
1978                struct wa_seg *current_seg = xfer->seg[index];
1979
1980                BUG_ON(current_seg == NULL);
1981
1982                switch (current_seg->status) {
1983                case WA_SEG_SUBMITTED:
1984                case WA_SEG_PENDING:
1985                case WA_SEG_DTI_PENDING:
1986                        rpipe_avail_inc(rpipe);
1987                /*
1988                 * do not increment RPIPE avail for the WA_SEG_DELAYED case
1989                 * since it has not been submitted to the RPIPE.
1990                 */
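                /* fall through */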
1991                case WA_SEG_DELAYED:
1992                        xfer->segs_done++;
1993                        current_seg->status = incoming_seg->status;
1994                        break;
1995                case WA_SEG_ABORTED:
1996                        break;
1997                default:
1998                        WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
1999                                __func__, wa_xfer_id(xfer), index,
2000                                current_seg->status);
2001                        break;
2002                }
2003        }
2004}
2005
2006/*
2007 * Process a xfer result completion message
2008 *
2009 * inbound transfers: need to schedule a buf_in_urb read
2010 *
2011 * FIXME: this function needs to be broken up in parts
2012 */
2013static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2014                struct wa_xfer_result *xfer_result)
2015{
2016        int result;
2017        struct device *dev = &wa->usb_iface->dev;
2018        unsigned long flags;
2019        u8 seg_idx;
2020        struct wa_seg *seg;
2021        struct wa_rpipe *rpipe;
2022        unsigned done = 0;
2023        u8 usb_status;
2024        unsigned rpipe_ready = 0;
2025
2026        spin_lock_irqsave(&xfer->lock, flags);
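        /* bit 7 of bTransferSegment marks the last segment; mask it off. */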
2027        seg_idx = xfer_result->bTransferSegment & 0x7f;
2028        if (unlikely(seg_idx >= xfer->segs))
2029                goto error_bad_seg;
2030        seg = xfer->seg[seg_idx];
2031        rpipe = xfer->ep->hcpriv;
2032        usb_status = xfer_result->bTransferStatus;
2033        dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2034                xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2035        if (seg->status == WA_SEG_ABORTED
2036            || seg->status == WA_SEG_ERROR)     /* already handled */
2037                goto segment_aborted;
        if (seg->status == WA_SEG_SUBMITTED)    /* oops, got here */
2039                seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
2040        if (seg->status != WA_SEG_PENDING) {
2041                if (printk_ratelimit())
2042                        dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2043                                xfer, seg_idx, seg->status);
2044                seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
2045        }
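        /* bit 7 of bTransferStatus: the device reported a segment error. */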
2046        if (usb_status & 0x80) {
2047                seg->result = wa_xfer_status_to_errno(usb_status);
2048                dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
2049                        xfer, xfer->id, seg->index, usb_status);
2050                seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2051                        WA_SEG_ABORTED : WA_SEG_ERROR;
2052                goto error_complete;
2053        }
2054        /* FIXME: we ignore warnings, tally them for stats */
2055        if (usb_status & 0x40)          /* Warning?... */
2056                usb_status = 0;         /* ... pass */
2057        if (usb_pipeisoc(xfer->urb->pipe)) {
2058                /* set up WA state to read the isoc packet status next. */
2059                wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2060                wa->dti_isoc_xfer_seg = seg_idx;
2061                wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2062        } else if (xfer->is_inbound) {  /* IN data phase: read to buffer */
2063                seg->status = WA_SEG_DTI_PENDING;
2064                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
2065                /* this should always be 0 before a resubmit. */
2066                wa->buf_in_urb->num_mapped_sgs  = 0;
2067
2068                if (xfer->is_dma) {
2069                        wa->buf_in_urb->transfer_dma =
2070                                xfer->urb->transfer_dma
2071                                + (seg_idx * xfer->seg_size);
2072                        wa->buf_in_urb->transfer_flags
2073                                |= URB_NO_TRANSFER_DMA_MAP;
2074                        wa->buf_in_urb->transfer_buffer = NULL;
2075                        wa->buf_in_urb->sg = NULL;
2076                        wa->buf_in_urb->num_sgs = 0;
2077                } else {
2078                        /* do buffer or SG processing. */
2079                        wa->buf_in_urb->transfer_flags
2080                                &= ~URB_NO_TRANSFER_DMA_MAP;
2081
2082                        if (xfer->urb->transfer_buffer) {
2083                                wa->buf_in_urb->transfer_buffer =
2084                                        xfer->urb->transfer_buffer
2085                                        + (seg_idx * xfer->seg_size);
2086                                wa->buf_in_urb->sg = NULL;
2087                                wa->buf_in_urb->num_sgs = 0;
2088                        } else {
                                /*
                                 * Allocate an SG list to store seg_size
                                 * bytes and copy the subset of the
                                 * xfer->urb->sg that matches the buffer
                                 * subset we are about to read.
                                 */
2093                                wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
2094                                        xfer->urb->sg,
2095                                        seg_idx * xfer->seg_size,
2096                                        le32_to_cpu(
2097                                                xfer_result->dwTransferLength),
2098                                        &(wa->buf_in_urb->num_sgs));
2099
2100                                if (!(wa->buf_in_urb->sg)) {
2101                                        wa->buf_in_urb->num_sgs = 0;
2102                                        goto error_sg_alloc;
2103                                }
2104                                wa->buf_in_urb->transfer_buffer = NULL;
2105                        }
2106                }
2107                wa->buf_in_urb->transfer_buffer_length =
2108                        le32_to_cpu(xfer_result->dwTransferLength);
2109                wa->buf_in_urb->context = seg;
2110                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
2111                if (result < 0)
2112                        goto error_submit_buf_in;
2113        } else {
2114                /* OUT data phase, complete it -- */
2115                seg->status = WA_SEG_DONE;
2116                seg->result = le32_to_cpu(xfer_result->dwTransferLength);
2117                xfer->segs_done++;
2118                rpipe_ready = rpipe_avail_inc(rpipe);
2119                done = __wa_xfer_is_done(xfer);
2120        }
2121        spin_unlock_irqrestore(&xfer->lock, flags);
2122        if (done)
2123                wa_xfer_completion(xfer);
2124        if (rpipe_ready)
2125                wa_xfer_delayed_run(rpipe);
2126        return;
2127
2128error_submit_buf_in:
2129        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2130                dev_err(dev, "DTI: URB max acceptable errors "
2131                        "exceeded, resetting device\n");
2132                wa_reset_all(wa);
2133        }
2134        if (printk_ratelimit())
2135                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2136                        xfer, seg_idx, result);
2137        seg->result = result;
2138        kfree(wa->buf_in_urb->sg);
2139        wa->buf_in_urb->sg = NULL;
2140error_sg_alloc:
2141        __wa_xfer_abort(xfer);
2142        seg->status = WA_SEG_ERROR;
2143error_complete:
2144        xfer->segs_done++;
2145        rpipe_ready = rpipe_avail_inc(rpipe);
2146        wa_complete_remaining_xfer_segs(xfer, seg);
2147        done = __wa_xfer_is_done(xfer);
2148        /*
2149         * queue work item to clear STALL for control endpoints.
2150         * Otherwise, let endpoint_reset take care of it.
2151         */
2152        if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2153                usb_endpoint_xfer_control(&xfer->ep->desc) &&
2154                done) {
2155
2156                dev_info(dev, "Control EP stall.  Queue delayed work.\n");
2157                spin_lock_irq(&wa->xfer_list_lock);
2158                /* move xfer from xfer_list to xfer_errored_list. */
2159                list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2160                spin_unlock_irq(&wa->xfer_list_lock);
2161                spin_unlock_irqrestore(&xfer->lock, flags);
2162                queue_work(wusbd, &wa->xfer_error_work);
2163        } else {
2164                spin_unlock_irqrestore(&xfer->lock, flags);
2165                if (done)
2166                        wa_xfer_completion(xfer);
2167                if (rpipe_ready)
2168                        wa_xfer_delayed_run(rpipe);
2169        }
2170
2171        return;
2172
2173error_bad_seg:
2174        spin_unlock_irqrestore(&xfer->lock, flags);
2175        wa_urb_dequeue(wa, xfer->urb);
2176        if (printk_ratelimit())
2177                dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2178        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2179                dev_err(dev, "DTI: URB max acceptable errors "
2180                        "exceeded, resetting device\n");
2181                wa_reset_all(wa);
2182        }
2183        return;
2184
2185segment_aborted:
2186        /* nothing to do, as the aborter did the completion */
2187        spin_unlock_irqrestore(&xfer->lock, flags);
2188}
2189
2190/*
 * Process an isochronous packet status message
2192 *
2193 * inbound transfers: need to schedule a buf_in_urb read
2194 */
2195static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2196{
2197        struct device *dev = &wa->usb_iface->dev;
2198        struct wa_xfer_packet_status_hwaiso *packet_status;
2199        struct wa_xfer_packet_status_len_hwaiso *status_array;
2200        struct wa_xfer *xfer;
2201        unsigned long flags;
2202        struct wa_seg *seg;
2203        struct wa_rpipe *rpipe;
2204        unsigned done = 0;
2205        unsigned rpipe_ready = 0, seg_index;
2206        int expected_size;
2207
2208        /* We have a xfer result buffer; check it */
2209        dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2210                urb->actual_length, urb->transfer_buffer);
2211        packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2212        if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2213                dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2214                        packet_status->bPacketType);
2215                goto error_parse_buffer;
2216        }
2217        xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2218        if (xfer == NULL) {
2219                dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2220                        wa->dti_isoc_xfer_in_progress);
2221                goto error_parse_buffer;
2222        }
2223        spin_lock_irqsave(&xfer->lock, flags);
2224        if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2225                goto error_bad_seg;
2226        seg = xfer->seg[wa->dti_isoc_xfer_seg];
2227        rpipe = xfer->ep->hcpriv;
2228        expected_size = sizeof(*packet_status) +
2229                        (sizeof(packet_status->PacketStatus[0]) *
2230                        seg->isoc_frame_count);
2231        if (urb->actual_length != expected_size) {
2232                dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
2233                        urb->actual_length, expected_size);
2234                goto error_bad_seg;
2235        }
2236        if (le16_to_cpu(packet_status->wLength) != expected_size) {
2237                dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2238                        le16_to_cpu(packet_status->wLength));
2239                goto error_bad_seg;
2240        }
        /* copy isoc packet status and lengths back to the xfer urb. */
2242        status_array = packet_status->PacketStatus;
2243        for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2244                xfer->urb->iso_frame_desc[seg->index].status =
2245                        wa_xfer_status_to_errno(
2246                        le16_to_cpu(status_array[seg_index].PacketStatus));
2247                xfer->urb->iso_frame_desc[seg->index].actual_length =
2248                        le16_to_cpu(status_array[seg_index].PacketLength);
2249        }
2250
2251        if (!xfer->is_inbound) {
2252                /* OUT transfer, complete it -- */
2253                seg->status = WA_SEG_DONE;
2254                xfer->segs_done++;
2255                rpipe_ready = rpipe_avail_inc(rpipe);
2256                done = __wa_xfer_is_done(xfer);
2257        }
2258        spin_unlock_irqrestore(&xfer->lock, flags);
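        /* done with the isoc status; go back to reading transfer results. */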
2259        wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2260        if (done)
2261                wa_xfer_completion(xfer);
2262        if (rpipe_ready)
2263                wa_xfer_delayed_run(rpipe);
2264        wa_xfer_put(xfer);
2265        return;
2266
2267error_bad_seg:
2268        spin_unlock_irqrestore(&xfer->lock, flags);
2269        wa_xfer_put(xfer);
2270error_parse_buffer:
2271        return;
2272}
2273
2274/*
2275 * Callback for the IN data phase
2276 *
 * If successful, transition state; otherwise, take note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access anything until we are sure that the transfer
 * hasn't been cancelled (ECONNRESET, ENOENT), in which case
 * seg->xfer could already be gone.
2283 */
2284static void wa_buf_in_cb(struct urb *urb)
2285{
2286        struct wa_seg *seg = urb->context;
2287        struct wa_xfer *xfer = seg->xfer;
2288        struct wahc *wa;
2289        struct device *dev;
2290        struct wa_rpipe *rpipe;
2291        unsigned rpipe_ready;
2292        unsigned long flags;
2293        u8 done = 0;
2294
2295        /* free the sg if it was used. */
2296        kfree(urb->sg);
2297        urb->sg = NULL;
2298
2299        switch (urb->status) {
2300        case 0:
2301                spin_lock_irqsave(&xfer->lock, flags);
2302                wa = xfer->wa;
2303                dev = &wa->usb_iface->dev;
2304                rpipe = xfer->ep->hcpriv;
2305                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
2306                        xfer, seg->index, (size_t)urb->actual_length);
2307                seg->status = WA_SEG_DONE;
2308                seg->result = urb->actual_length;
2309                xfer->segs_done++;
2310                rpipe_ready = rpipe_avail_inc(rpipe);
2311                done = __wa_xfer_is_done(xfer);
2312                spin_unlock_irqrestore(&xfer->lock, flags);
2313                if (done)
2314                        wa_xfer_completion(xfer);
2315                if (rpipe_ready)
2316                        wa_xfer_delayed_run(rpipe);
2317                break;
2318        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the one who unlinked us */
2320                break;
2321        default:                /* Other errors ... */
2322                spin_lock_irqsave(&xfer->lock, flags);
2323                wa = xfer->wa;
2324                dev = &wa->usb_iface->dev;
2325                rpipe = xfer->ep->hcpriv;
2326                if (printk_ratelimit())
2327                        dev_err(dev, "xfer %p#%u: data in error %d\n",
2328                                xfer, seg->index, urb->status);
2329                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2330                            EDC_ERROR_TIMEFRAME)){
2331                        dev_err(dev, "DTO: URB max acceptable errors "
2332                                "exceeded, resetting device\n");
2333                        wa_reset_all(wa);
2334                }
2335                seg->status = WA_SEG_ERROR;
2336                seg->result = urb->status;
2337                xfer->segs_done++;
2338                rpipe_ready = rpipe_avail_inc(rpipe);
2339                __wa_xfer_abort(xfer);
2340                done = __wa_xfer_is_done(xfer);
2341                spin_unlock_irqrestore(&xfer->lock, flags);
2342                if (done)
2343                        wa_xfer_completion(xfer);
2344                if (rpipe_ready)
2345                        wa_xfer_delayed_run(rpipe);
2346        }
2347}
2348
2349/*
2350 * Handle an incoming transfer result buffer
2351 *
2352 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
2354 * new transfer result read.
2355 *
2356 *
2357 * The xfer_result DTI URB state machine
2358 *
2359 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2360 *
 * We start in OFF mode; the first xfer_result notification [through
2362 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2363 * read.
2364 *
2365 * We receive a buffer -- if it is not a xfer_result, we complain and
2366 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
2367 * request accounting. If it is an IN segment, we move to RBI and post
2368 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
2370 * segment, it will repost the DTI-URB.
2371 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
2373 * errors) in the URBs.
2374 */
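/*
 * Roughly, in the code below: RXR corresponds to wa->dti_state ==
 * WA_DTI_TRANSFER_RESULT_PENDING, RBI to the BUF-IN-URB being in
 * flight (completed by wa_buf_in_cb()), and the isoc packet status
 * read to wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING
 * (handled by wa_process_iso_packet_status()).
 */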
2375static void wa_dti_cb(struct urb *urb)
2376{
2377        int result;
2378        struct wahc *wa = urb->context;
2379        struct device *dev = &wa->usb_iface->dev;
2380        u32 xfer_id;
2381        u8 usb_status;
2382
2383        BUG_ON(wa->dti_urb != urb);
2384        switch (wa->dti_urb->status) {
2385        case 0:
2386                if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2387                        struct wa_xfer_result *xfer_result;
2388                        struct wa_xfer *xfer;
2389
2390                        /* We have a xfer result buffer; check it */
2391                        dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2392                                urb->actual_length, urb->transfer_buffer);
2393                        if (urb->actual_length != sizeof(*xfer_result)) {
2394                                dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2395                                        urb->actual_length,
2396                                        sizeof(*xfer_result));
2397                                break;
2398                        }
2399                        xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2400                        if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2401                                dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2402                                        xfer_result->hdr.bLength);
2403                                break;
2404                        }
2405                        if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2406                                dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2407                                        xfer_result->hdr.bNotifyType);
2408                                break;
2409                        }
2410                        usb_status = xfer_result->bTransferStatus & 0x3f;
2411                        if (usb_status == WA_XFER_STATUS_NOT_FOUND)
2412                                /* taken care of already */
2413                                break;
2414                        xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2415                        xfer = wa_xfer_get_by_id(wa, xfer_id);
2416                        if (xfer == NULL) {
2417                                /* FIXME: transaction not found. */
2418                                dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2419                                        xfer_id, usb_status);
2420                                break;
2421                        }
2422                        wa_xfer_result_chew(wa, xfer, xfer_result);
2423                        wa_xfer_put(xfer);
2424                } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2425                        wa_process_iso_packet_status(wa, urb);
2426                } else {
2427                        dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2428                                wa->dti_state);
2429                }
2430                break;
2431        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
2432        case -ESHUTDOWN:        /* going away! */
2433                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2434                goto out;
2435        default:
2436                /* Unknown error */
2437                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2438                            EDC_ERROR_TIMEFRAME)) {
2439                        dev_err(dev, "DTI: URB max acceptable errors "
2440                                "exceeded, resetting device\n");
2441                        wa_reset_all(wa);
2442                        goto out;
2443                }
2444                if (printk_ratelimit())
2445                        dev_err(dev, "DTI: URB error %d\n", urb->status);
2446                break;
2447        }
2448        /* Resubmit the DTI URB */
2449        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2450        if (result < 0) {
2451                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
2452                        "resetting\n", result);
2453                wa_reset_all(wa);
2454        }
2455out:
2456        return;
2457}
2458
2459/*
2460 * Transfer complete notification
2461 *
2462 * Called from the notif.c code. We get a notification on EP2 saying
2463 * that some endpoint has some transfer result data available. We are
2464 * about to read it.
2465 *
 * To speed things up, we always have a URB reading the DTI endpoint; we
2467 * don't really set it up and start it until the first xfer complete
2468 * notification arrives, which is what we do here.
2469 *
2470 * Follow up in wa_dti_cb(), as that's where the whole state
2471 * machine starts.
2472 *
2473 * So here we just initialize the DTI URB for reading transfer result
2474 * notifications and also the buffer-in URB, for reading buffers. Then
2475 * we just submit the DTI URB.
2476 *
2477 * @wa shall be referenced
2478 */
2479void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2480{
2481        int result;
2482        struct device *dev = &wa->usb_iface->dev;
2483        struct wa_notif_xfer *notif_xfer;
2484        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2485
2486        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2487        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2488
2489        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2490                /* FIXME: hardcoded limitation, adapt */
2491                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2492                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2493                goto error;
2494        }
2495        if (wa->dti_urb != NULL)        /* DTI URB already started */
2496                goto out;
2497
2498        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2499        if (wa->dti_urb == NULL) {
2500                dev_err(dev, "Can't allocate DTI URB\n");
2501                goto error_dti_urb_alloc;
2502        }
2503        usb_fill_bulk_urb(
2504                wa->dti_urb, wa->usb_dev,
2505                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
2506                wa->dti_buf, wa->dti_buf_size,
2507                wa_dti_cb, wa);
2508
2509        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
2510        if (wa->buf_in_urb == NULL) {
2511                dev_err(dev, "Can't allocate BUF-IN URB\n");
2512                goto error_buf_in_urb_alloc;
2513        }
2514        usb_fill_bulk_urb(
2515                wa->buf_in_urb, wa->usb_dev,
2516                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
2517                NULL, 0, wa_buf_in_cb, wa);
2518        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2519        if (result < 0) {
2520                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
2521                        "resetting\n", result);
2522                goto error_dti_urb_submit;
2523        }
2524out:
2525        return;
2526
2527error_dti_urb_submit:
2528        usb_put_urb(wa->buf_in_urb);
2529        wa->buf_in_urb = NULL;
2530error_buf_in_urb_alloc:
2531        usb_put_urb(wa->dti_urb);
2532        wa->dti_urb = NULL;
2533error_dti_urb_alloc:
2534error:
2535        wa_reset_all(wa);
2536}
2537