linux/drivers/usb/host/ehci-q.c
/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/

/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
                  size_t len, int token, int maxpacket)
{
        int     i, count;
        u64     addr = buf;

        /* one buffer entry per 4K ... first might be short or unaligned */
        qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
        qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
        count = 0x1000 - (buf & 0x0fff);        /* rest of that page */
        if (likely (len < count))               /* ... iff needed */
                count = len;
        else {
                buf +=  0x1000;
                buf &= ~0x0fff;

                /* per-qtd limit: from 16K to 20K (best alignment) */
                for (i = 1; count < len && i < 5; i++) {
                        addr = buf;
                        qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
                        qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
                                        (u32)(addr >> 32));
                        buf += 0x1000;
                        if ((count + 0x1000) < len)
                                count += 0x1000;
                        else
                                count = len;
                }

                /* short packets may only terminate transfers */
                if (count != len)
                        count -= (count % maxpacket);
        }
        qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
        qtd->length = count;

        return count;
}
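
/*
 * Worked example: a qtd carries five 4 KB buffer pointers, and only the
 * first may be unaligned.  A transfer starting at page offset 0xf00 can
 * queue 0x100 bytes from the first page plus four full pages, a bit over
 * 16 KB; a 4 KB-aligned buffer gets the full 5 * 4 KB = 20 KB.  When the
 * buffer doesn't fit in one qtd, the trailing partial packet is trimmed
 * off, since a short packet may only end a transfer.
 */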

/*-------------------------------------------------------------------------*/

static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
        struct ehci_qh_hw *hw = qh->hw;

        /* writes to an active overlay are unsafe */
        WARN_ON(qh->qh_state != QH_STATE_IDLE);

        hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
        hw->hw_alt_next = EHCI_LIST_END(ehci);

        /* Except for control endpoints, we make hardware maintain data
         * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
         * and set the pseudo-toggle in udev. Only usb_clear_halt() will
         * ever clear it.
         */
        if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
                unsigned        is_out, epnum;

                is_out = qh->is_out;
                epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
                if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
                        hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
                        usb_settoggle(qh->ps.udev, epnum, is_out, 1);
                }
        }

        hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
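
/*
 * Note: the closing mask above clears all status bits in the overlay
 * token (Active, Halted, and the error bits) while preserving only the
 * data toggle and the high-speed PING state, so a relinked QH restarts
 * from a clean, inactive overlay.
 */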

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qtd *qtd;

        qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);

        /*
         * first qtd may already be partially processed.
         * If we come here during unlink, the QH overlay region
         * might have reference to the just unlinked qtd. The
         * qtd is updated in qh_completions(). Update the QH
         * overlay here.
         */
        if (qh->hw->hw_token & ACTIVE_BIT(ehci))
                qh->hw->hw_qtd_next = qtd->hw_next;
        else
                qh_update(ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
{
        struct ehci_hcd         *ehci = hcd_to_ehci(hcd);
        struct ehci_qh          *qh = ep->hcpriv;
        unsigned long           flags;

        spin_lock_irqsave(&ehci->lock, flags);
        qh->clearing_tt = 0;
        if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
                        && ehci->rh_state == EHCI_RH_RUNNING)
                qh_link_async(ehci, qh);
        spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
                struct urb *urb, u32 token)
{

        /* If an async split transaction gets an error or is unlinked,
         * the TT buffer may be left in an indeterminate state.  We
         * have to clear the TT buffer.
         *
         * Note: this routine is never called for Isochronous transfers.
         */
        if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef CONFIG_DYNAMIC_DEBUG
                struct usb_device *tt = urb->dev->tt->hub;
                dev_dbg(&tt->dev,
                        "clear tt buffer port %d, a%d ep%d t%08x\n",
                        urb->dev->ttport, urb->dev->devnum,
                        usb_pipeendpoint(urb->pipe), token);
#endif /* CONFIG_DYNAMIC_DEBUG */
                if (!ehci_is_TDI(ehci)
                                || urb->dev->tt->hub !=
                                   ehci_to_hcd(ehci)->self.root_hub) {
                        if (usb_hub_clear_tt_buffer(urb) == 0)
                                qh->clearing_tt = 1;
                } else {

                        /* REVISIT ARC-derived cores don't clear the root
                         * hub TT buffer in this way...
                         */
                }
        }
}
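
/*
 * Background: each buffer in a high-speed hub's transaction translator
 * (TT) holds one outstanding full/low-speed split transaction.  When the
 * host abandons a split mid-flight, the buffer can stay busy until the
 * hub-class Clear_TT_Buffer request (USB 2.0 spec, 11.17.5) releases it.
 * usb_hub_clear_tt_buffer() queues that request asynchronously;
 * ehci_clear_tt_buffer_complete() above runs once it completes and
 * relinks the QH if transfers are still queued.
 */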

static int qtd_copy_status (
        struct ehci_hcd *ehci,
        struct urb *urb,
        size_t length,
        u32 token
)
{
        int     status = -EINPROGRESS;

        /* count IN/OUT bytes, not SETUP (even short packets) */
        if (likely (QTD_PID (token) != 2))
                urb->actual_length += length - QTD_LENGTH (token);

        /* don't modify error codes */
        if (unlikely(urb->unlinked))
                return status;

        /* force cleanup after short read; not always an error */
        if (unlikely (IS_SHORT_READ (token)))
                status = -EREMOTEIO;

        /* serious "can't proceed" faults reported by the hardware */
        if (token & QTD_STS_HALT) {
                if (token & QTD_STS_BABBLE) {
                        /* FIXME "must" disable babbling device's port too */
                        status = -EOVERFLOW;
                /* CERR nonzero + halt --> stall */
                } else if (QTD_CERR(token)) {
                        status = -EPIPE;

                /* In theory, more than one of the following bits can be set
                 * since they are sticky and the transaction is retried.
                 * Which to test first is rather arbitrary.
                 */
                } else if (token & QTD_STS_MMF) {
                        /* fs/ls interrupt xfer missed the complete-split */
                        status = -EPROTO;
                } else if (token & QTD_STS_DBE) {
                        status = (QTD_PID (token) == 1) /* IN ? */
                                ? -ENOSR  /* hc couldn't read data */
                                : -ECOMM; /* hc couldn't write data */
                } else if (token & QTD_STS_XACT) {
                        /* timeout, bad CRC, wrong PID, etc */
                        ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
                                urb->dev->devpath,
                                usb_pipeendpoint(urb->pipe),
                                usb_pipein(urb->pipe) ? "in" : "out");
                        status = -EPROTO;
                } else {        /* unknown */
                        status = -EPROTO;
                }
        }

        return status;
}
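
/*
 * Summary of the mapping from token bits to URB status:
 *
 *      halt + babble            -EOVERFLOW
 *      halt + CERR != 0         -EPIPE    (endpoint stall)
 *      halt + missed uframe     -EPROTO
 *      halt + data buffer err   -ENOSR (IN) or -ECOMM (OUT)
 *      halt + xact error        -EPROTO   (retries exhausted)
 *      short read (no halt)     -EREMOTEIO, often cleared by callers
 */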

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
{
        if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
                /* ... update hc-wide periodic stats */
                ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
        }

        if (unlikely(urb->unlinked)) {
                COUNT(ehci->stats.unlink);
        } else {
                /* report non-error and short read status as zero */
                if (status == -EINPROGRESS || status == -EREMOTEIO)
                        status = 0;
                COUNT(ehci->stats.complete);
        }

#ifdef EHCI_URB_TRACE
        ehci_dbg (ehci,
                "%s %s urb %p ep%d%s status %d len %d/%d\n",
                __func__, urb->dev->devpath, urb,
                usb_pipeendpoint (urb->pipe),
                usb_pipein (urb->pipe) ? "in" : "out",
                status,
                urb->actual_length, urb->transfer_buffer_length);
#endif

        usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
        usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
}

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns nonzero if the caller should
 * unlink qh.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qtd         *last, *end = qh->dummy;
        struct list_head        *entry, *tmp;
        int                     last_status;
        int                     stopped;
        u8                      state;
        struct ehci_qh_hw       *hw = qh->hw;

        /* completions (or tasks on other cpus) must never clobber HALT
         * till we've gone through and cleaned everything up, even when
         * they add urbs to this qh's queue or mark them for unlinking.
         *
         * NOTE:  unlinking expects to be done in queue order.
         *
         * It's a bug for qh->qh_state to be anything other than
         * QH_STATE_IDLE, unless our caller is scan_async() or
         * scan_intr().
         */
        state = qh->qh_state;
        qh->qh_state = QH_STATE_COMPLETING;
        stopped = (state == QH_STATE_IDLE);

 rescan:
        last = NULL;
        last_status = -EINPROGRESS;
        qh->dequeue_during_giveback = 0;

        /* remove de-activated QTDs from front of queue.
         * after faults (including short reads), cleanup this urb
         * then let the queue advance.
         * if queue is stopped, handles unlinks.
         */
        list_for_each_safe (entry, tmp, &qh->qtd_list) {
                struct ehci_qtd *qtd;
                struct urb      *urb;
                u32             token = 0;

                qtd = list_entry (entry, struct ehci_qtd, qtd_list);
                urb = qtd->urb;

                /* clean up any state from previous QTD ...*/
                if (last) {
                        if (likely (last->urb != urb)) {
                                ehci_urb_done(ehci, last->urb, last_status);
                                last_status = -EINPROGRESS;
                        }
                        ehci_qtd_free (ehci, last);
                        last = NULL;
                }

                /* ignore urbs submitted during completions we reported */
                if (qtd == end)
                        break;

                /* hardware copies qtd out of qh overlay */
                rmb ();
                token = hc32_to_cpu(ehci, qtd->hw_token);

                /* always clean up qtds the hc de-activated */
 retry_xacterr:
                if ((token & QTD_STS_ACTIVE) == 0) {

                        /* Report Data Buffer Error: non-fatal but useful */
                        if (token & QTD_STS_DBE)
                                ehci_dbg(ehci,
                                        "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
                                        urb,
                                        usb_endpoint_num(&urb->ep->desc),
                                        usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
                                        urb->transfer_buffer_length,
                                        qtd,
                                        qh);

                        /* on STALL, error, and short reads this urb must
                         * complete and all its qtds must be recycled.
                         */
                        if ((token & QTD_STS_HALT) != 0) {

                                /* retry transaction errors until we
                                 * reach the software xacterr limit
                                 */
                                if ((token & QTD_STS_XACT) &&
                                                QTD_CERR(token) == 0 &&
                                                ++qh->xacterrs < QH_XACTERR_MAX &&
                                                !urb->unlinked) {
                                        ehci_dbg(ehci,
        "detected XactErr len %zu/%zu retry %d\n",
        qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

                                        /* reset the token in the qtd and the
                                         * qh overlay (which still contains
                                         * the qtd) so that we pick up from
                                         * where we left off
                                         */
                                        token &= ~QTD_STS_HALT;
                                        token |= QTD_STS_ACTIVE |
                                                        (EHCI_TUNE_CERR << 10);
                                        qtd->hw_token = cpu_to_hc32(ehci,
                                                        token);
                                        wmb();
                                        hw->hw_token = cpu_to_hc32(ehci,
                                                        token);
                                        goto retry_xacterr;
                                }
                                stopped = 1;

                        /* magic dummy for some short reads; qh won't advance.
                         * that silicon quirk can kick in with this dummy too.
                         *
                         * other short reads won't stop the queue, including
                         * control transfers (status stage handles that) or
                         * most other single-qtd reads ... the queue stops if
                         * URB_SHORT_NOT_OK was set so the driver submitting
                         * the urbs could clean it up.
                         */
                        } else if (IS_SHORT_READ (token)
                                        && !(qtd->hw_alt_next
                                                & EHCI_LIST_END(ehci))) {
                                stopped = 1;
                        }

                /* stop scanning when we reach qtds the hc is using */
                } else if (likely (!stopped
                                && ehci->rh_state >= EHCI_RH_RUNNING)) {
                        break;

                /* scan the whole queue for unlinks whenever it stops */
                } else {
                        stopped = 1;

                        /* cancel everything if we halt, suspend, etc */
                        if (ehci->rh_state < EHCI_RH_RUNNING)
                                last_status = -ESHUTDOWN;

                        /* this qtd is active; skip it unless a previous qtd
                         * for its urb faulted, or its urb was canceled.
                         */
                        else if (last_status == -EINPROGRESS && !urb->unlinked)
                                continue;

                        /*
                         * If this was the active qtd when the qh was unlinked
                         * and the overlay's token is active, then the overlay
                         * hasn't been written back to the qtd yet so use its
                         * token instead of the qtd's.  After the qtd is
                         * processed and removed, the overlay won't be valid
                         * any more.
                         */
                        if (state == QH_STATE_IDLE &&
                                        qh->qtd_list.next == &qtd->qtd_list &&
                                        (hw->hw_token & ACTIVE_BIT(ehci))) {
                                token = hc32_to_cpu(ehci, hw->hw_token);
                                hw->hw_token &= ~ACTIVE_BIT(ehci);

                                /* An unlink may leave an incomplete
                                 * async transaction in the TT buffer.
                                 * We have to clear it.
                                 */
                                ehci_clear_tt_buffer(ehci, qh, urb, token);
                        }
                }

                /* unless we already know the urb's status, collect qtd status
                 * and update count of bytes transferred.  in common short read
                 * cases with only one data qtd (including control transfers),
                 * queue processing won't halt.  but with two or more qtds (for
                 * example, with a 32 KB transfer), when the first qtd gets a
                 * short read the second must be removed by hand.
                 */
                if (last_status == -EINPROGRESS) {
                        last_status = qtd_copy_status(ehci, urb,
                                        qtd->length, token);
                        if (last_status == -EREMOTEIO
                                        && (qtd->hw_alt_next
                                                & EHCI_LIST_END(ehci)))
                                last_status = -EINPROGRESS;

                        /* As part of low/full-speed endpoint-halt processing
                         * we must clear the TT buffer (11.17.5).
                         */
                        if (unlikely(last_status != -EINPROGRESS &&
                                        last_status != -EREMOTEIO)) {
                                /* The TT's in some hubs malfunction when they
                                 * receive this request following a STALL (they
                                 * stop sending isochronous packets).  Since a
                                 * STALL can't leave the TT buffer in a busy
                                 * state (if you believe Figures 11-48 - 11-51
                                 * in the USB 2.0 spec), we won't clear the TT
                                 * buffer in this case.  Strictly speaking this
                                 * is a violation of the spec.
                                 */
                                if (last_status != -EPIPE)
                                        ehci_clear_tt_buffer(ehci, qh, urb,
                                                        token);
                        }
                }

                /* if we're removing something not at the queue head,
                 * patch the hardware queue pointer.
                 */
                if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
                        last = list_entry (qtd->qtd_list.prev,
                                        struct ehci_qtd, qtd_list);
                        last->hw_next = qtd->hw_next;
                }

                /* remove qtd; it's recycled after possible urb completion */
                list_del (&qtd->qtd_list);
                last = qtd;

                /* reinit the xacterr counter for the next qtd */
                qh->xacterrs = 0;
        }

        /* last urb's completion might still need calling */
        if (likely (last != NULL)) {
                ehci_urb_done(ehci, last->urb, last_status);
                ehci_qtd_free (ehci, last);
        }

        /* Do we need to rescan for URBs dequeued during a giveback? */
        if (unlikely(qh->dequeue_during_giveback)) {
                /* If the QH is already unlinked, do the rescan now. */
                if (state == QH_STATE_IDLE)
                        goto rescan;

                /* Otherwise the caller must unlink the QH. */
        }

        /* restore original state; caller must unlink or relink */
        qh->qh_state = state;

        /* be sure the hardware's done with the qh before refreshing
         * it after fault cleanup, or recovering from silicon wrongly
         * overlaying the dummy qtd (which reduces DMA chatter).
         *
         * We won't refresh a QH that's linked (after the HC
         * stopped the queue).  That avoids a race:
         *  - HC reads first part of QH;
         *  - CPU updates that first part and the token;
         *  - HC reads rest of that QH, including token
         * Result:  HC gets an inconsistent image, and then
         * DMAs to/from the wrong memory (corrupting it).
         *
         * That should be rare for interrupt transfers,
         * except maybe high bandwidth ...
         */
        if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
                qh->exception = 1;

        /* Let the caller know if the QH needs to be unlinked. */
        return qh->exception;
}
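
/*
 * In short: the scan above reaps qtds from the head of the list, giving
 * back each URB when its last qtd is freed.  It stops at the first qtd
 * the controller still owns, unless the queue has stopped (fault,
 * unlink, or controller halt), in which case the whole list is scanned
 * so canceled URBs can be removed.  A nonzero return (qh->exception)
 * tells the caller to unlink the QH so qh_refresh() can repair the
 * overlay before the QH is reused.
 */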

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
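
/*
 * Example: a high-bandwidth interrupt endpoint advertising
 * wMaxPacketSize 0x1400 encodes bits 12..11 as 0b10, so hb_mult() is 3
 * and max_packet() is 0x400 = 1024 bytes: up to three 1024-byte
 * transactions per microframe, 3072 bytes in all.
 */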

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list
) {
        struct list_head        *entry, *temp;

        list_for_each_safe (entry, temp, qtd_list) {
                struct ehci_qtd *qtd;

                qtd = list_entry (entry, struct ehci_qtd, qtd_list);
                list_del (&qtd->qtd_list);
                ehci_qtd_free (ehci, qtd);
        }
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *head,
        gfp_t                   flags
) {
        struct ehci_qtd         *qtd, *qtd_prev;
        dma_addr_t              buf;
        int                     len, this_sg_len, maxpacket;
        int                     is_input;
        u32                     token;
        int                     i;
        struct scatterlist      *sg;

        /*
         * URBs map to sequences of QTDs:  one logical transaction
         */
        qtd = ehci_qtd_alloc (ehci, flags);
        if (unlikely (!qtd))
                return NULL;
        list_add_tail (&qtd->qtd_list, head);
        qtd->urb = urb;

        token = QTD_STS_ACTIVE;
        token |= (EHCI_TUNE_CERR << 10);
        /* for split transactions, SplitXState initialized to zero */

        len = urb->transfer_buffer_length;
        is_input = usb_pipein (urb->pipe);
        if (usb_pipecontrol (urb->pipe)) {
                /* SETUP pid */
                qtd_fill(ehci, qtd, urb->setup_dma,
                                sizeof (struct usb_ctrlrequest),
                                token | (2 /* "setup" */ << 8), 8);

                /* ... and always at least one more pid */
                token ^= QTD_TOGGLE;
                qtd_prev = qtd;
                qtd = ehci_qtd_alloc (ehci, flags);
                if (unlikely (!qtd))
                        goto cleanup;
                qtd->urb = urb;
                qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
                list_add_tail (&qtd->qtd_list, head);

                /* for zero length DATA stages, STATUS is always IN */
                if (len == 0)
                        token |= (1 /* "in" */ << 8);
        }

        /*
         * data transfer stage:  buffer setup
         */
        i = urb->num_mapped_sgs;
        if (len > 0 && i > 0) {
                sg = urb->sg;
                buf = sg_dma_address(sg);

                /* urb->transfer_buffer_length may be smaller than the
                 * size of the scatterlist (or vice versa)
                 */
                this_sg_len = min_t(int, sg_dma_len(sg), len);
        } else {
                sg = NULL;
                buf = urb->transfer_dma;
                this_sg_len = len;
        }

        if (is_input)
                token |= (1 /* "in" */ << 8);
        /* else it's already initted to "out" pid (0 << 8) */

        maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

        /*
         * buffer gets wrapped in one or more qtds;
         * last one may be "short" (including zero len)
         * and may serve as a control status ack
         */
        for (;;) {
                int this_qtd_len;

                this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
                                maxpacket);
                this_sg_len -= this_qtd_len;
                len -= this_qtd_len;
                buf += this_qtd_len;

                /*
                 * short reads advance to a "magic" dummy instead of the next
                 * qtd ... that forces the queue to stop, for manual cleanup.
                 * (this will usually be overridden later.)
                 */
                if (is_input)
                        qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

                /* qh makes control packets use qtd toggle; maybe switch it */
                if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
                        token ^= QTD_TOGGLE;

                if (likely(this_sg_len <= 0)) {
                        if (--i <= 0 || len <= 0)
                                break;
                        sg = sg_next(sg);
                        buf = sg_dma_address(sg);
                        this_sg_len = min_t(int, sg_dma_len(sg), len);
                }

                qtd_prev = qtd;
                qtd = ehci_qtd_alloc (ehci, flags);
                if (unlikely (!qtd))
                        goto cleanup;
                qtd->urb = urb;
                qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
                list_add_tail (&qtd->qtd_list, head);
        }

        /*
         * unless the caller requires manual cleanup after short reads,
         * have the alt_next mechanism keep the queue running after the
         * last data qtd (the only one, for control and most other cases).
         */
        if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
                                || usb_pipecontrol (urb->pipe)))
                qtd->hw_alt_next = EHCI_LIST_END(ehci);

        /*
         * control requests may need a terminating data "status" ack;
         * other OUT ones may need a terminating short packet
         * (zero length).
         */
        if (likely (urb->transfer_buffer_length != 0)) {
                int     one_more = 0;

                if (usb_pipecontrol (urb->pipe)) {
                        one_more = 1;
                        token ^= 0x0100;        /* "in" <--> "out"  */
                        token |= QTD_TOGGLE;    /* force DATA1 */
                } else if (usb_pipeout(urb->pipe)
                                && (urb->transfer_flags & URB_ZERO_PACKET)
                                && !(urb->transfer_buffer_length % maxpacket)) {
                        one_more = 1;
                }
                if (one_more) {
                        qtd_prev = qtd;
                        qtd = ehci_qtd_alloc (ehci, flags);
                        if (unlikely (!qtd))
                                goto cleanup;
                        qtd->urb = urb;
                        qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
                        list_add_tail (&qtd->qtd_list, head);

                        /* never any data in such packets */
                        qtd_fill(ehci, qtd, 0, 0, token, 0);
                }
        }

        /* by default, enable interrupt on urb completion */
        if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
                qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
        return head;

cleanup:
        qtd_list_free (ehci, urb, head);
        return NULL;
}
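
/*
 * For a typical control read, the list built above looks like:
 *
 *      SETUP (8 bytes, DATA0) -> IN data (starting DATA1) -> OUT status
 *      (zero length, forced DATA1)
 *
 * and a bulk OUT with URB_ZERO_PACKET whose length is an exact multiple
 * of maxpacket gains a trailing zero-length OUT qtd.  Each IN data qtd
 * initially aims hw_alt_next at the async head's "magic" halted dummy,
 * so a short read parks the queue for cleanup; unless URB_SHORT_NOT_OK
 * demands that, the last qtd's hw_alt_next is then set to the list-end
 * mark so short reads keep the queue running.
 */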

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        gfp_t                   flags
) {
        struct ehci_qh          *qh = ehci_qh_alloc (ehci, flags);
        u32                     info1 = 0, info2 = 0;
        int                     is_input, type;
        int                     maxp = 0;
        struct usb_tt           *tt = urb->dev->tt;
        struct ehci_qh_hw       *hw;

        if (!qh)
                return qh;

        /*
         * init endpoint/device data for this QH
         */
        info1 |= usb_pipeendpoint (urb->pipe) << 8;
        info1 |= usb_pipedevice (urb->pipe) << 0;

        is_input = usb_pipein (urb->pipe);
        type = usb_pipetype (urb->pipe);
        maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

        /* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
         * acts like up to 3KB, but is built from smaller packets.
         */
        if (max_packet(maxp) > 1024) {
                ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
                goto done;
        }

        /* Compute interrupt scheduling parameters just once, and save.
         * - allowing for high bandwidth, how many nsec/uframe are used?
         * - split transactions need a second CSPLIT uframe; same question
         * - splits also need a schedule gap (for full/low speed I/O)
         * - qh has a polling interval
         *
         * For control/bulk requests, the HC or TT handles these.
         */
        if (type == PIPE_INTERRUPT) {
                unsigned        tmp;

                qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
                                is_input, 0,
                                hb_mult(maxp) * max_packet(maxp)));
                qh->ps.phase = NO_FRAME;

                if (urb->dev->speed == USB_SPEED_HIGH) {
                        qh->ps.c_usecs = 0;
                        qh->gap_uf = 0;

                        if (urb->interval > 1 && urb->interval < 8) {
                                /* NOTE interval 2 or 4 uframes could work.
                                 * But interval 1 scheduling is simpler, and
                                 * includes high bandwidth.
                                 */
                                urb->interval = 1;
                        } else if (urb->interval > ehci->periodic_size << 3) {
                                urb->interval = ehci->periodic_size << 3;
                        }
                        qh->ps.period = urb->interval >> 3;

                        /* period for bandwidth allocation */
                        tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
                                        1 << (urb->ep->desc.bInterval - 1));

                        /* Allow urb->interval to override */
                        qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
                        qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
                } else {
                        int             think_time;

                        /* gap is f(FS/LS transfer times) */
                        qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
                                        is_input, 0, maxp) / (125 * 1000);

                        /* FIXME this just approximates SPLIT/CSPLIT times */
                        if (is_input) {         // SPLIT, gap, CSPLIT+DATA
                                qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
                                qh->ps.usecs = HS_USECS(1);
                        } else {                // SPLIT+DATA, gap, CSPLIT
                                qh->ps.usecs += HS_USECS(1);
                                qh->ps.c_usecs = HS_USECS(0);
                        }

                        think_time = tt ? tt->think_time : 0;
                        qh->ps.tt_usecs = NS_TO_US(think_time +
                                        usb_calc_bus_time (urb->dev->speed,
                                        is_input, 0, max_packet (maxp)));
                        if (urb->interval > ehci->periodic_size)
                                urb->interval = ehci->periodic_size;
                        qh->ps.period = urb->interval;

                        /* period for bandwidth allocation */
                        tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
                                        urb->ep->desc.bInterval);
                        tmp = rounddown_pow_of_two(tmp);

                        /* Allow urb->interval to override */
                        qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
                        qh->ps.bw_uperiod = qh->ps.bw_period << 3;
                }
        }

        /* support for tt scheduling, and access to toggles */
        qh->ps.udev = urb->dev;
        qh->ps.ep = urb->ep;

        /* using TT? */
        switch (urb->dev->speed) {
        case USB_SPEED_LOW:
                info1 |= QH_LOW_SPEED;
                /* FALL THROUGH */

        case USB_SPEED_FULL:
                /* EPS 0 means "full" */
                if (type != PIPE_INTERRUPT)
                        info1 |= (EHCI_TUNE_RL_TT << 28);
                if (type == PIPE_CONTROL) {
                        info1 |= QH_CONTROL_EP;         /* for TT */
                        info1 |= QH_TOGGLE_CTL;         /* toggle from qtd */
                }
                info1 |= maxp << 16;

                info2 |= (EHCI_TUNE_MULT_TT << 30);

                /* Some Freescale processors have an erratum in which the
                 * port number in the queue head was 0..N-1 instead of 1..N.
                 */
                if (ehci_has_fsl_portno_bug(ehci))
                        info2 |= (urb->dev->ttport-1) << 23;
                else
                        info2 |= urb->dev->ttport << 23;

                /* set the address of the TT; for TDI's integrated
                 * root hub tt, leave it zeroed.
                 */
                if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
                        info2 |= tt->hub->devnum << 16;

                /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

                break;

        case USB_SPEED_HIGH:            /* no TT involved */
                info1 |= QH_HIGH_SPEED;
                if (type == PIPE_CONTROL) {
                        info1 |= (EHCI_TUNE_RL_HS << 28);
                        info1 |= 64 << 16;      /* usb2 fixed maxpacket */
                        info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
                        info2 |= (EHCI_TUNE_MULT_HS << 30);
                } else if (type == PIPE_BULK) {
                        info1 |= (EHCI_TUNE_RL_HS << 28);
                        /* The USB spec says that high speed bulk endpoints
                         * always use 512 byte maxpacket.  But some device
                         * vendors decided to ignore that, and MSFT is happy
                         * to help them do so.  So now people expect to use
                         * such nonconformant devices with Linux too; sigh.
                         */
                        info1 |= max_packet(maxp) << 16;
                        info2 |= (EHCI_TUNE_MULT_HS << 30);
                } else {                /* PIPE_INTERRUPT */
                        info1 |= max_packet (maxp) << 16;
                        info2 |= hb_mult (maxp) << 30;
                }
                break;
        default:
                ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
                        urb->dev->speed);
done:
                qh_destroy(ehci, qh);
                return NULL;
        }

        /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

        /* init as live, toggle clear */
        qh->qh_state = QH_STATE_IDLE;
        hw = qh->hw;
        hw->hw_info1 = cpu_to_hc32(ehci, info1);
        hw->hw_info2 = cpu_to_hc32(ehci, info2);
        qh->is_out = !is_input;
        usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
        return qh;
}
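
/*
 * hw_info1 and hw_info2 are the endpoint characteristics/capabilities
 * dwords of an EHCI queue head (EHCI spec, section 3.6): device address,
 * endpoint number, endpoint speed, maxpacket, and toggle control live in
 * info1; the split-transaction hub address and port, the transaction
 * multiplier, and the interrupt schedule masks live in info2.
 */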

/*-------------------------------------------------------------------------*/

static void enable_async(struct ehci_hcd *ehci)
{
        if (ehci->async_count++)
                return;

        /* Stop waiting to turn off the async schedule */
        ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);

        /* Don't start the schedule until ASS is 0 */
        ehci_poll_ASS(ehci);
        turn_on_io_watchdog(ehci);
}

static void disable_async(struct ehci_hcd *ehci)
{
        if (--ehci->async_count)
                return;

        /* The async schedule and unlink lists are supposed to be empty */
        WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
                        !list_empty(&ehci->async_idle));

        /* Don't turn off the schedule until ASS is 1 */
        ehci_poll_ASS(ehci);
}

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        __hc32          dma = QH_NEXT(ehci, qh->qh_dma);
        struct ehci_qh  *head;

        /* Don't link a QH if there's a Clear-TT-Buffer pending */
        if (unlikely(qh->clearing_tt))
                return;

        WARN_ON(qh->qh_state != QH_STATE_IDLE);

        /* clear halt and/or toggle; and maybe recover from silicon quirk */
        qh_refresh(ehci, qh);

        /* splice right after start */
        head = ehci->async;
        qh->qh_next = head->qh_next;
        qh->hw->hw_next = head->hw->hw_next;
        wmb ();

        head->qh_next.qh = qh;
        head->hw->hw_next = dma;

        qh->qh_state = QH_STATE_LINKED;
        qh->xacterrs = 0;
        qh->exception = 0;
        /* qtd completions reported later by interrupt */

        enable_async(ehci);
}
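
/*
 * The async schedule is a circular, singly linked list of QHs whose
 * permanent entry point is ehci->async, the head QH with the H bit set.
 * New QHs are spliced in right behind that head: software pointers
 * first, then the hardware link after a write barrier, so the
 * controller never follows a half-updated chain.
 */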

/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
        int                     epnum,
        void                    **ptr
)
{
        struct ehci_qh          *qh = NULL;
        __hc32                  qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

        qh = (struct ehci_qh *) *ptr;
        if (unlikely (qh == NULL)) {
                /* can't sleep here, we have ehci->lock... */
                qh = qh_make (ehci, urb, GFP_ATOMIC);
                *ptr = qh;
        }
        if (likely (qh != NULL)) {
                struct ehci_qtd *qtd;

                if (unlikely (list_empty (qtd_list)))
                        qtd = NULL;
                else
                        qtd = list_entry (qtd_list->next, struct ehci_qtd,
                                        qtd_list);

                /* control qh may need patching ... */
                if (unlikely (epnum == 0)) {

                        /* usb_reset_device() briefly reverts to address 0 */
                        if (usb_pipedevice (urb->pipe) == 0)
                                qh->hw->hw_info1 &= ~qh_addr_mask;
                }

                /* just one way to queue requests: swap with the dummy qtd.
                 * only hc or qh_refresh() ever modify the overlay.
                 */
                if (likely (qtd != NULL)) {
                        struct ehci_qtd         *dummy;
                        dma_addr_t              dma;
                        __hc32                  token;

                        /* to avoid racing the HC, use the dummy td instead of
                         * the first td of our list (becomes new dummy).  both
                         * tds stay deactivated until we're done, when the
                         * HC is allowed to fetch the old dummy (4.10.2).
                         */
                        token = qtd->hw_token;
                        qtd->hw_token = HALT_BIT(ehci);

                        dummy = qh->dummy;

                        dma = dummy->qtd_dma;
                        *dummy = *qtd;
                        dummy->qtd_dma = dma;

                        list_del (&qtd->qtd_list);
                        list_add (&dummy->qtd_list, qtd_list);
                        list_splice_tail(qtd_list, &qh->qtd_list);

                        ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
                        qh->dummy = qtd;

                        /* hc must see the new dummy at list end */
                        dma = qtd->qtd_dma;
                        qtd = list_entry (qh->qtd_list.prev,
                                        struct ehci_qtd, qtd_list);
                        qtd->hw_next = QTD_NEXT(ehci, dma);

                        /* let the hc process these next qtds */
                        wmb ();
                        dummy->hw_token = token;

                        urb->hcpriv = qh;
                }
        }
        return qh;
}
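
/*
 * The dummy-qtd swap above is the lock-free enqueue described in EHCI
 * spec section 4.10.2: the queue always ends with an inactive dummy the
 * controller may safely fetch.  The new qtds go in after it, the old
 * dummy's contents are overwritten with those of the first new qtd
 * (kept halted), and only the final write of the saved token activates
 * the chain, so the controller never sees a partially built queue.
 */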

/*-------------------------------------------------------------------------*/

static int
submit_async (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
        gfp_t                   mem_flags
) {
        int                     epnum;
        unsigned long           flags;
        struct ehci_qh          *qh = NULL;
        int                     rc;

        epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
        {
                struct ehci_qtd *qtd;
                qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
                ehci_dbg(ehci,
                         "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
                         __func__, urb->dev->devpath, urb,
                         epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
                         urb->transfer_buffer_length,
                         qtd, urb->ep->hcpriv);
        }
#endif

        spin_lock_irqsave (&ehci->lock, flags);
        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
                rc = -ESHUTDOWN;
                goto done;
        }
        rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
        if (unlikely(rc))
                goto done;

        qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
        if (unlikely(qh == NULL)) {
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
                rc = -ENOMEM;
                goto done;
        }

        /* Control/bulk operations through TTs don't need scheduling,
         * the HC and TT handle it when the TT has a buffer ready.
         */
        if (likely (qh->qh_state == QH_STATE_IDLE))
                qh_link_async(ehci, qh);
 done:
        spin_unlock_irqrestore (&ehci->lock, flags);
        if (unlikely (qh == NULL))
                qtd_list_free (ehci, urb, qtd_list);
        return rc;
}

/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_HCD_TEST_MODE
/*
 * This function creates the qtds and submits them for the
 * SINGLE_STEP_SET_FEATURE Test.
 * This is done in two parts: first the SETUP req for GetDesc is sent,
 * then 15 seconds later the IN stage for GetDesc starts to request data
 * from the device.
 *
 * is_setup : input argument that decides which of the two stages is
 * performed; TRUE - SETUP and FALSE - IN+STATUS
 * Returns 0 on success
 */
static int submit_single_step_set_feature(
        struct usb_hcd  *hcd,
        struct urb      *urb,
        int             is_setup
) {
        struct ehci_hcd         *ehci = hcd_to_ehci(hcd);
        struct list_head        qtd_list;
        struct list_head        *head;

        struct ehci_qtd         *qtd, *qtd_prev;
        dma_addr_t              buf;
        int                     len, maxpacket;
        u32                     token;

        INIT_LIST_HEAD(&qtd_list);
        head = &qtd_list;

        /* URBs map to sequences of QTDs:  one logical transaction */
        qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
        if (unlikely(!qtd))
                return -1;
        list_add_tail(&qtd->qtd_list, head);
        qtd->urb = urb;

        token = QTD_STS_ACTIVE;
        token |= (EHCI_TUNE_CERR << 10);

        len = urb->transfer_buffer_length;
        /*
         * Check if the request is to perform just the SETUP stage (getDesc);
         * as in the SINGLE_STEP_SET_FEATURE test, the DATA stage (IN)
         * happens 15 secs after the setup.
         */
        if (is_setup) {
                /* SETUP pid */
                qtd_fill(ehci, qtd, urb->setup_dma,
                                sizeof(struct usb_ctrlrequest),
                                token | (2 /* "setup" */ << 8), 8);

                submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
                return 0; /* Return now; we shall come back after 15 seconds */
        }

        /*
         * IN: data transfer stage:  buffer setup : start the IN txn phase for
         * the get_Desc SETUP which was sent 15 seconds earlier
         */
        token ^= QTD_TOGGLE;   /* We need to start IN with DATA-1 PID sequence */
        buf = urb->transfer_dma;

        token |= (1 /* "in" */ << 8);  /* This is the IN stage */

        maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));

        qtd_fill(ehci, qtd, buf, len, token, maxpacket);

        /*
         * Our IN phase shall always be a short read; so keep the queue running
         * and let it advance to the next qtd, the zero-length OUT status.
         */
        qtd->hw_alt_next = EHCI_LIST_END(ehci);

        /* STATUS stage for GetDesc control request */
        token ^= 0x0100;        /* "in" <--> "out"  */
        token |= QTD_TOGGLE;    /* force DATA1 */

        qtd_prev = qtd;
        qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
        if (unlikely(!qtd))
                goto cleanup;
        qtd->urb = urb;
        qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
        list_add_tail(&qtd->qtd_list, head);

        /* don't fill any data in such packets */
        qtd_fill(ehci, qtd, 0, 0, token, 0);

        /* by default, enable interrupt on urb completion */
        if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
                qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);

        submit_async(ehci, urb, &qtd_list, GFP_KERNEL);

        return 0;

cleanup:
        qtd_list_free(ehci, urb, head);
        return -1;
}
#endif /* CONFIG_USB_HCD_TEST_MODE */

/*-------------------------------------------------------------------------*/

static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qh          *prev;

        /* Add to the end of the list of QHs waiting for the next IAAD */
        qh->qh_state = QH_STATE_UNLINK_WAIT;
        list_add_tail(&qh->unlink_node, &ehci->async_unlink);

        /* Unlink it from the schedule */
        prev = ehci->async;
        while (prev->qh_next.qh != qh)
                prev = prev->qh_next.qh;

        prev->hw->hw_next = qh->hw->hw_next;
        prev->qh_next = qh->qh_next;
        if (ehci->qh_scan_next == qh)
                ehci->qh_scan_next = qh->qh_next.qh;
}

static void start_iaa_cycle(struct ehci_hcd *ehci)
{
        /* Do nothing if an IAA cycle is already running */
        if (ehci->iaa_in_progress)
                return;
        ehci->iaa_in_progress = true;

        /* If the controller isn't running, we don't have to wait for it */
        if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
                end_unlink_async(ehci);

        /* Otherwise start a new IAA cycle */
        } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {

                /* Make sure the unlinks are all visible to the hardware */
                wmb();

                ehci_writel(ehci, ehci->command | CMD_IAAD,
                                &ehci->regs->command);
                ehci_readl(ehci, &ehci->regs->command);
                ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
        }
}
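
/*
 * IAAD is the Interrupt on Async Advance Doorbell bit in USBCMD.
 * Ringing it asks the controller to raise an interrupt once it has
 * advanced past, and thus dropped any cached copies of, the QHs just
 * removed from the schedule; only after that IAA interrupt is it safe
 * to recycle or relink them.  The hrtimer watchdog above covers
 * controllers that never deliver the interrupt.
 */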
1298
1299/* the async qh for the qtds being unlinked are now gone from the HC */
1300
1301static void end_unlink_async(struct ehci_hcd *ehci)
1302{
1303        struct ehci_qh          *qh;
1304        bool                    early_exit;
1305
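            /*
             * Assumed quirk handling: a controller with the Synopsys bug
             * may leave ASYNCLISTADDR pointing at the unlinked QH, so
             * reload it with the start of the async schedule.
             */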
1306        if (ehci->has_synopsys_hc_bug)
1307                ehci_writel(ehci, (u32) ehci->async->qh_dma,
1308                            &ehci->regs->async_next);
1309
1310        /* The current IAA cycle has ended */
1311        ehci->iaa_in_progress = false;
1312
1313        if (list_empty(&ehci->async_unlink))
1314                return;
1315        qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
1316                        unlink_node);   /* QH whose IAA cycle just ended */
1317
1318        /*
1319         * If async_unlinking is set then this routine is already running,
1320         * either on the stack or on another CPU.
1321         */
1322        early_exit = ehci->async_unlinking;
1323
1324        /* If the controller isn't running, process all the waiting QHs */
1325        if (ehci->rh_state < EHCI_RH_RUNNING)
1326                list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
1327
1328        /*
1329         * Intel (?) bug: The HC can write back the overlay region even
1330         * after the IAA interrupt occurs.  In self-defense, always go
1331         * through two IAA cycles for each QH.
1332         */
1333        else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
1334                qh->qh_state = QH_STATE_UNLINK;
1335                early_exit = true;
1336        }
1337
1338        /* Otherwise process only the first waiting QH (NVIDIA bug?) */
1339        else
1340                list_move_tail(&qh->unlink_node, &ehci->async_idle);
1341
1342        /* Start a new IAA cycle if any QHs are waiting for it */
1343        if (!list_empty(&ehci->async_unlink))
1344                start_iaa_cycle(ehci);
1345
1346        /*
1347         * Exit early to avoid nested or concurrent processing, or to wait
1348         * out the second IAA cycle needed by the next QH.
1349         */
1350        if (early_exit)
1351                return;
1352
1353        /* Process the idle QHs */
1354        ehci->async_unlinking = true;
1355        while (!list_empty(&ehci->async_idle)) {
1356                qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
1357                                unlink_node);
1358                list_del(&qh->unlink_node);
1359
1360                qh->qh_state = QH_STATE_IDLE;
1361                qh->qh_next.qh = NULL;
1362
1363                if (!list_empty(&qh->qtd_list))
1364                        qh_completions(ehci, qh);
1365                if (!list_empty(&qh->qtd_list) &&
1366                                ehci->rh_state == EHCI_RH_RUNNING)
1367                        qh_link_async(ehci, qh);
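                    /*
                     * This balances the enable_async() done when the QH
                     * was linked; the async schedule is switched off once
                     * no QHs are using it.
                     */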
1368                disable_async(ehci);
1369        }
1370        ehci->async_unlinking = false;
1371}
1372
1373static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
1374
1375static void unlink_empty_async(struct ehci_hcd *ehci)
1376{
1377        struct ehci_qh          *qh;
1378        struct ehci_qh          *qh_to_unlink = NULL;
1379        int                     count = 0;
1380
1381        /* Find the last async QH which has been empty for a timer cycle */
1382        for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
1383                if (list_empty(&qh->qtd_list) &&
1384                                qh->qh_state == QH_STATE_LINKED) {
1385                        ++count;
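                            /* only QHs empty since before this cycle qualify */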
1386                        if (qh->unlink_cycle != ehci->async_unlink_cycle)
1387                                qh_to_unlink = qh;
1388                }
1389        }
1390
1391        /* If nothing else is being unlinked, unlink the last empty QH */
1392        if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
1393                start_unlink_async(ehci, qh_to_unlink);
1394                --count;
1395        }
1396
1397        /* Other QHs will be handled later */
1398        if (count > 0) {
1399                ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1400                ++ehci->async_unlink_cycle;
1401        }
1402}
1403
1404/* The root hub is suspended; unlink all the async QHs */
1405static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
1406{
1407        struct ehci_qh          *qh;
1408
1409        while (ehci->async->qh_next.qh) {
1410                qh = ehci->async->qh_next.qh;
1411                WARN_ON(!list_empty(&qh->qtd_list));
1412                single_unlink_async(ehci, qh);
1413        }
1414        start_iaa_cycle(ehci);
1415}
1416
1417/* makes sure the async qh will become idle */
1418/* caller must own ehci->lock */
1419
1420static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1421{
1422        /* If the QH isn't linked then there's nothing we can do. */
1423        if (qh->qh_state != QH_STATE_LINKED)
1424                return;
1425
1426        single_unlink_async(ehci, qh);
1427        start_iaa_cycle(ehci);
1428}
1429
1430/*-------------------------------------------------------------------------*/
1431
1432static void scan_async(struct ehci_hcd *ehci)
1433{
1434        struct ehci_qh          *qh;
1435        bool                    check_unlinks_later = false;
1436
1437        ehci->qh_scan_next = ehci->async->qh_next.qh;
1438        while (ehci->qh_scan_next) {
1439                qh = ehci->qh_scan_next;
1440                ehci->qh_scan_next = qh->qh_next.qh;
1441
1442                /* clean any finished work for this qh */
1443                if (!list_empty(&qh->qtd_list)) {
1444                        int temp;
1445
1446                        /*
1447                         * Unlinks could happen here; completion reporting
1448                         * drops the lock.  That's why ehci->qh_scan_next
1449                         * always holds the next qh to scan; if the next qh
1450                         * gets unlinked then ehci->qh_scan_next is adjusted
1451                         * in single_unlink_async().
1452                         */
1453                        temp = qh_completions(ehci, qh);
1454                        if (unlikely(temp)) {
1455                                start_unlink_async(ehci, qh);
1456                        } else if (list_empty(&qh->qtd_list)
1457                                        && qh->qh_state == QH_STATE_LINKED) {
1458                                qh->unlink_cycle = ehci->async_unlink_cycle;
1459                                check_unlinks_later = true;
1460                        }
1461                }
1462        }
1463
1464        /*
1465         * Unlink empty entries, reducing DMA usage as well
1466         * as HCD schedule-scanning costs.  Delay for any qh
1467         * we just scanned, since it's not unusual for such a
1468         * qh to become busy again almost immediately.
1469         */
1470        if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
1471                        !(ehci->enabled_hrtimer_events &
1472                                BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
1473                ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1474                ++ehci->async_unlink_cycle;
1475        }
1476}
1477