linux/drivers/usb/host/ehci-sched.c
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
                __hc32 tag)
{
        switch (hc32_to_cpu(ehci, tag)) {
        case Q_TYPE_QH:
                return &periodic->qh->qh_next;
        case Q_TYPE_FSTN:
                return &periodic->fstn->fstn_next;
        case Q_TYPE_ITD:
                return &periodic->itd->itd_next;
        // case Q_TYPE_SITD:
        default:
                return &periodic->sitd->sitd_next;
        }
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
                __hc32 tag)
{
        switch (hc32_to_cpu(ehci, tag)) {
        /* our ehci_shadow.qh is actually software part */
        case Q_TYPE_QH:
                return &periodic->qh->hw->hw_next;
        /* others are hw parts */
        default:
                return periodic->hw_next;
        }
}

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
        union ehci_shadow       *prev_p = &ehci->pshadow[frame];
        __hc32                  *hw_p = &ehci->periodic[frame];
        union ehci_shadow       here = *prev_p;

        /* find predecessor of "ptr"; hw and shadow lists are in sync */
        while (here.ptr && here.ptr != ptr) {
                prev_p = periodic_next_shadow(ehci, prev_p,
                                Q_NEXT_TYPE(ehci, *hw_p));
                hw_p = shadow_next_periodic(ehci, &here,
                                Q_NEXT_TYPE(ehci, *hw_p));
                here = *prev_p;
        }
        /* an interrupt entry (at list end) could have been shared */
        if (!here.ptr)
                return;

        /* update shadow and hardware lists ... the old "next" pointers
         * from ptr may still be in use, the caller updates them.
         */
        *prev_p = *periodic_next_shadow(ehci, &here,
                        Q_NEXT_TYPE(ehci, *hw_p));

        if (!ehci->use_dummy_qh ||
            *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
                        != EHCI_LIST_END(ehci))
                *hw_p = *shadow_next_periodic(ehci, &here,
                                Q_NEXT_TYPE(ehci, *hw_p));
        else
                *hw_p = ehci->dummy->qh_dma;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
        __hc32                  *hw_p = &ehci->periodic [frame];
        union ehci_shadow       *q = &ehci->pshadow [frame];
        unsigned                usecs = 0;
        struct ehci_qh_hw       *hw;

        while (q->ptr) {
                switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
                case Q_TYPE_QH:
                        hw = q->qh->hw;
                        /* is it in the S-mask? */
                        if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
                                usecs += q->qh->usecs;
                        /* ... or C-mask? */
                        if (hw->hw_info2 & cpu_to_hc32(ehci,
                                        1 << (8 + uframe)))
                                usecs += q->qh->c_usecs;
                        hw_p = &hw->hw_next;
                        q = &q->qh->qh_next;
                        break;
                // case Q_TYPE_FSTN:
                default:
                        /* for "save place" FSTNs, count the relevant INTR
                         * bandwidth from the previous frame
                         */
                        if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
                                ehci_dbg (ehci, "ignoring FSTN cost ...\n");
                        }
                        hw_p = &q->fstn->hw_next;
                        q = &q->fstn->fstn_next;
                        break;
                case Q_TYPE_ITD:
                        if (q->itd->hw_transaction[uframe])
                                usecs += q->itd->stream->usecs;
                        hw_p = &q->itd->hw_next;
                        q = &q->itd->itd_next;
                        break;
                case Q_TYPE_SITD:
                        /* is it in the S-mask?  (count SPLIT, DATA) */
                        if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
                                        1 << uframe)) {
                                if (q->sitd->hw_fullspeed_ep &
                                                cpu_to_hc32(ehci, 1<<31))
                                        usecs += q->sitd->stream->usecs;
                                else    /* worst case for OUT start-split */
                                        usecs += HS_USECS_ISO (188);
                        }

                        /* ... C-mask?  (count CSPLIT, DATA) */
                        if (q->sitd->hw_uframe &
                                        cpu_to_hc32(ehci, 1 << (8 + uframe))) {
                                /* worst case for IN complete-split */
                                usecs += q->sitd->stream->c_usecs;
                        }

                        hw_p = &q->sitd->hw_next;
                        q = &q->sitd->sitd_next;
                        break;
                }
        }
#ifdef  DEBUG
        if (usecs > ehci->uframe_periodic_max)
                ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
                        frame * 8 + uframe, usecs);
#endif
        return usecs;
}

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
        if (!dev1->tt || !dev2->tt)
                return 0;
        if (dev1->tt != dev2->tt)
                return 0;
        if (dev1->tt->multi)
                return dev1->ttport == dev2->ttport;
        else
                return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
        unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
        if (!smask) {
                ehci_err(ehci, "invalid empty smask!\n");
                /* uframe 7 can't have bw so this will indicate failure */
                return 7;
        }
        return ffs(smask) - 1;
}
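
/*
 * Worked example (illustrative only, not used by the driver): a QH
 * whose S-mask is 0x04 marks an ssplit in H-frame uframe 2, so
 *
 *      tt_start_uframe(ehci, cpu_to_hc32(ehci, 0x04));  // returns 2
 *
 * i.e. the fullspeed transfer runs in B-frame uframe 2.  An empty
 * mask returns 7, which can never carry bandwidth, so callers read
 * it as failure.
 */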

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
        int i;
        for (i=0; i<7; i++) {
                if (max_tt_usecs[i] < tt_usecs[i]) {
                        tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
                        tt_usecs[i] = max_tt_usecs[i];
                }
        }
}
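
/*
 * A sketch of the carryover (illustrative values only): two 100 usec
 * fullspeed transfers both charged to uframe 0 spill into uframe 1,
 * since the TT just runs them back to back:
 *
 *      unsigned short tt_usecs[8] = { 200, 0, 0, 0, 0, 0, 0, 0 };
 *
 *      carryover_tt_bandwidth(tt_usecs);
 *      // tt_usecs is now { 125, 75, 0, 0, 0, 0, 0, 0 }
 *
 * uframe 0 is capped at max_tt_usecs[0] (125) and the excess 75 usecs
 * moves into uframe 1.
 */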

/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example two separate 100 usec transfers can start in the same uframe,
 * and the second one would "carry over" 75 usecs into the next uframe.
 */
static void
periodic_tt_usecs (
        struct ehci_hcd *ehci,
        struct usb_device *dev,
        unsigned frame,
        unsigned short tt_usecs[8]
)
{
        __hc32                  *hw_p = &ehci->periodic [frame];
        union ehci_shadow       *q = &ehci->pshadow [frame];
        unsigned char           uf;

        memset(tt_usecs, 0, 16);

        while (q->ptr) {
                switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
                case Q_TYPE_ITD:
                        hw_p = &q->itd->hw_next;
                        q = &q->itd->itd_next;
                        continue;
                case Q_TYPE_QH:
                        if (same_tt(dev, q->qh->dev)) {
                                uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
                                tt_usecs[uf] += q->qh->tt_usecs;
                        }
                        hw_p = &q->qh->hw->hw_next;
                        q = &q->qh->qh_next;
                        continue;
                case Q_TYPE_SITD:
                        if (same_tt(dev, q->sitd->urb->dev)) {
                                uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
                                tt_usecs[uf] += q->sitd->stream->tt_usecs;
                        }
                        hw_p = &q->sitd->hw_next;
                        q = &q->sitd->sitd_next;
                        continue;
                // case Q_TYPE_FSTN:
                default:
                        ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
                                        frame);
                        hw_p = &q->fstn->hw_next;
                        q = &q->fstn->fstn_next;
                }
        }

        carryover_tt_bandwidth(tt_usecs);

        if (max_tt_usecs[7] < tt_usecs[7])
                ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
                        frame, tt_usecs[7] - max_tt_usecs[7]);
}

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available (
        struct ehci_hcd         *ehci,
        unsigned                period,
        struct usb_device       *dev,
        unsigned                frame,
        unsigned                uframe,
        u16                     usecs
)
{
        if ((period == 0) || (uframe >= 7))     /* error */
                return 0;

        for (; frame < ehci->periodic_size; frame += period) {
                unsigned short tt_usecs[8];

                periodic_tt_usecs (ehci, dev, frame, tt_usecs);

                ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
                        " schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
                        frame, usecs, uframe,
                        tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
                        tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);

                if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
                        ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
                                frame, uframe);
                        return 0;
                }

                /* special case for isoc transfers larger than 125us:
                 * the first and each subsequent fully used uframe
                 * must be empty, so as to not illegally delay
                 * already scheduled transactions
                 */
                if (125 < usecs) {
                        int ufs = (usecs / 125);
                        int i;
                        for (i = uframe; i < (uframe + ufs) && i < 8; i++)
                                if (0 < tt_usecs[i]) {
                                        ehci_vdbg(ehci,
                                                "multi-uframe xfer can't fit "
                                                "in frame %d uframe %d\n",
                                                frame, i);
                                        return 0;
                                }
                }

                tt_usecs[uframe] += usecs;

                carryover_tt_bandwidth(tt_usecs);

                /* fail if the carryover pushed bw past the last uframe's limit */
                if (max_tt_usecs[7] < tt_usecs[7]) {
                        ehci_vdbg(ehci,
                                "tt unavailable usecs %d frame %d uframe %d\n",
                                usecs, frame, uframe);
                        return 0;
                }
        }

        return 1;
}
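
/*
 * Example of the "larger than 125us" rule above (illustrative numbers):
 * a 300 usec isoc transfer starting at uframe 1 gives ufs = 300 / 125
 * = 2, so uframes 1 and 2 must be completely empty before the transfer
 * is charged and carryover_tt_bandwidth() spreads it forward; anything
 * already scheduled there would be illegally delayed.
 */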

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
        struct ehci_hcd         *ehci,
        unsigned                period,
        struct usb_device       *dev,
        unsigned                frame,
        u32                     uf_mask
)
{
        if (period == 0)        /* error */
                return 0;

        /* note bandwidth wastage:  split never follows csplit
         * (different dev or endpoint) until the next uframe.
         * calling convention doesn't make that distinction.
         */
        for (; frame < ehci->periodic_size; frame += period) {
                union ehci_shadow       here;
                __hc32                  type;
                struct ehci_qh_hw       *hw;

                here = ehci->pshadow [frame];
                type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
                while (here.ptr) {
                        switch (hc32_to_cpu(ehci, type)) {
                        case Q_TYPE_ITD:
                                type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
                                here = here.itd->itd_next;
                                continue;
                        case Q_TYPE_QH:
                                hw = here.qh->hw;
                                if (same_tt (dev, here.qh->dev)) {
                                        u32             mask;

                                        mask = hc32_to_cpu(ehci,
                                                        hw->hw_info2);
                                        /* "knows" no gap is needed */
                                        mask |= mask >> 8;
                                        if (mask & uf_mask)
                                                break;
                                }
                                type = Q_NEXT_TYPE(ehci, hw->hw_next);
                                here = here.qh->qh_next;
                                continue;
                        case Q_TYPE_SITD:
                                if (same_tt (dev, here.sitd->urb->dev)) {
                                        u16             mask;

                                        mask = hc32_to_cpu(ehci, here.sitd
                                                                ->hw_uframe);
                                        /* FIXME assumes no gap for IN! */
                                        mask |= mask >> 8;
                                        if (mask & uf_mask)
                                                break;
                                }
                                type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
                                here = here.sitd->sitd_next;
                                continue;
                        // case Q_TYPE_FSTN:
                        default:
                                ehci_dbg (ehci,
                                        "periodic frame %d bogus type %d\n",
                                        frame, type);
                        }

                        /* collision or error */
                        return 0;
                }
        }

        /* no collision */
        return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
        if (ehci->periodic_count++)
                return;

        /* Stop waiting to turn off the periodic schedule */
        ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

        /* Don't start the schedule until PSS is 0 */
        ehci_poll_PSS(ehci);
        turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
        if (--ehci->periodic_count)
                return;

        /* Don't turn off the schedule until PSS is 1 */
        ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        unsigned        i;
        unsigned        period = qh->period;

        dev_dbg (&qh->dev->dev,
                "link qh%d-%04x/%p start %d [%d/%d us]\n",
                period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
                        & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);

        /* high bandwidth, or otherwise every microframe */
        if (period == 0)
                period = 1;

        for (i = qh->start; i < ehci->periodic_size; i += period) {
                union ehci_shadow       *prev = &ehci->pshadow[i];
                __hc32                  *hw_p = &ehci->periodic[i];
                union ehci_shadow       here = *prev;
                __hc32                  type = 0;

                /* skip the iso nodes at list head */
                while (here.ptr) {
                        type = Q_NEXT_TYPE(ehci, *hw_p);
                        if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
                                break;
                        prev = periodic_next_shadow(ehci, prev, type);
                        hw_p = shadow_next_periodic(ehci, &here, type);
                        here = *prev;
                }

                /* sorting each branch by period (slow-->fast)
                 * enables sharing interior tree nodes
                 */
                while (here.ptr && qh != here.qh) {
                        if (qh->period > here.qh->period)
                                break;
                        prev = &here.qh->qh_next;
                        hw_p = &here.qh->hw->hw_next;
                        here = *prev;
                }
                /* link in this qh, unless some earlier pass did that */
                if (qh != here.qh) {
                        qh->qh_next = here;
                        if (here.qh)
                                qh->hw->hw_next = *hw_p;
                        wmb ();
                        prev->qh = qh;
                        *hw_p = QH_NEXT (ehci, qh->qh_dma);
                }
        }
        qh->qh_state = QH_STATE_LINKED;
        qh->xacterrs = 0;

        /* update per-qh bandwidth for usbfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
                ? ((qh->usecs + qh->c_usecs) / qh->period)
                : (qh->usecs * 8);

        list_add(&qh->intr_node, &ehci->intr_qh_list);

        /* maybe enable periodic schedule processing */
        ++ehci->intr_count;
        enable_periodic(ehci);
}
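
/*
 * Sketch of the resulting tree (illustrative): after linking QHs with
 * periods 8, 4 and 1 (frames), each branch is sorted slow-->fast, e.g.
 *
 *      periodic[0] --> qh(period 8) --> qh(period 4) --> qh(period 1)
 *      periodic[4] ------------------> qh(period 4) --> qh(period 1)
 *      periodic[1] -----------------------------------> qh(period 1)
 *
 * so the faster QHs become shared interior nodes reached from many
 * frames' branches.
 */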

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        unsigned        i;
        unsigned        period;

        /*
         * If qh is for a low/full-speed device, simply unlinking it
         * could interfere with an ongoing split transaction.  To unlink
         * it safely would require setting the QH_INACTIVATE bit and
         * waiting at least one frame, as described in EHCI 4.12.2.5.
         *
         * We won't bother with any of this.  Instead, we assume that the
         * only reason for unlinking an interrupt QH while the current URB
         * is still active is to dequeue all the URBs (flush the whole
         * endpoint queue).
         *
         * If rebalancing the periodic schedule is ever implemented, this
         * approach will no longer be valid.
         */

        /* high bandwidth, or otherwise part of every microframe */
        if ((period = qh->period) == 0)
                period = 1;

        for (i = qh->start; i < ehci->periodic_size; i += period)
                periodic_unlink (ehci, i, qh);

        /* update per-qh bandwidth for usbfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
                ? ((qh->usecs + qh->c_usecs) / qh->period)
                : (qh->usecs * 8);

        dev_dbg (&qh->dev->dev,
                "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
                qh->period,
                hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);

        /* qh->qh_next still "live" to HC */
        qh->qh_state = QH_STATE_UNLINK;
        qh->qh_next.ptr = NULL;

        if (ehci->qh_scan_next == qh)
                ehci->qh_scan_next = list_entry(qh->intr_node.next,
                                struct ehci_qh, intr_node);
        list_del(&qh->intr_node);
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        /* If the QH isn't linked then there's nothing we can do
         * unless we were called during a giveback, in which case
         * qh_completions() has to deal with it.
         */
        if (qh->qh_state != QH_STATE_LINKED) {
                if (qh->qh_state == QH_STATE_COMPLETING)
                        qh->needs_rescan = 1;
                return;
        }

        qh_unlink_periodic (ehci, qh);

        /* Make sure the unlinks are visible before starting the timer */
        wmb();

        /*
         * The EHCI spec doesn't say how long it takes the controller to
         * stop accessing an unlinked interrupt QH.  The timer delay is
         * 9 uframes; presumably that will be long enough.
         */
        qh->unlink_cycle = ehci->intr_unlink_cycle;

        /* New entries go at the end of the intr_unlink list */
        if (ehci->intr_unlink)
                ehci->intr_unlink_last->unlink_next = qh;
        else
                ehci->intr_unlink = qh;
        ehci->intr_unlink_last = qh;

        if (ehci->intr_unlinking)
                ;       /* Avoid recursive calls */
        else if (ehci->rh_state < EHCI_RH_RUNNING)
                ehci_handle_intr_unlinks(ehci);
        else if (ehci->intr_unlink == qh) {
                ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
                ++ehci->intr_unlink_cycle;
        }
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qh_hw       *hw = qh->hw;
        int                     rc;

        qh->qh_state = QH_STATE_IDLE;
        hw->hw_next = EHCI_LIST_END(ehci);

        qh_completions(ehci, qh);

        /* reschedule QH iff another request is queued */
        if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
                rc = qh_schedule(ehci, qh);

                /* An error here likely indicates handshake failure
                 * or no space left in the schedule.  Neither fault
                 * should happen often ...
                 *
                 * FIXME kill the now-dysfunctional queued urbs
                 */
                if (rc != 0)
                        ehci_err(ehci, "can't reschedule qh %p, err %d\n",
                                        qh, rc);
        }

        /* maybe turn off periodic schedule */
        --ehci->intr_count;
        disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period (
        struct ehci_hcd *ehci,
        unsigned        frame,
        unsigned        uframe,
        unsigned        period,
        unsigned        usecs
) {
        int             claimed;

        /* complete split running into next frame?
         * given FSTN support, we could sometimes check...
         */
        if (uframe >= 8)
                return 0;

        /* convert "usecs we need" to "max already claimed" */
        usecs = ehci->uframe_periodic_max - usecs;

        /* we "know" 2 and 4 uframe intervals were rejected; so
         * for period 0, check _every_ microframe in the schedule.
         */
        if (unlikely (period == 0)) {
                do {
                        for (uframe = 0; uframe < 7; uframe++) {
                                claimed = periodic_usecs (ehci, frame, uframe);
                                if (claimed > usecs)
                                        return 0;
                        }
                } while ((frame += 1) < ehci->periodic_size);

        /* just check the specified uframe, at that period */
        } else {
                do {
                        claimed = periodic_usecs (ehci, frame, uframe);
                        if (claimed > usecs)
                                return 0;
                } while ((frame += period) < ehci->periodic_size);
        }

        // success!
        return 1;
}
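
/*
 * Numeric sketch of the inversion above (illustrative values): with
 * uframe_periodic_max == 100 and a transfer needing 30 usecs, the
 * test becomes "claimed > 70", so a uframe with 75 usecs already
 * committed fails while one with 60 usecs passes.
 */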

static int check_intr_schedule (
        struct ehci_hcd         *ehci,
        unsigned                frame,
        unsigned                uframe,
        const struct ehci_qh    *qh,
        __hc32                  *c_maskp
)
{
        int             retval = -ENOSPC;
        u8              mask = 0;

        if (qh->c_usecs && uframe >= 6)         /* FSTN territory? */
                goto done;

        if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
                goto done;
        if (!qh->c_usecs) {
                retval = 0;
                *c_maskp = 0;
                goto done;
        }

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
        if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
                                qh->tt_usecs)) {
                unsigned i;

                /* TODO : this may need FSTN for SSPLIT in uframe 5. */
                for (i=uframe+1; i<8 && i<uframe+4; i++)
                        if (!check_period (ehci, frame, i,
                                                qh->period, qh->c_usecs))
                                goto done;
                        else
                                mask |= 1 << i;

                retval = 0;

                *c_maskp = cpu_to_hc32(ehci, mask << 8);
        }
#else
        /* Make sure this tt's buffer is also available for CSPLITs.
         * We pessimize a bit; probably the typical full speed case
         * doesn't need the second CSPLIT.
         *
         * NOTE:  both SPLIT and CSPLIT could be checked in just
         * one smart pass...
         */
        mask = 0x03 << (uframe + qh->gap_uf);
        *c_maskp = cpu_to_hc32(ehci, mask << 8);

        mask |= 1 << uframe;
        if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
                if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
                                        qh->period, qh->c_usecs))
                        goto done;
                if (!check_period (ehci, frame, uframe + qh->gap_uf,
                                        qh->period, qh->c_usecs))
                        goto done;
                retval = 0;
        }
#endif
done:
        return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        int             status;
        unsigned        uframe;
        __hc32          c_mask;
        unsigned        frame;          /* 0..(qh->period - 1), or NO_FRAME */
        struct ehci_qh_hw       *hw = qh->hw;

        qh_refresh(ehci, qh);
        hw->hw_next = EHCI_LIST_END(ehci);
        frame = qh->start;

        /* reuse the previous schedule slots, if we can */
        if (frame < qh->period) {
                uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
                status = check_intr_schedule (ehci, frame, --uframe,
                                qh, &c_mask);
        } else {
                uframe = 0;
                c_mask = 0;
                status = -ENOSPC;
        }

        /* else scan the schedule to find a group of slots such that all
         * uframes have enough periodic bandwidth available.
         */
        if (status) {
                /* "normal" case, uframing flexible except with splits */
                if (qh->period) {
                        int             i;

                        for (i = qh->period; status && i > 0; --i) {
                                frame = ++ehci->random_frame % qh->period;
                                for (uframe = 0; uframe < 8; uframe++) {
                                        status = check_intr_schedule (ehci,
                                                        frame, uframe, qh,
                                                        &c_mask);
                                        if (status == 0)
                                                break;
                                }
                        }

                /* qh->period == 0 means every uframe */
                } else {
                        frame = 0;
                        status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
                }
                if (status)
                        goto done;
                qh->start = frame;

                /* reset S-frame and (maybe) C-frame masks */
                hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
                hw->hw_info2 |= qh->period
                        ? cpu_to_hc32(ehci, 1 << uframe)
                        : cpu_to_hc32(ehci, QH_SMASK);
                hw->hw_info2 |= c_mask;
        } else
                ehci_dbg (ehci, "reused qh %p schedule\n", qh);

        /* stuff into the periodic schedule */
        qh_link_periodic(ehci, qh);
done:
        return status;
}

static int intr_submit (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
        gfp_t                   mem_flags
) {
        unsigned                epnum;
        unsigned long           flags;
        struct ehci_qh          *qh;
        int                     status;
        struct list_head        empty;

        /* get endpoint and transfer/schedule data */
        epnum = urb->ep->desc.bEndpointAddress;

        spin_lock_irqsave (&ehci->lock, flags);

        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
                status = -ESHUTDOWN;
                goto done_not_linked;
        }
        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
        if (unlikely(status))
                goto done_not_linked;

        /* get qh and force any scheduling errors */
        INIT_LIST_HEAD (&empty);
        qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
        if (qh == NULL) {
                status = -ENOMEM;
                goto done;
        }
        if (qh->qh_state == QH_STATE_IDLE) {
                if ((status = qh_schedule (ehci, qh)) != 0)
                        goto done;
        }

        /* then queue the urb's tds to the qh */
        qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
        BUG_ON (qh == NULL);

        /* ... update usbfs periodic stats */
        ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
        if (unlikely(status))
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
        spin_unlock_irqrestore (&ehci->lock, flags);
        if (status)
                qtd_list_free (ehci, urb, qtd_list);

        return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
        struct ehci_qh          *qh;

        list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
                        intr_node) {
 rescan:
                /* clean any finished work for this qh */
                if (!list_empty(&qh->qtd_list)) {
                        int temp;

                        /*
                         * Unlinks could happen here; completion reporting
                         * drops the lock.  That's why ehci->qh_scan_next
                         * always holds the next qh to scan; if the next qh
                         * gets unlinked then ehci->qh_scan_next is adjusted
                         * in qh_unlink_periodic().
                         */
                        temp = qh_completions(ehci, qh);
                        if (unlikely(qh->needs_rescan ||
                                        (list_empty(&qh->qtd_list) &&
                                                qh->qh_state == QH_STATE_LINKED)))
                                start_unlink_intr(ehci, qh);
                        else if (temp != 0)
                                goto rescan;
                }
        }
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
        struct ehci_iso_stream *stream;

        stream = kzalloc(sizeof *stream, mem_flags);
        if (likely (stream != NULL)) {
                INIT_LIST_HEAD(&stream->td_list);
                INIT_LIST_HEAD(&stream->free_list);
                stream->next_uframe = -1;
        }
        return stream;
}

static void
iso_stream_init (
        struct ehci_hcd         *ehci,
        struct ehci_iso_stream  *stream,
        struct usb_device       *dev,
        int                     pipe,
        unsigned                interval
)
{
        static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

        u32                     buf1;
        unsigned                epnum, maxp;
        int                     is_input;
        long                    bandwidth;

        /*
         * this might be a "high bandwidth" highspeed endpoint,
         * as encoded in the ep descriptor's wMaxPacket field
         */
        epnum = usb_pipeendpoint (pipe);
        is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
        maxp = usb_maxpacket(dev, pipe, !is_input);
        if (is_input) {
                buf1 = (1 << 11);
        } else {
                buf1 = 0;
        }

        /* knows about ITD vs SITD */
        if (dev->speed == USB_SPEED_HIGH) {
                unsigned multi = hb_mult(maxp);

                stream->highspeed = 1;

                maxp = max_packet(maxp);
                buf1 |= maxp;
                maxp *= multi;

                stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
                stream->buf1 = cpu_to_hc32(ehci, buf1);
                stream->buf2 = cpu_to_hc32(ehci, multi);

                /* usbfs wants to report the average usecs per frame tied up
                 * when transfers on this endpoint are scheduled ...
                 */
                stream->usecs = HS_USECS_ISO (maxp);
                bandwidth = stream->usecs * 8;
                bandwidth /= interval;

        } else {
                u32             addr;
                int             think_time;
                int             hs_transfers;

                addr = dev->ttport << 24;
                if (!ehci_is_TDI(ehci)
                                || (dev->tt->hub !=
                                        ehci_to_hcd(ehci)->self.root_hub))
                        addr |= dev->tt->hub->devnum << 16;
                addr |= epnum << 8;
                addr |= dev->devnum;
                stream->usecs = HS_USECS_ISO (maxp);
                think_time = dev->tt ? dev->tt->think_time : 0;
                stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
                                dev->speed, is_input, 1, maxp));
                hs_transfers = max (1u, (maxp + 187) / 188);
                if (is_input) {
                        u32     tmp;

                        addr |= 1 << 31;
                        stream->c_usecs = stream->usecs;
                        stream->usecs = HS_USECS_ISO (1);
                        stream->raw_mask = 1;

                        /* c-mask as specified in USB 2.0 11.18.4 3.c */
                        tmp = (1 << (hs_transfers + 2)) - 1;
                        stream->raw_mask |= tmp << (8 + 2);
                } else
                        stream->raw_mask = smask_out [hs_transfers - 1];
                bandwidth = stream->usecs + stream->c_usecs;
                bandwidth /= interval << 3;

                /* stream->splits gets created from raw_mask later */
                stream->address = cpu_to_hc32(ehci, addr);
        }
        stream->bandwidth = bandwidth;

        stream->udev = dev;

        stream->bEndpointAddress = is_input | epnum;
        stream->interval = interval;
        stream->maxp = maxp;
}
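
/*
 * Decode example for the high bandwidth case above (illustrative
 * values): a highspeed isoc endpoint with wMaxPacketSize 0x1400 has
 * hb_mult() == 3 and max_packet() == 1024, so buf1 encodes 1024-byte
 * packets, buf2 the multiplier 3, and maxp becomes 3 * 1024 bytes
 * per uframe for the bandwidth math above.
 */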

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
        unsigned                epnum;
        struct ehci_iso_stream  *stream;
        struct usb_host_endpoint *ep;
        unsigned long           flags;

        epnum = usb_pipeendpoint (urb->pipe);
        if (usb_pipein(urb->pipe))
                ep = urb->dev->ep_in[epnum];
        else
                ep = urb->dev->ep_out[epnum];

        spin_lock_irqsave (&ehci->lock, flags);
        stream = ep->hcpriv;

        if (unlikely (stream == NULL)) {
                stream = iso_stream_alloc(GFP_ATOMIC);
                if (likely (stream != NULL)) {
                        ep->hcpriv = stream;
                        stream->ep = ep;
                        iso_stream_init(ehci, stream, urb->dev, urb->pipe,
                                        urb->interval);
                }

        /* if dev->ep [epnum] is a QH, hw is set */
        } else if (unlikely (stream->hw != NULL)) {
                ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
                        urb->dev->devpath, epnum,
                        usb_pipein(urb->pipe) ? "in" : "out");
                stream = NULL;
        }

        spin_unlock_irqrestore (&ehci->lock, flags);
        return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
        struct ehci_iso_sched   *iso_sched;
        int                     size = sizeof *iso_sched;

        size += packets * sizeof (struct ehci_iso_packet);
        iso_sched = kzalloc(size, mem_flags);
        if (likely (iso_sched != NULL)) {
                INIT_LIST_HEAD (&iso_sched->td_list);
        }
        return iso_sched;
}

static inline void
itd_sched_init(
        struct ehci_hcd         *ehci,
        struct ehci_iso_sched   *iso_sched,
        struct ehci_iso_stream  *stream,
        struct urb              *urb
)
{
        unsigned        i;
        dma_addr_t      dma = urb->transfer_dma;

        /* how many uframes are needed for these transfers */
        iso_sched->span = urb->number_of_packets * stream->interval;

        /* figure out per-uframe itd fields that we'll need later
         * when we fit new itds into the schedule.
         */
        for (i = 0; i < urb->number_of_packets; i++) {
                struct ehci_iso_packet  *uframe = &iso_sched->packet [i];
                unsigned                length;
                dma_addr_t              buf;
                u32                     trans;

                length = urb->iso_frame_desc [i].length;
                buf = dma + urb->iso_frame_desc [i].offset;

                trans = EHCI_ISOC_ACTIVE;
                trans |= buf & 0x0fff;
                if (unlikely (((i + 1) == urb->number_of_packets))
                                && !(urb->transfer_flags & URB_NO_INTERRUPT))
                        trans |= EHCI_ITD_IOC;
                trans |= length << 16;
                uframe->transaction = cpu_to_hc32(ehci, trans);

                /* might need to cross a buffer page within a uframe */
                uframe->bufp = (buf & ~(u64)0x0fff);
                buf += length;
                if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
                        uframe->cross = 1;
        }
}
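
/*
 * Sketch of one transaction word built above (illustrative values):
 * a 1024-byte packet at page offset 0x200 yields
 *
 *      trans = EHCI_ISOC_ACTIVE | 0x200 | (1024 << 16);
 *
 * bits 11:0 carry the page offset and bits 27:16 the length; the IOC
 * bit is OR'd in only for the urb's final packet, and only when
 * URB_NO_INTERRUPT doesn't suppress it.
 */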

static void
iso_sched_free (
        struct ehci_iso_stream  *stream,
        struct ehci_iso_sched   *iso_sched
)
{
        if (!iso_sched)
                return;
        // caller must hold ehci->lock!
        list_splice (&iso_sched->td_list, &stream->free_list);
        kfree (iso_sched);
}

static int
itd_urb_transaction (
        struct ehci_iso_stream  *stream,
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        gfp_t                   mem_flags
)
{
        struct ehci_itd         *itd;
        dma_addr_t              itd_dma;
        int                     i;
        unsigned                num_itds;
        struct ehci_iso_sched   *sched;
        unsigned long           flags;

        sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
        if (unlikely (sched == NULL))
                return -ENOMEM;

        itd_sched_init(ehci, sched, stream, urb);

        if (urb->interval < 8)
                num_itds = 1 + (sched->span + 7) / 8;
        else
                num_itds = urb->number_of_packets;

        /* allocate/init ITDs */
        spin_lock_irqsave (&ehci->lock, flags);
        for (i = 0; i < num_itds; i++) {

                /*
                 * Use iTDs from the free list, but not iTDs that may
                 * still be in use by the hardware.
                 */
                if (likely(!list_empty(&stream->free_list))) {
                        itd = list_first_entry(&stream->free_list,
                                        struct ehci_itd, itd_list);
                        if (itd->frame == ehci->now_frame)
                                goto alloc_itd;
                        list_del (&itd->itd_list);
                        itd_dma = itd->itd_dma;
                } else {
 alloc_itd:
                        spin_unlock_irqrestore (&ehci->lock, flags);
                        itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
                                        &itd_dma);
                        spin_lock_irqsave (&ehci->lock, flags);
                        if (!itd) {
                                iso_sched_free(stream, sched);
                                spin_unlock_irqrestore(&ehci->lock, flags);
                                return -ENOMEM;
                        }
                }

                memset (itd, 0, sizeof *itd);
                itd->itd_dma = itd_dma;
                itd->frame = 9999;              /* an invalid value */
                list_add (&itd->itd_list, &sched->td_list);
        }
        spin_unlock_irqrestore (&ehci->lock, flags);

        /* temporarily store schedule info in hcpriv */
        urb->hcpriv = sched;
        urb->error_count = 0;
        return 0;
}
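
/*
 * Sizing example (illustrative): a highspeed urb with interval 1 and
 * 16 packets spans 16 uframes, so num_itds = 1 + (16 + 7) / 8 = 3;
 * the spare iTD covers a start that isn't frame aligned.  With
 * interval >= 8 each packet gets its own iTD instead.
 */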

/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
        struct ehci_hcd         *ehci,
        u32                     mod,
        u32                     uframe,
        u8                      usecs,
        u32                     period
)
{
        uframe %= period;
        do {
                /* can't commit more than uframe_periodic_max usec */
                if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
                                > (ehci->uframe_periodic_max - usecs))
                        return 0;

                /* we know urb->interval is 2^N uframes */
                uframe += period;
        } while (uframe < mod);
        return 1;
}
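
/*
 * Example check (illustrative numbers): with a 1024-frame schedule,
 * mod = 8192 uframes; an itd needing 50 usecs every 8 uframes starting
 * at uframe 3 probes uframes 3, 11, 19, ... and succeeds only if each
 * of them still has at least 50 usecs under uframe_periodic_max.
 */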

static inline int
sitd_slot_ok (
        struct ehci_hcd         *ehci,
        u32                     mod,
        struct ehci_iso_stream  *stream,
        u32                     uframe,
        struct ehci_iso_sched   *sched,
        u32                     period_uframes
)
{
        u32                     mask, tmp;
        u32                     frame, uf;

        mask = stream->raw_mask << (uframe & 7);

        /* for IN, don't wrap CSPLIT into the next frame */
        if (mask & ~0xffff)
                return 0;

        /* check bandwidth */
        uframe %= period_uframes;
        frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
        /* The tt's fullspeed bus bandwidth must be available.
         * tt_available scheduling guarantees 10+% for control/bulk.
         */
        uf = uframe & 7;
        if (!tt_available(ehci, period_uframes >> 3,
                        stream->udev, frame, uf, stream->tt_usecs))
                return 0;
#else
        /* tt must be idle for start(s), any gap, and csplit.
         * assume scheduling slop leaves 10+% for control/bulk.
         */
        if (!tt_no_collision(ehci, period_uframes >> 3,
                        stream->udev, frame, mask))
                return 0;
#endif

        /* this multi-pass logic is simple, but performance may
         * suffer when the schedule data isn't cached.
         */
        do {
                u32             max_used;

                frame = uframe >> 3;
                uf = uframe & 7;

                /* check starts (OUT uses more than one) */
                max_used = ehci->uframe_periodic_max - stream->usecs;
                for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
                        if (periodic_usecs (ehci, frame, uf) > max_used)
                                return 0;
                }

                /* for IN, check CSPLIT */
                if (stream->c_usecs) {
                        uf = uframe & 7;
                        max_used = ehci->uframe_periodic_max - stream->c_usecs;
                        do {
                                tmp = 1 << uf;
                                tmp <<= 8;
                                if ((stream->raw_mask & tmp) == 0)
                                        continue;
                                if (periodic_usecs (ehci, frame, uf)
                                                > max_used)
                                        return 0;
                        } while (++uf < 8);
                }

                /* we know urb->interval is 2^N uframes */
                uframe += period_uframes;
        } while (uframe < mod);

        stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
        return 1;
}
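
/*
 * Mask example (illustrative): a fullspeed iso IN needing one high
 * speed transfer per split gets raw_mask 0x1c01 from iso_stream_init()
 * -- ssplit in relative uframe 0, csplits in uframes 2-4.  Shifted to
 * uframe 2 that becomes 0x7004 and still fits in 16 bits; shifted to
 * uframe 4 it becomes 0x1c010, so the "mask & ~0xffff" test above
 * rejects it because a csplit would wrap into the next frame.
 */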

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULING_DELAY        40      /* microframes */
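/* i.e. 5 msec of slack, covering irq latency while the queue is primed */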
1343
1344static int
1345iso_stream_schedule (
1346        struct ehci_hcd         *ehci,
1347        struct urb              *urb,
1348        struct ehci_iso_stream  *stream
1349)
1350{
1351        u32                     now, base, next, start, period, span;
1352        int                     status;
1353        unsigned                mod = ehci->periodic_size << 3;
1354        struct ehci_iso_sched   *sched = urb->hcpriv;
1355
1356        period = urb->interval;
1357        span = sched->span;
1358        if (!stream->highspeed) {
1359                period <<= 3;
1360                span <<= 3;
1361        }
1362
1363        now = ehci_read_frame_index(ehci) & (mod - 1);
1364
1365        /* Typical case: reuse current schedule, stream is still active.
1366         * Hopefully there are no gaps from the host falling behind
1367         * (irq delays etc).  If there are, the behavior depends on
1368         * whether URB_ISO_ASAP is set.
1369         */
1370        if (likely (!list_empty (&stream->td_list))) {
1371
1372                /* Take the isochronous scheduling threshold into account */
1373                if (ehci->i_thresh)
1374                        next = now + ehci->i_thresh;    /* uframe cache */
1375                else
1376                        next = (now + 2 + 7) & ~0x07;   /* full frame cache */
1377
1378                /*
1379                 * Use ehci->last_iso_frame as the base.  There can't be any
1380                 * TDs scheduled for earlier than that.
1381                 */
1382                base = ehci->last_iso_frame << 3;
1383                next = (next - base) & (mod - 1);
1384                start = (stream->next_uframe - base) & (mod - 1);
1385
1386                /* Is the schedule already full? */
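                /*
                 * "start" is the stream's next free slot as an offset
                 * from base; if it has wrapped to within one period of
                 * base, every slot in the ring is already occupied.
                 */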
1387                if (unlikely(start < period)) {
1388                        ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
1389                                        urb, stream->next_uframe, base,
1390                                        period, mod);
1391                        status = -ENOSPC;
1392                        goto fail;
1393                }
1394
1395                /* Behind the scheduling threshold? */
1396                if (unlikely(start < next)) {
1397
1398                        /* URB_ISO_ASAP: Round up to the first available slot */
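                        /*
                         * period is a power of 2, so "& -period" rounds
                         * (next - start) up to a whole number of periods:
                         * e.g. period 8, gap 13 -> start advances by 16.
                         */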
1399                        if (urb->transfer_flags & URB_ISO_ASAP)
1400                                start += (next - start + period - 1) & -period;
1401
1402                        /*
1403                         * Not ASAP: Use the next slot in the stream.  If
1404                         * the entire URB falls before the threshold, fail.
1405                         */
1406                        else if (start + span - period < next) {
1407                                ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
1408                                                urb, start + base,
1409                                                span - period, next + base);
1410                                status = -EXDEV;
1411                                goto fail;
1412                        }
1413                }
1414
1415                start += base;
1416        }
1417
1418        /* Need to schedule: when's the next (u)frame we could start?
1419         * SCHEDULING_DELAY is bigger than ehci->i_thresh requires;
1420         * scheduling itself isn't free, and the extra slack copes with
1421         * reasonably slow cpus.  It can also help high bandwidth if the
1422         * dma and irq loads don't jump until after the queue is primed.
1423         */
1424        else {
1425                int done = 0;
1426
1427                base = now & ~0x07;
1428                start = base + SCHEDULING_DELAY;
1429
1430                /* find a uframe slot with enough bandwidth.
1431                 * Early uframes are more precious because full-speed
1432                 * iso IN transfers can't use late uframes, so scan the
1433                 * latest candidates first; allocate early ones last.
1434                 */
1435                next = start;
1436                start += period;
1437                do {
1438                        start--;
1439                        /* check schedule: enough space? */
1440                        if (stream->highspeed) {
1441                                if (itd_slot_ok(ehci, mod, start,
1442                                                stream->usecs, period))
1443                                        done = 1;
1444                        } else {
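                                /*
                                 * A start in uframe 6 or 7 would leave
                                 * too little room in this frame for the
                                 * following CSPLITs; splits never wrap
                                 * into the next frame (no back pointers).
                                 */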
1445                                if ((start % 8) >= 6)
1446                                        continue;
1447                                if (sitd_slot_ok(ehci, mod, stream,
1448                                                start, sched, period))
1449                                        done = 1;
1450                        }
1451                } while (start > next && !done);
1452
1453                /* no room in the schedule */
1454                if (!done) {
1455                        ehci_dbg(ehci, "iso sched full %p", urb);
1456                        status = -ENOSPC;
1457                        goto fail;
1458                }
1459        }
1460
1461        /* Tried to schedule too far into the future? */
1462        if (unlikely(start - base + span - period >= mod)) {
1463                ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
1464                                urb, start - base, span - period, mod);
1465                status = -EFBIG;
1466                goto fail;
1467        }
1468
1469        stream->next_uframe = start & (mod - 1);
1470
1471        /* report high speed start in uframes; full speed, in frames */
1472        urb->start_frame = stream->next_uframe;
1473        if (!stream->highspeed)
1474                urb->start_frame >>= 3;
1475
1476        /* Make sure scan_isoc() sees these */
1477        if (ehci->isoc_count == 0)
1478                ehci->last_iso_frame = now >> 3;
1479        return 0;
1480
1481 fail:
1482        iso_sched_free(stream, sched);
1483        urb->hcpriv = NULL;
1484        return status;
1485}
1486
1487/*-------------------------------------------------------------------------*/
1488
1489static inline void
1490itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1491                struct ehci_itd *itd)
1492{
1493        int i;
1494
1495        /* it's been recently zeroed */
1496        itd->hw_next = EHCI_LIST_END(ehci);
1497        itd->hw_bufp [0] = stream->buf0;
1498        itd->hw_bufp [1] = stream->buf1;
1499        itd->hw_bufp [2] = stream->buf2;
1500
1501        for (i = 0; i < 8; i++)
1502                itd->index[i] = -1;
1503
1504        /* All other fields are filled when scheduling */
1505}
1506
1507static inline void
1508itd_patch(
1509        struct ehci_hcd         *ehci,
1510        struct ehci_itd         *itd,
1511        struct ehci_iso_sched   *iso_sched,
1512        unsigned                index,
1513        u16                     uframe
1514)
1515{
1516        struct ehci_iso_packet  *uf = &iso_sched->packet [index];
1517        unsigned                pg = itd->pg;
1518
1519        // BUG_ON (pg == 6 && uf->cross);
1520
1521        uframe &= 0x07;
1522        itd->index [uframe] = index;
1523
1524        itd->hw_transaction[uframe] = uf->transaction;
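        /* the PG field (bits 14:12) selects which hw_bufp page this
         * uframe's transaction uses
         */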
1525        itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1526        itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1527        itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1528
1529        /* iso_frame_desc[].offset must be strictly increasing */
1530        if (unlikely (uf->cross)) {
1531                u64     bufp = uf->bufp + 4096;
1532
1533                itd->pg = ++pg;
1534                itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1535                itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1536        }
1537}
1538
1539static inline void
1540itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1541{
1542        union ehci_shadow       *prev = &ehci->pshadow[frame];
1543        __hc32                  *hw_p = &ehci->periodic[frame];
1544        union ehci_shadow       here = *prev;
1545        __hc32                  type = 0;
1546
1547        /* skip any iso nodes which might belong to previous microframes */
1548        while (here.ptr) {
1549                type = Q_NEXT_TYPE(ehci, *hw_p);
1550                if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1551                        break;
1552                prev = periodic_next_shadow(ehci, prev, type);
1553                hw_p = shadow_next_periodic(ehci, &here, type);
1554                here = *prev;
1555        }
1556
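        /* splice the iTD in before the first QH, keeping all iso nodes
         * ahead of any interrupt QHs on this frame's list
         */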
1557        itd->itd_next = here;
1558        itd->hw_next = *hw_p;
1559        prev->itd = itd;
1560        itd->frame = frame;
1561        wmb ();
1562        *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1563}
1564
1565/* fit urb's itds into the selected schedule slot; activate as needed */
1566static void itd_link_urb(
1567        struct ehci_hcd         *ehci,
1568        struct urb              *urb,
1569        unsigned                mod,
1570        struct ehci_iso_stream  *stream
1571)
1572{
1573        int                     packet;
1574        unsigned                next_uframe, uframe, frame;
1575        struct ehci_iso_sched   *iso_sched = urb->hcpriv;
1576        struct ehci_itd         *itd;
1577
1578        next_uframe = stream->next_uframe & (mod - 1);
1579
1580        if (unlikely (list_empty(&stream->td_list))) {
1581                ehci_to_hcd(ehci)->self.bandwidth_allocated
1582                                += stream->bandwidth;
1583                ehci_vdbg (ehci,
1584                        "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
1585                        urb->dev->devpath, stream->bEndpointAddress & 0x0f,
1586                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
1587                        urb->interval,
1588                        next_uframe >> 3, next_uframe & 0x7);
1589        }
1590
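        /* going from no iso traffic to some: apply the AMD chipset PLL
         * workaround until the last iso transfer completes
         */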
1591        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1592                if (ehci->amd_pll_fix == 1)
1593                        usb_amd_quirk_pll_disable();
1594        }
1595
1596        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1597
1598        /* fill iTDs uframe by uframe */
1599        for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
1600                if (itd == NULL) {
1601                        /* ASSERT:  we have all necessary itds */
1602                        // BUG_ON (list_empty (&iso_sched->td_list));
1603
1604                        /* ASSERT:  no itds for this endpoint in this uframe */
1605
1606                        itd = list_entry (iso_sched->td_list.next,
1607                                        struct ehci_itd, itd_list);
1608                        list_move_tail (&itd->itd_list, &stream->td_list);
1609                        itd->stream = stream;
1610                        itd->urb = urb;
1611                        itd_init (ehci, stream, itd);
1612                }
1613
1614                uframe = next_uframe & 0x07;
1615                frame = next_uframe >> 3;
1616
1617                itd_patch(ehci, itd, iso_sched, packet, uframe);
1618
1619                next_uframe += stream->interval;
1620                next_uframe &= mod - 1;
1621                packet++;
1622
1623                /* link completed itds into the schedule */
1624                if (((next_uframe >> 3) != frame)
1625                                || packet == urb->number_of_packets) {
1626                        itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1627                        itd = NULL;
1628                }
1629        }
1630        stream->next_uframe = next_uframe;
1631
1632        /* don't need that schedule data any more */
1633        iso_sched_free (stream, iso_sched);
1634        urb->hcpriv = stream;
1635
1636        ++ehci->isoc_count;
1637        enable_periodic(ehci);
1638}
1639
1640#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1641
1642/* Process and recycle a completed ITD.  Return true iff its urb completed,
1643 * and hence its completion callback probably added things to the hardware
1644 * schedule.
1645 *
1646 * Note that we carefully avoid recycling this descriptor until after any
1647 * completion callback runs, so that it won't be reused quickly.  That is,
1648 * assuming (a) no more than two urbs per frame on this endpoint, and also
1649 * (b) only this endpoint's completions submit URBs.  It seems some silicon
1650 * corrupts things if you reuse completed descriptors very quickly...
1651 */
1652static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1653{
1654        struct urb                              *urb = itd->urb;
1655        struct usb_iso_packet_descriptor        *desc;
1656        u32                                     t;
1657        unsigned                                uframe;
1658        int                                     urb_index = -1;
1659        struct ehci_iso_stream                  *stream = itd->stream;
1660        struct usb_device                       *dev;
1661        bool                                    retval = false;
1662
1663        /* for each uframe with a packet */
1664        for (uframe = 0; uframe < 8; uframe++) {
1665                if (likely (itd->index[uframe] == -1))
1666                        continue;
1667                urb_index = itd->index[uframe];
1668                desc = &urb->iso_frame_desc [urb_index];
1669
1670                t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
1671                itd->hw_transaction [uframe] = 0;
1672
1673                /* report transfer status */
1674                if (unlikely (t & ISO_ERRS)) {
1675                        urb->error_count++;
1676                        if (t & EHCI_ISOC_BUF_ERR)
1677                                desc->status = usb_pipein (urb->pipe)
1678                                        ? -ENOSR  /* hc couldn't read */
1679                                        : -ECOMM; /* hc couldn't write */
1680                        else if (t & EHCI_ISOC_BABBLE)
1681                                desc->status = -EOVERFLOW;
1682                        else /* (t & EHCI_ISOC_XACTERR) */
1683                                desc->status = -EPROTO;
1684
1685                        /* HC need not update length with this error */
1686                        if (!(t & EHCI_ISOC_BABBLE)) {
1687                                desc->actual_length = EHCI_ITD_LENGTH(t);
1688                                urb->actual_length += desc->actual_length;
1689                        }
1690                } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
1691                        desc->status = 0;
1692                        desc->actual_length = EHCI_ITD_LENGTH(t);
1693                        urb->actual_length += desc->actual_length;
1694                } else {
1695                        /* URB was too late; the transaction never ran */
1696                        urb->error_count++;
1697                }
1698        }
1699
1700        /* handle completion now? */
1701        if (likely ((urb_index + 1) != urb->number_of_packets))
1702                goto done;
1703
1704        /* ASSERT: it's really the last itd for this urb
1705        list_for_each_entry (itd, &stream->td_list, itd_list)
1706                BUG_ON (itd->urb == urb);
1707         */
1708
1709        /* give urb back to the driver; completion often (re)submits */
1710        dev = urb->dev;
1711        ehci_urb_done(ehci, urb, 0);
1712        retval = true;
1713        urb = NULL;
1714
1715        --ehci->isoc_count;
1716        disable_periodic(ehci);
1717
1718        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1719        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1720                if (ehci->amd_pll_fix == 1)
1721                        usb_amd_quirk_pll_enable();
1722        }
1723
1724        if (unlikely(list_is_singular(&stream->td_list))) {
1725                ehci_to_hcd(ehci)->self.bandwidth_allocated
1726                                -= stream->bandwidth;
1727                ehci_vdbg (ehci,
1728                        "deschedule devp %s ep%d%s-iso\n",
1729                        dev->devpath, stream->bEndpointAddress & 0x0f,
1730                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
1731        }
1732
1733done:
1734        itd->urb = NULL;
1735
1736        /* Add to the end of the free list for later reuse */
1737        list_move_tail(&itd->itd_list, &stream->free_list);
1738
1739        /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1740        if (list_empty(&stream->td_list)) {
1741                list_splice_tail_init(&stream->free_list,
1742                                &ehci->cached_itd_list);
1743                start_free_itds(ehci);
1744        }
1745
1746        return retval;
1747}
1748
1749/*-------------------------------------------------------------------------*/
1750
1751static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1752        gfp_t mem_flags)
1753{
1754        int                     status = -EINVAL;
1755        unsigned long           flags;
1756        struct ehci_iso_stream  *stream;
1757
1758        /* Get iso_stream head */
1759        stream = iso_stream_find (ehci, urb);
1760        if (unlikely (stream == NULL)) {
1761                ehci_dbg (ehci, "can't get iso stream\n");
1762                return -ENOMEM;
1763        }
1764        if (unlikely (urb->interval != stream->interval)) {
1765                ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
1766                        stream->interval, urb->interval);
1767                goto done;
1768        }
1769
1770#ifdef EHCI_URB_TRACE
1771        ehci_dbg (ehci,
1772                "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1773                __func__, urb->dev->devpath, urb,
1774                usb_pipeendpoint (urb->pipe),
1775                usb_pipein (urb->pipe) ? "in" : "out",
1776                urb->transfer_buffer_length,
1777                urb->number_of_packets, urb->interval,
1778                stream);
1779#endif
1780
1781        /* allocate ITDs w/o locking anything */
1782        status = itd_urb_transaction (stream, ehci, urb, mem_flags);
1783        if (unlikely (status < 0)) {
1784                ehci_dbg (ehci, "can't init itds\n");
1785                goto done;
1786        }
1787
1788        /* schedule ... need to lock */
1789        spin_lock_irqsave (&ehci->lock, flags);
1790        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1791                status = -ESHUTDOWN;
1792                goto done_not_linked;
1793        }
1794        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1795        if (unlikely(status))
1796                goto done_not_linked;
1797        status = iso_stream_schedule(ehci, urb, stream);
1798        if (likely (status == 0))
1799                itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1800        else
1801                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1802 done_not_linked:
1803        spin_unlock_irqrestore (&ehci->lock, flags);
1804 done:
1805        return status;
1806}
1807
1808/*-------------------------------------------------------------------------*/
1809
1810/*
1811 * "Split ISO TDs" ... used for USB 1.1 devices going through the
1812 * TTs in USB 2.0 hubs.  These need microframe scheduling.
1813 */
1814
1815static inline void
1816sitd_sched_init(
1817        struct ehci_hcd         *ehci,
1818        struct ehci_iso_sched   *iso_sched,
1819        struct ehci_iso_stream  *stream,
1820        struct urb              *urb
1821)
1822{
1823        unsigned        i;
1824        dma_addr_t      dma = urb->transfer_dma;
1825
1826        /* how many frames are needed for these transfers */
1827        iso_sched->span = urb->number_of_packets * stream->interval;
1828
1829        /* figure out per-frame sitd fields that we'll need later
1830         * when we fit new sitds into the schedule.
1831         */
1832        for (i = 0; i < urb->number_of_packets; i++) {
1833                struct ehci_iso_packet  *packet = &iso_sched->packet [i];
1834                unsigned                length;
1835                dma_addr_t              buf;
1836                u32                     trans;
1837
1838                length = urb->iso_frame_desc [i].length & 0x03ff;
1839                buf = dma + urb->iso_frame_desc [i].offset;
1840
1841                trans = SITD_STS_ACTIVE;
1842                if (((i + 1) == urb->number_of_packets)
1843                                && !(urb->transfer_flags & URB_NO_INTERRUPT))
1844                        trans |= SITD_IOC;
1845                trans |= length << 16;
1846                packet->transaction = cpu_to_hc32(ehci, trans);
1847
1848                /* might need to cross a buffer page within a td */
1849                packet->bufp = buf;
1850                packet->buf1 = (buf + length) & ~0x0fff;
1851                if (packet->buf1 != (buf & ~(u64)0x0fff))
1852                        packet->cross = 1;
1853
1854                /* OUT uses multiple start-splits */
1855                if (stream->bEndpointAddress & USB_DIR_IN)
1856                        continue;
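                /* each start-split carries at most 188 bytes, so this
                 * computes the T-count; multi-split OUTs set TP=BEGIN
                 */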
1857                length = (length + 187) / 188;
1858                if (length > 1) /* BEGIN vs ALL */
1859                        length |= 1 << 3;
1860                packet->buf1 |= length;
1861        }
1862}
1863
1864static int
1865sitd_urb_transaction (
1866        struct ehci_iso_stream  *stream,
1867        struct ehci_hcd         *ehci,
1868        struct urb              *urb,
1869        gfp_t                   mem_flags
1870)
1871{
1872        struct ehci_sitd        *sitd;
1873        dma_addr_t              sitd_dma;
1874        int                     i;
1875        struct ehci_iso_sched   *iso_sched;
1876        unsigned long           flags;
1877
1878        iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1879        if (iso_sched == NULL)
1880                return -ENOMEM;
1881
1882        sitd_sched_init(ehci, iso_sched, stream, urb);
1883
1884        /* allocate/init sITDs */
1885        spin_lock_irqsave (&ehci->lock, flags);
1886        for (i = 0; i < urb->number_of_packets; i++) {
1887
1888                /* NOTE:  for now, we don't try to handle wraparound cases
1889                 * for IN (using sitd->hw_backpointer, like a FSTN), which
1890                 * means we never need two sitds for full speed packets.
1891                 */
1892
1893                /*
1894                 * Use siTDs from the free list, but not siTDs that may
1895                 * still be in use by the hardware.
1896                 */
1897                if (likely(!list_empty(&stream->free_list))) {
1898                        sitd = list_first_entry(&stream->free_list,
1899                                         struct ehci_sitd, sitd_list);
1900                        if (sitd->frame == ehci->now_frame)
1901                                goto alloc_sitd;
1902                        list_del (&sitd->sitd_list);
1903                        sitd_dma = sitd->sitd_dma;
1904                } else {
1905 alloc_sitd:
1906                        spin_unlock_irqrestore (&ehci->lock, flags);
1907                        sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
1908                                        &sitd_dma);
1909                        spin_lock_irqsave (&ehci->lock, flags);
1910                        if (!sitd) {
1911                                iso_sched_free(stream, iso_sched);
1912                                spin_unlock_irqrestore(&ehci->lock, flags);
1913                                return -ENOMEM;
1914                        }
1915                }
1916
1917                memset (sitd, 0, sizeof *sitd);
1918                sitd->sitd_dma = sitd_dma;
1919                sitd->frame = 9999;             /* an invalid value */
1920                list_add (&sitd->sitd_list, &iso_sched->td_list);
1921        }
1922
1923        /* temporarily store schedule info in hcpriv */
1924        urb->hcpriv = iso_sched;
1925        urb->error_count = 0;
1926
1927        spin_unlock_irqrestore (&ehci->lock, flags);
1928        return 0;
1929}
1930
1931/*-------------------------------------------------------------------------*/
1932
1933static inline void
1934sitd_patch(
1935        struct ehci_hcd         *ehci,
1936        struct ehci_iso_stream  *stream,
1937        struct ehci_sitd        *sitd,
1938        struct ehci_iso_sched   *iso_sched,
1939        unsigned                index
1940)
1941{
1942        struct ehci_iso_packet  *uf = &iso_sched->packet [index];
1943        u64                     bufp = uf->bufp;
1944
1945        sitd->hw_next = EHCI_LIST_END(ehci);
1946        sitd->hw_fullspeed_ep = stream->address;
1947        sitd->hw_uframe = stream->splits;
1948        sitd->hw_results = uf->transaction;
1949        sitd->hw_backpointer = EHCI_LIST_END(ehci);
1950
1952        sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
1953        sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
1954
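        /* uf->buf1 already carries the TP and T-count fields packed by
         * sitd_sched_init()
         */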
1955        sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
1956        if (uf->cross)
1957                bufp += 4096;
1958        sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
1959        sitd->index = index;
1960}
1961
1962static inline void
1963sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
1964{
1965        /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
1966        sitd->sitd_next = ehci->pshadow [frame];
1967        sitd->hw_next = ehci->periodic [frame];
1968        ehci->pshadow [frame].sitd = sitd;
1969        sitd->frame = frame;
1970        wmb ();
1971        ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
1972}
1973
1974/* fit urb's sitds into the selected schedule slot; activate as needed */
1975static void sitd_link_urb(
1976        struct ehci_hcd         *ehci,
1977        struct urb              *urb,
1978        unsigned                mod,
1979        struct ehci_iso_stream  *stream
1980)
1981{
1982        int                     packet;
1983        unsigned                next_uframe;
1984        struct ehci_iso_sched   *sched = urb->hcpriv;
1985        struct ehci_sitd        *sitd;
1986
1987        next_uframe = stream->next_uframe;
1988
1989        if (list_empty(&stream->td_list)) {
1990                /* usbfs ignores TT bandwidth */
1991                ehci_to_hcd(ehci)->self.bandwidth_allocated
1992                                += stream->bandwidth;
1993                ehci_vdbg (ehci,
1994                        "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
1995                        urb->dev->devpath, stream->bEndpointAddress & 0x0f,
1996                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
1997                        (next_uframe >> 3) & (ehci->periodic_size - 1),
1998                        stream->interval, hc32_to_cpu(ehci, stream->splits));
1999        }
2000
2001        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2002                if (ehci->amd_pll_fix == 1)
2003                        usb_amd_quirk_pll_disable();
2004        }
2005
2006        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2007
2008        /* fill sITDs frame by frame */
2009        for (packet = 0, sitd = NULL;
2010                        packet < urb->number_of_packets;
2011                        packet++) {
2012
2013                /* ASSERT:  we have all necessary sitds */
2014                BUG_ON (list_empty (&sched->td_list));
2015
2016                /* ASSERT:  no sitds for this endpoint in this frame */
2017
2018                sitd = list_entry (sched->td_list.next,
2019                                struct ehci_sitd, sitd_list);
2020                list_move_tail (&sitd->sitd_list, &stream->td_list);
2021                sitd->stream = stream;
2022                sitd->urb = urb;
2023
2024                sitd_patch(ehci, stream, sitd, sched, packet);
2025                sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2026                                sitd);
2027
2028                next_uframe += stream->interval << 3;
2029        }
2030        stream->next_uframe = next_uframe & (mod - 1);
2031
2032        /* don't need that schedule data any more */
2033        iso_sched_free (stream, sched);
2034        urb->hcpriv = stream;
2035
2036        ++ehci->isoc_count;
2037        enable_periodic(ehci);
2038}
2039
2040/*-------------------------------------------------------------------------*/
2041
2042#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2043                                | SITD_STS_XACT | SITD_STS_MMF)
2044
2045/* Process and recycle a completed SITD.  Return true iff its urb completed,
2046 * and hence its completion callback probably added things to the hardware
2047 * schedule.
2048 *
2049 * Note that we carefully avoid recycling this descriptor until after any
2050 * completion callback runs, so that it won't be reused quickly.  That is,
2051 * assuming (a) no more than two urbs per frame on this endpoint, and also
2052 * (b) only this endpoint's completions submit URBs.  It seems some silicon
2053 * corrupts things if you reuse completed descriptors very quickly...
2054 */
2055static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2056{
2057        struct urb                              *urb = sitd->urb;
2058        struct usb_iso_packet_descriptor        *desc;
2059        u32                                     t;
2060        int                                     urb_index = -1;
2061        struct ehci_iso_stream                  *stream = sitd->stream;
2062        struct usb_device                       *dev;
2063        bool                                    retval = false;
2064
2065        urb_index = sitd->index;
2066        desc = &urb->iso_frame_desc [urb_index];
2067        t = hc32_to_cpup(ehci, &sitd->hw_results);
2068
2069        /* report transfer status */
2070        if (unlikely(t & SITD_ERRS)) {
2071                urb->error_count++;
2072                if (t & SITD_STS_DBE)
2073                        desc->status = usb_pipein (urb->pipe)
2074                                ? -ENOSR  /* hc couldn't read */
2075                                : -ECOMM; /* hc couldn't write */
2076                else if (t & SITD_STS_BABBLE)
2077                        desc->status = -EOVERFLOW;
2078                else /* XACT, MMF, etc */
2079                        desc->status = -EPROTO;
2080        } else if (unlikely(t & SITD_STS_ACTIVE)) {
2081                /* URB was too late; the transaction never ran */
2082                urb->error_count++;
2083        } else {
2084                desc->status = 0;
2085                desc->actual_length = desc->length - SITD_LENGTH(t);
2086                urb->actual_length += desc->actual_length;
2087        }
2088
2089        /* handle completion now? */
2090        if ((urb_index + 1) != urb->number_of_packets)
2091                goto done;
2092
2093        /* ASSERT: it's really the last sitd for this urb
2094        list_for_each_entry (sitd, &stream->td_list, sitd_list)
2095                BUG_ON (sitd->urb == urb);
2096         */
2097
2098        /* give urb back to the driver; completion often (re)submits */
2099        dev = urb->dev;
2100        ehci_urb_done(ehci, urb, 0);
2101        retval = true;
2102        urb = NULL;
2103
2104        --ehci->isoc_count;
2105        disable_periodic(ehci);
2106
2107        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2108        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2109                if (ehci->amd_pll_fix == 1)
2110                        usb_amd_quirk_pll_enable();
2111        }
2112
2113        if (list_is_singular(&stream->td_list)) {
2114                ehci_to_hcd(ehci)->self.bandwidth_allocated
2115                                -= stream->bandwidth;
2116                ehci_vdbg (ehci,
2117                        "deschedule devp %s ep%d%s-iso\n",
2118                        dev->devpath, stream->bEndpointAddress & 0x0f,
2119                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2120        }
2121
2122done:
2123        sitd->urb = NULL;
2124
2125        /* Add to the end of the free list for later reuse */
2126        list_move_tail(&sitd->sitd_list, &stream->free_list);
2127
2128        /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
2129        if (list_empty(&stream->td_list)) {
2130                list_splice_tail_init(&stream->free_list,
2131                                &ehci->cached_sitd_list);
2132                start_free_itds(ehci);
2133        }
2134
2135        return retval;
2136}
2137
2138/*-------------------------------------------------------------------------*/

2139static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
2140        gfp_t mem_flags)
2141{
2142        int                     status = -EINVAL;
2143        unsigned long           flags;
2144        struct ehci_iso_stream  *stream;
2145
2146        /* Get iso_stream head */
2147        stream = iso_stream_find (ehci, urb);
2148        if (stream == NULL) {
2149                ehci_dbg (ehci, "can't get iso stream\n");
2150                return -ENOMEM;
2151        }
2152        if (urb->interval != stream->interval) {
2153                ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
2154                        stream->interval, urb->interval);
2155                goto done;
2156        }
2157
2158#ifdef EHCI_URB_TRACE
2159        ehci_dbg (ehci,
2160                "submit %p dev%s ep%d%s-iso len %d\n",
2161                urb, urb->dev->devpath,
2162                usb_pipeendpoint (urb->pipe),
2163                usb_pipein (urb->pipe) ? "in" : "out",
2164                urb->transfer_buffer_length);
2165#endif
2166
2167        /* allocate SITDs */
2168        status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
2169        if (status < 0) {
2170                ehci_dbg (ehci, "can't init sitds\n");
2171                goto done;
2172        }
2173
2174        /* schedule ... need to lock */
2175        spin_lock_irqsave (&ehci->lock, flags);
2176        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2177                status = -ESHUTDOWN;
2178                goto done_not_linked;
2179        }
2180        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2181        if (unlikely(status))
2182                goto done_not_linked;
2183        status = iso_stream_schedule(ehci, urb, stream);
2184        if (status == 0)
2185                sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
2186        else
2187                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2188 done_not_linked:
2189        spin_unlock_irqrestore (&ehci->lock, flags);
2190 done:
2191        return status;
2192}
2193
2194/*-------------------------------------------------------------------------*/
2195
2196static void scan_isoc(struct ehci_hcd *ehci)
2197{
2198        unsigned        uf, now_frame, frame;
2199        unsigned        fmask = ehci->periodic_size - 1;
2200        bool            modified, live;
2201
2202        /*
2203         * When running, scan from the last scan point up to "now";
2204         * otherwise clean up by scanning everything that's left.
2205         * Touches as few pages as possible:  cache-friendly.
2206         */
2207        if (ehci->rh_state >= EHCI_RH_RUNNING) {
2208                uf = ehci_read_frame_index(ehci);
2209                now_frame = (uf >> 3) & fmask;
2210                live = true;
2211        } else {
2212                now_frame = (ehci->last_iso_frame - 1) & fmask;
2213                live = false;
2214        }
2215        ehci->now_frame = now_frame;
2216
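        /* begin at last_iso_frame: it may still hold unprocessed TDs */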
2217        frame = ehci->last_iso_frame;
2218        for (;;) {
2219                union ehci_shadow       q, *q_p;
2220                __hc32                  type, *hw_p;
2221
2222restart:
2223                /* scan each element in frame's queue for completions */
2224                q_p = &ehci->pshadow [frame];
2225                hw_p = &ehci->periodic [frame];
2226                q.ptr = q_p->ptr;
2227                type = Q_NEXT_TYPE(ehci, *hw_p);
2228                modified = false;
2229
2230                while (q.ptr != NULL) {
2231                        switch (hc32_to_cpu(ehci, type)) {
2232                        case Q_TYPE_ITD:
2233                                /* If this ITD is still active, leave it for
2234                                 * later processing ... check the next entry.
2235                                 * No need to check for activity unless the
2236                                 * frame is current.
2237                                 */
2238                                if (frame == now_frame && live) {
2239                                        rmb();
2240                                        for (uf = 0; uf < 8; uf++) {
2241                                                if (q.itd->hw_transaction[uf] &
2242                                                            ITD_ACTIVE(ehci))
2243                                                        break;
2244                                        }
2245                                        if (uf < 8) {
2246                                                q_p = &q.itd->itd_next;
2247                                                hw_p = &q.itd->hw_next;
2248                                                type = Q_NEXT_TYPE(ehci,
2249                                                        q.itd->hw_next);
2250                                                q = *q_p;
2251                                                break;
2252                                        }
2253                                }
2254
2255                                /* Take finished ITDs out of the schedule
2256                                 * and process them:  recycle, maybe report
2257                                 * URB completion.  HC won't cache the
2258                                 * pointer for much longer, if at all.
2259                                 */
2260                                *q_p = q.itd->itd_next;
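                                /* controllers flagged use_dummy_qh can't
                                 * tolerate an empty frame-list entry, so
                                 * park the inert dummy QH there instead
                                 * when unlinking the last node
                                 */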
2261                                if (!ehci->use_dummy_qh ||
2262                                    q.itd->hw_next != EHCI_LIST_END(ehci))
2263                                        *hw_p = q.itd->hw_next;
2264                                else
2265                                        *hw_p = ehci->dummy->qh_dma;
2266                                type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2267                                wmb();
2268                                modified = itd_complete (ehci, q.itd);
2269                                q = *q_p;
2270                                break;
2271                        case Q_TYPE_SITD:
2272                                /* If this SITD is still active, leave it for
2273                                 * later processing ... check the next entry.
2274                                 * No need to check for activity unless the
2275                                 * frame is current.
2276                                 */
2277                                if (((frame == now_frame) ||
2278                                     (((frame + 1) & fmask) == now_frame))
2279                                    && live
2280                                    && (q.sitd->hw_results &
2281                                        SITD_ACTIVE(ehci))) {
2282
2283                                        q_p = &q.sitd->sitd_next;
2284                                        hw_p = &q.sitd->hw_next;
2285                                        type = Q_NEXT_TYPE(ehci,
2286                                                        q.sitd->hw_next);
2287                                        q = *q_p;
2288                                        break;
2289                                }
2290
2291                                /* Take finished SITDs out of the schedule
2292                                 * and process them:  recycle, maybe report
2293                                 * URB completion.
2294                                 */
2295                                *q_p = q.sitd->sitd_next;
2296                                if (!ehci->use_dummy_qh ||
2297                                    q.sitd->hw_next != EHCI_LIST_END(ehci))
2298                                        *hw_p = q.sitd->hw_next;
2299                                else
2300                                        *hw_p = ehci->dummy->qh_dma;
2301                                type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2302                                wmb();
2303                                modified = sitd_complete (ehci, q.sitd);
2304                                q = *q_p;
2305                                break;
2306                        default:
2307                                ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2308                                        type, frame, q.ptr);
2309                                // BUG ();
2310                                /* FALL THROUGH */
2311                        case Q_TYPE_QH:
2312                        case Q_TYPE_FSTN:
2313                                /* End of the iTDs and siTDs */
2314                                q.ptr = NULL;
2315                                break;
2316                        }
2317
2318                        /* assume completion callbacks modify the queue */
2319                        if (unlikely(modified && ehci->isoc_count > 0))
2320                                goto restart;
2321                }
2322
2323                /* Stop when we have reached the current frame */
2324                if (frame == now_frame)
2325                        break;
2326
2327                /* The last frame may still have active siTDs */
2328                ehci->last_iso_frame = frame;
2329                frame = (frame + 1) & fmask;
2330        }
2331}
2332