linux/drivers/usb/host/ehci-sched.c
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

#ifdef CONFIG_PCI

static unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
{
        unsigned uf;

        /*
         * The MosChip MCS9990 controller updates its microframe counter
         * a little before the frame counter, and occasionally we will read
         * the invalid intermediate value.  Avoid problems by checking the
         * microframe number (the low-order 3 bits); if they are 0 then
         * re-read the register to get the correct value.
         */
        uf = ehci_readl(ehci, &ehci->regs->frame_index);
        if (unlikely(ehci->frame_index_bug && ((uf & 7) == 0)))
                uf = ehci_readl(ehci, &ehci->regs->frame_index);
        return uf;
}
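
/*
 * Illustrative sketch (an added example, not driver code): callers
 * usually split the frame-index register value into a frame number
 * and a microframe number, along these lines:
 *
 *      unsigned fi = ehci_read_frame_index(ehci);
 *      unsigned frame = (fi >> 3) % ehci->periodic_size;
 *      unsigned uframe = fi & 7;       // low-order 3 bits, 0..7
 *
 * The quirk check above relies on exactly this layout: a microframe
 * value of 0 may mean the frame counter has not caught up yet.
 */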

#endif

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
                __hc32 tag)
{
        switch (hc32_to_cpu(ehci, tag)) {
        case Q_TYPE_QH:
                return &periodic->qh->qh_next;
        case Q_TYPE_FSTN:
                return &periodic->fstn->fstn_next;
        case Q_TYPE_ITD:
                return &periodic->itd->itd_next;
        // case Q_TYPE_SITD:
        default:
                return &periodic->sitd->sitd_next;
        }
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
                __hc32 tag)
{
        switch (hc32_to_cpu(ehci, tag)) {
        /* our ehci_shadow.qh is actually software part */
        case Q_TYPE_QH:
                return &periodic->qh->hw->hw_next;
        /* others are hw parts */
        default:
                return periodic->hw_next;
        }
}

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
        union ehci_shadow       *prev_p = &ehci->pshadow[frame];
        __hc32                  *hw_p = &ehci->periodic[frame];
        union ehci_shadow       here = *prev_p;

        /* find predecessor of "ptr"; hw and shadow lists are in sync */
        while (here.ptr && here.ptr != ptr) {
                prev_p = periodic_next_shadow(ehci, prev_p,
                                Q_NEXT_TYPE(ehci, *hw_p));
                hw_p = shadow_next_periodic(ehci, &here,
                                Q_NEXT_TYPE(ehci, *hw_p));
                here = *prev_p;
        }
        /* an interrupt entry (at list end) could have been shared */
        if (!here.ptr)
                return;

        /* update shadow and hardware lists ... the old "next" pointers
         * from ptr may still be in use, the caller updates them.
         */
        *prev_p = *periodic_next_shadow(ehci, &here,
                        Q_NEXT_TYPE(ehci, *hw_p));

        if (!ehci->use_dummy_qh ||
            *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
                        != EHCI_LIST_END(ehci))
                *hw_p = *shadow_next_periodic(ehci, &here,
                                Q_NEXT_TYPE(ehci, *hw_p));
        else
                *hw_p = ehci->dummy->qh_dma;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
        __hc32                  *hw_p = &ehci->periodic [frame];
        union ehci_shadow       *q = &ehci->pshadow [frame];
        unsigned                usecs = 0;
        struct ehci_qh_hw       *hw;

        while (q->ptr) {
                switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
                case Q_TYPE_QH:
                        hw = q->qh->hw;
                        /* is it in the S-mask? */
                        if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
                                usecs += q->qh->usecs;
                        /* ... or C-mask? */
                        if (hw->hw_info2 & cpu_to_hc32(ehci,
                                        1 << (8 + uframe)))
                                usecs += q->qh->c_usecs;
                        hw_p = &hw->hw_next;
                        q = &q->qh->qh_next;
                        break;
                // case Q_TYPE_FSTN:
                default:
                        /* for "save place" FSTNs, count the relevant INTR
                         * bandwidth from the previous frame
                         */
                        if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
                                ehci_dbg (ehci, "ignoring FSTN cost ...\n");
                        }
                        hw_p = &q->fstn->hw_next;
                        q = &q->fstn->fstn_next;
                        break;
                case Q_TYPE_ITD:
                        if (q->itd->hw_transaction[uframe])
                                usecs += q->itd->stream->usecs;
                        hw_p = &q->itd->hw_next;
                        q = &q->itd->itd_next;
                        break;
                case Q_TYPE_SITD:
                        /* is it in the S-mask?  (count SPLIT, DATA) */
                        if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
                                        1 << uframe)) {
                                if (q->sitd->hw_fullspeed_ep &
                                                cpu_to_hc32(ehci, 1<<31))
                                        usecs += q->sitd->stream->usecs;
                                else    /* worst case for OUT start-split */
                                        usecs += HS_USECS_ISO (188);
                        }

                        /* ... C-mask?  (count CSPLIT, DATA) */
                        if (q->sitd->hw_uframe &
                                        cpu_to_hc32(ehci, 1 << (8 + uframe))) {
                                /* worst case for IN complete-split */
                                usecs += q->sitd->stream->c_usecs;
                        }

                        hw_p = &q->sitd->hw_next;
                        q = &q->sitd->sitd_next;
                        break;
                }
        }
#ifdef  DEBUG
        if (usecs > ehci->uframe_periodic_max)
                ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
                        frame * 8 + uframe, usecs);
#endif
        return usecs;
}

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
        if (!dev1->tt || !dev2->tt)
                return 0;
        if (dev1->tt != dev2->tt)
                return 0;
        if (dev1->tt->multi)
                return dev1->ttport == dev2->ttport;
        else
                return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
        unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
        if (!smask) {
                ehci_err(ehci, "invalid empty smask!\n");
                /* uframe 7 can't have bw so this will indicate failure */
                return 7;
        }
        return ffs(smask) - 1;
}
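
/*
 * Worked example (illustrative only): QH_SMASK selects the low eight
 * bits, so an smask of 0x04 means the ssplit goes out in H-frame
 * uframe 2, and ffs(0x04) - 1 == 2 places the full/low speed transfer
 * in B-frame uframe 2:
 *
 *      tt_start_uframe(ehci, cpu_to_hc32(ehci, 0x04));  // returns 2
 */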

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
        int i;
        for (i=0; i<7; i++) {
                if (max_tt_usecs[i] < tt_usecs[i]) {
                        tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
                        tt_usecs[i] = max_tt_usecs[i];
                }
        }
}
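
/*
 * Worked example (illustrative only): if uframe 0 already holds
 * 200 usecs, the loop above clips it to 125 and pushes the 75 usec
 * excess into uframe 1:
 *
 *      unsigned short tt_usecs[8] = { 200, 30, 0, 0, 0, 0, 0, 0 };
 *      carryover_tt_bandwidth(tt_usecs);
 *      // tt_usecs is now { 125, 105, 0, 0, 0, 0, 0, 0 }
 */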

/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example two separate 100 usec transfers can start in the same uframe,
 * and the second one would "carry over" 75 usecs into the next uframe.
 */
static void
periodic_tt_usecs (
        struct ehci_hcd *ehci,
        struct usb_device *dev,
        unsigned frame,
        unsigned short tt_usecs[8]
)
{
        __hc32                  *hw_p = &ehci->periodic [frame];
        union ehci_shadow       *q = &ehci->pshadow [frame];
        unsigned char           uf;

        memset(tt_usecs, 0, 16);

        while (q->ptr) {
                switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
                case Q_TYPE_ITD:
                        hw_p = &q->itd->hw_next;
                        q = &q->itd->itd_next;
                        continue;
                case Q_TYPE_QH:
                        if (same_tt(dev, q->qh->dev)) {
                                uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
                                tt_usecs[uf] += q->qh->tt_usecs;
                        }
                        hw_p = &q->qh->hw->hw_next;
                        q = &q->qh->qh_next;
                        continue;
                case Q_TYPE_SITD:
                        if (same_tt(dev, q->sitd->urb->dev)) {
                                uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
                                tt_usecs[uf] += q->sitd->stream->tt_usecs;
                        }
                        hw_p = &q->sitd->hw_next;
                        q = &q->sitd->sitd_next;
                        continue;
                // case Q_TYPE_FSTN:
                default:
                        ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
                                        frame);
                        hw_p = &q->fstn->hw_next;
                        q = &q->fstn->fstn_next;
                }
        }

        carryover_tt_bandwidth(tt_usecs);

        if (max_tt_usecs[7] < tt_usecs[7])
                ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
                        frame, tt_usecs[7] - max_tt_usecs[7]);
}

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available (
        struct ehci_hcd         *ehci,
        unsigned                period,
        struct usb_device       *dev,
        unsigned                frame,
        unsigned                uframe,
        u16                     usecs
)
{
        if ((period == 0) || (uframe >= 7))     /* error */
                return 0;

        for (; frame < ehci->periodic_size; frame += period) {
                unsigned short tt_usecs[8];

                periodic_tt_usecs (ehci, dev, frame, tt_usecs);

                ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
                        " schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
                        frame, usecs, uframe,
                        tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
                        tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);

                if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
                        ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
                                frame, uframe);
                        return 0;
                }

                /* special case for isoc transfers larger than 125us:
                 * the first and each subsequent fully used uframe
                 * must be empty, so as to not illegally delay
                 * already scheduled transactions
                 */
                if (125 < usecs) {
                        int ufs = (usecs / 125);
                        int i;
                        for (i = uframe; i < (uframe + ufs) && i < 8; i++)
                                if (0 < tt_usecs[i]) {
                                        ehci_vdbg(ehci,
                                                "multi-uframe xfer can't fit "
                                                "in frame %d uframe %d\n",
                                                frame, i);
                                        return 0;
                                }
                }

                tt_usecs[uframe] += usecs;

                carryover_tt_bandwidth(tt_usecs);

                /* fail if the carryover pushed bw past the last uframe's limit */
                if (max_tt_usecs[7] < tt_usecs[7]) {
                        ehci_vdbg(ehci,
                                "tt unavailable usecs %d frame %d uframe %d\n",
                                usecs, frame, uframe);
                        return 0;
                }
        }

        return 1;
}
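
/*
 * Usage sketch (illustrative only), mirroring the call made from
 * check_intr_schedule() below: probe whether an interrupt QH needing
 * qh->tt_usecs on the downstream bus every qh->period frames could
 * start its transfer in uframe 1:
 *
 *      if (tt_available(ehci, qh->period, qh->dev, frame, 1,
 *                      qh->tt_usecs))
 *              ... safe to claim this frame/uframe pair ...
 *
 * A zero return only means "this slot won't work"; callers retry
 * other frames and uframes.
 */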

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
        struct ehci_hcd         *ehci,
        unsigned                period,
        struct usb_device       *dev,
        unsigned                frame,
        u32                     uf_mask
)
{
        if (period == 0)        /* error */
                return 0;

        /* note bandwidth wastage:  split never follows csplit
         * (different dev or endpoint) until the next uframe.
         * calling convention doesn't make that distinction.
         */
        for (; frame < ehci->periodic_size; frame += period) {
                union ehci_shadow       here;
                __hc32                  type;
                struct ehci_qh_hw       *hw;

                here = ehci->pshadow [frame];
                type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
                while (here.ptr) {
                        switch (hc32_to_cpu(ehci, type)) {
                        case Q_TYPE_ITD:
                                type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
                                here = here.itd->itd_next;
                                continue;
                        case Q_TYPE_QH:
                                hw = here.qh->hw;
                                if (same_tt (dev, here.qh->dev)) {
                                        u32             mask;

                                        mask = hc32_to_cpu(ehci,
                                                        hw->hw_info2);
                                        /* "knows" no gap is needed */
                                        mask |= mask >> 8;
                                        if (mask & uf_mask)
                                                break;
                                }
                                type = Q_NEXT_TYPE(ehci, hw->hw_next);
                                here = here.qh->qh_next;
                                continue;
                        case Q_TYPE_SITD:
                                if (same_tt (dev, here.sitd->urb->dev)) {
                                        u16             mask;

                                        mask = hc32_to_cpu(ehci, here.sitd
                                                                ->hw_uframe);
                                        /* FIXME assumes no gap for IN! */
                                        mask |= mask >> 8;
                                        if (mask & uf_mask)
                                                break;
                                }
                                type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
                                here = here.sitd->sitd_next;
                                continue;
                        // case Q_TYPE_FSTN:
                        default:
                                ehci_dbg (ehci,
                                        "periodic frame %d bogus type %d\n",
                                        frame, type);
                        }

                        /* collision or error */
                        return 0;
                }
        }

        /* no collision */
        return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
        if (ehci->periodic_count++)
                return;

        /* Stop waiting to turn off the periodic schedule */
        ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

        /* Don't start the schedule until PSS is 0 */
        ehci_poll_PSS(ehci);
        turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
        if (--ehci->periodic_count)
                return;

        /* Don't turn off the schedule until PSS is 1 */
        ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        unsigned        i;
        unsigned        period = qh->period;

        dev_dbg (&qh->dev->dev,
                "link qh%d-%04x/%p start %d [%d/%d us]\n",
                period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
                        & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);

        /* high bandwidth, or otherwise every microframe */
        if (period == 0)
                period = 1;

        for (i = qh->start; i < ehci->periodic_size; i += period) {
                union ehci_shadow       *prev = &ehci->pshadow[i];
                __hc32                  *hw_p = &ehci->periodic[i];
                union ehci_shadow       here = *prev;
                __hc32                  type = 0;

                /* skip the iso nodes at list head */
                while (here.ptr) {
                        type = Q_NEXT_TYPE(ehci, *hw_p);
                        if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
                                break;
                        prev = periodic_next_shadow(ehci, prev, type);
                        hw_p = shadow_next_periodic(ehci, &here, type);
                        here = *prev;
                }

                /* sorting each branch by period (slow-->fast)
                 * enables sharing interior tree nodes
                 */
                while (here.ptr && qh != here.qh) {
                        if (qh->period > here.qh->period)
                                break;
                        prev = &here.qh->qh_next;
                        hw_p = &here.qh->hw->hw_next;
                        here = *prev;
                }
                /* link in this qh, unless some earlier pass did that */
                if (qh != here.qh) {
                        qh->qh_next = here;
                        if (here.qh)
                                qh->hw->hw_next = *hw_p;
                        wmb ();
                        prev->qh = qh;
                        *hw_p = QH_NEXT (ehci, qh->qh_dma);
                }
        }
        qh->qh_state = QH_STATE_LINKED;
        qh->xacterrs = 0;

        /* update per-qh bandwidth for usbfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
                ? ((qh->usecs + qh->c_usecs) / qh->period)
                : (qh->usecs * 8);

        list_add(&qh->intr_node, &ehci->intr_qh_list);

        /* maybe enable periodic schedule processing */
        ++ehci->intr_count;
        enable_periodic(ehci);
}
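
/*
 * Worked example (illustrative only) of the usbfs accounting above:
 * a period-4 interrupt QH using 100 usecs plus 50 usecs of CSPLIT
 * time charges (100 + 50) / 4 = 37 usecs of averaged bandwidth,
 * while a period-0 ("every microframe") QH using 30 usecs charges
 * 30 * 8 = 240 usecs per frame.
 */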

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        unsigned        i;
        unsigned        period;

        /*
         * If qh is for a low/full-speed device, simply unlinking it
         * could interfere with an ongoing split transaction.  To unlink
         * it safely would require setting the QH_INACTIVATE bit and
         * waiting at least one frame, as described in EHCI 4.12.2.5.
         *
         * We won't bother with any of this.  Instead, we assume that the
         * only reason for unlinking an interrupt QH while the current URB
         * is still active is to dequeue all the URBs (flush the whole
         * endpoint queue).
         *
         * If rebalancing the periodic schedule is ever implemented, this
         * approach will no longer be valid.
         */

        /* high bandwidth, or otherwise part of every microframe */
        if ((period = qh->period) == 0)
                period = 1;

        for (i = qh->start; i < ehci->periodic_size; i += period)
                periodic_unlink (ehci, i, qh);

        /* update per-qh bandwidth for usbfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
                ? ((qh->usecs + qh->c_usecs) / qh->period)
                : (qh->usecs * 8);

        dev_dbg (&qh->dev->dev,
                "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
                qh->period,
                hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);

        /* qh->qh_next still "live" to HC */
        qh->qh_state = QH_STATE_UNLINK;
        qh->qh_next.ptr = NULL;

        if (ehci->qh_scan_next == qh)
                ehci->qh_scan_next = list_entry(qh->intr_node.next,
                                struct ehci_qh, intr_node);
        list_del(&qh->intr_node);
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        /* If the QH isn't linked then there's nothing we can do
         * unless we were called during a giveback, in which case
         * qh_completions() has to deal with it.
         */
        if (qh->qh_state != QH_STATE_LINKED) {
                if (qh->qh_state == QH_STATE_COMPLETING)
                        qh->needs_rescan = 1;
                return;
        }

        qh_unlink_periodic (ehci, qh);

        /* Make sure the unlinks are visible before starting the timer */
        wmb();

        /*
         * The EHCI spec doesn't say how long it takes the controller to
         * stop accessing an unlinked interrupt QH.  The timer delay is
         * 9 uframes; presumably that will be long enough.
         */
        qh->unlink_cycle = ehci->intr_unlink_cycle;

        /* New entries go at the end of the intr_unlink list */
        if (ehci->intr_unlink)
                ehci->intr_unlink_last->unlink_next = qh;
        else
                ehci->intr_unlink = qh;
        ehci->intr_unlink_last = qh;

        if (ehci->intr_unlinking)
                ;       /* Avoid recursive calls */
        else if (ehci->rh_state < EHCI_RH_RUNNING)
                ehci_handle_intr_unlinks(ehci);
        else if (ehci->intr_unlink == qh) {
                ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
                ++ehci->intr_unlink_cycle;
        }
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qh_hw       *hw = qh->hw;
        int                     rc;

        qh->qh_state = QH_STATE_IDLE;
        hw->hw_next = EHCI_LIST_END(ehci);

        qh_completions(ehci, qh);

        /* reschedule QH iff another request is queued */
        if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
                rc = qh_schedule(ehci, qh);

                /* An error here likely indicates handshake failure
                 * or no space left in the schedule.  Neither fault
                 * should happen often ...
                 *
                 * FIXME kill the now-dysfunctional queued urbs
                 */
                if (rc != 0)
                        ehci_err(ehci, "can't reschedule qh %p, err %d\n",
                                        qh, rc);
        }

        /* maybe turn off periodic schedule */
        --ehci->intr_count;
        disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period (
        struct ehci_hcd *ehci,
        unsigned        frame,
        unsigned        uframe,
        unsigned        period,
        unsigned        usecs
) {
        int             claimed;

        /* complete split running into next frame?
         * given FSTN support, we could sometimes check...
         */
        if (uframe >= 8)
                return 0;

        /* convert "usecs we need" to "max already claimed" */
        usecs = ehci->uframe_periodic_max - usecs;

        /* we "know" 2 and 4 uframe intervals were rejected; so
         * for period 0, check _every_ microframe in the schedule.
         */
        if (unlikely (period == 0)) {
                do {
                        for (uframe = 0; uframe < 7; uframe++) {
                                claimed = periodic_usecs (ehci, frame, uframe);
                                if (claimed > usecs)
                                        return 0;
                        }
                } while ((frame += 1) < ehci->periodic_size);

        /* just check the specified uframe, at that period */
        } else {
                do {
                        claimed = periodic_usecs (ehci, frame, uframe);
                        if (claimed > usecs)
                                return 0;
                } while ((frame += period) < ehci->periodic_size);
        }

        // success!
        return 1;
}
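
/*
 * Worked example (illustrative only) of the inversion above: with
 * uframe_periodic_max at its default of 100 usecs, a request for
 * 30 usecs becomes "reject any uframe where more than 70 usecs are
 * already claimed", so each periodic_usecs() result can be compared
 * directly instead of re-adding the request on every iteration.
 */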

static int check_intr_schedule (
        struct ehci_hcd         *ehci,
        unsigned                frame,
        unsigned                uframe,
        const struct ehci_qh    *qh,
        __hc32                  *c_maskp
)
{
        int             retval = -ENOSPC;
        u8              mask = 0;

        if (qh->c_usecs && uframe >= 6)         /* FSTN territory? */
                goto done;

        if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
                goto done;
        if (!qh->c_usecs) {
                retval = 0;
                *c_maskp = 0;
                goto done;
        }

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
        if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
                                qh->tt_usecs)) {
                unsigned i;

                /* TODO : this may need FSTN for SSPLIT in uframe 5. */
                for (i=uframe+1; i<8 && i<uframe+4; i++)
                        if (!check_period (ehci, frame, i,
                                                qh->period, qh->c_usecs))
                                goto done;
                        else
                                mask |= 1 << i;

                retval = 0;

                *c_maskp = cpu_to_hc32(ehci, mask << 8);
        }
#else
        /* Make sure this tt's buffer is also available for CSPLITs.
         * We pessimize a bit; probably the typical full speed case
         * doesn't need the second CSPLIT.
         *
         * NOTE:  both SPLIT and CSPLIT could be checked in just
         * one smart pass...
         */
        mask = 0x03 << (uframe + qh->gap_uf);
        *c_maskp = cpu_to_hc32(ehci, mask << 8);

        mask |= 1 << uframe;
        if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
                if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
                                        qh->period, qh->c_usecs))
                        goto done;
                if (!check_period (ehci, frame, uframe + qh->gap_uf,
                                        qh->period, qh->c_usecs))
                        goto done;
                retval = 0;
        }
#endif
done:
        return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        int             status;
        unsigned        uframe;
        __hc32          c_mask;
        unsigned        frame;          /* 0..(qh->period - 1), or NO_FRAME */
        struct ehci_qh_hw       *hw = qh->hw;

        qh_refresh(ehci, qh);
        hw->hw_next = EHCI_LIST_END(ehci);
        frame = qh->start;

        /* reuse the previous schedule slots, if we can */
        if (frame < qh->period) {
                uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
                status = check_intr_schedule (ehci, frame, --uframe,
                                qh, &c_mask);
        } else {
                uframe = 0;
                c_mask = 0;
                status = -ENOSPC;
        }

        /* else scan the schedule to find a group of slots such that all
         * uframes have enough periodic bandwidth available.
         */
        if (status) {
                /* "normal" case, uframing flexible except with splits */
                if (qh->period) {
                        int             i;

                        for (i = qh->period; status && i > 0; --i) {
                                frame = ++ehci->random_frame % qh->period;
                                for (uframe = 0; uframe < 8; uframe++) {
                                        status = check_intr_schedule (ehci,
                                                        frame, uframe, qh,
                                                        &c_mask);
                                        if (status == 0)
                                                break;
                                }
                        }

                /* qh->period == 0 means every uframe */
                } else {
                        frame = 0;
                        status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
                }
                if (status)
                        goto done;
                qh->start = frame;

                /* reset S-frame and (maybe) C-frame masks */
                hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
                hw->hw_info2 |= qh->period
                        ? cpu_to_hc32(ehci, 1 << uframe)
                        : cpu_to_hc32(ehci, QH_SMASK);
                hw->hw_info2 |= c_mask;
        } else
                ehci_dbg (ehci, "reused qh %p schedule\n", qh);

        /* stuff into the periodic schedule */
        qh_link_periodic(ehci, qh);
done:
        return status;
}

static int intr_submit (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
        gfp_t                   mem_flags
) {
        unsigned                epnum;
        unsigned long           flags;
        struct ehci_qh          *qh;
        int                     status;
        struct list_head        empty;

        /* get endpoint and transfer/schedule data */
        epnum = urb->ep->desc.bEndpointAddress;

        spin_lock_irqsave (&ehci->lock, flags);

        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
                status = -ESHUTDOWN;
                goto done_not_linked;
        }
        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
        if (unlikely(status))
                goto done_not_linked;

        /* get qh and force any scheduling errors */
        INIT_LIST_HEAD (&empty);
        qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
        if (qh == NULL) {
                status = -ENOMEM;
                goto done;
        }
        if (qh->qh_state == QH_STATE_IDLE) {
                if ((status = qh_schedule (ehci, qh)) != 0)
                        goto done;
        }

        /* then queue the urb's tds to the qh */
        qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
        BUG_ON (qh == NULL);

        /* ... update usbfs periodic stats */
        ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
        if (unlikely(status))
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
        spin_unlock_irqrestore (&ehci->lock, flags);
        if (status)
                qtd_list_free (ehci, urb, qtd_list);

        return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
        struct ehci_qh          *qh;

        list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
                        intr_node) {
 rescan:
                /* clean any finished work for this qh */
                if (!list_empty(&qh->qtd_list)) {
                        int temp;

                        /*
                         * Unlinks could happen here; completion reporting
                         * drops the lock.  That's why ehci->qh_scan_next
                         * always holds the next qh to scan; if the next qh
                         * gets unlinked then ehci->qh_scan_next is adjusted
                         * in qh_unlink_periodic().
                         */
                        temp = qh_completions(ehci, qh);
                        if (unlikely(qh->needs_rescan ||
                                        (list_empty(&qh->qtd_list) &&
                                                qh->qh_state == QH_STATE_LINKED)))
                                start_unlink_intr(ehci, qh);
                        else if (temp != 0)
                                goto rescan;
                }
        }
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
        struct ehci_iso_stream *stream;

        stream = kzalloc(sizeof *stream, mem_flags);
        if (likely (stream != NULL)) {
                INIT_LIST_HEAD(&stream->td_list);
                INIT_LIST_HEAD(&stream->free_list);
                stream->next_uframe = -1;
        }
        return stream;
}

static void
iso_stream_init (
        struct ehci_hcd         *ehci,
        struct ehci_iso_stream  *stream,
        struct usb_device       *dev,
        int                     pipe,
        unsigned                interval
)
{
        static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

        u32                     buf1;
        unsigned                epnum, maxp;
        int                     is_input;
        long                    bandwidth;

        /*
         * this might be a "high bandwidth" highspeed endpoint,
         * as encoded in the ep descriptor's wMaxPacket field
         */
        epnum = usb_pipeendpoint (pipe);
        is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
        maxp = usb_maxpacket(dev, pipe, !is_input);
        if (is_input) {
                buf1 = (1 << 11);
        } else {
                buf1 = 0;
        }

        /* knows about ITD vs SITD */
        if (dev->speed == USB_SPEED_HIGH) {
                unsigned multi = hb_mult(maxp);

                stream->highspeed = 1;

                maxp = max_packet(maxp);
                buf1 |= maxp;
                maxp *= multi;

                stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
                stream->buf1 = cpu_to_hc32(ehci, buf1);
                stream->buf2 = cpu_to_hc32(ehci, multi);

                /* usbfs wants to report the average usecs per frame tied up
                 * when transfers on this endpoint are scheduled ...
                 */
                stream->usecs = HS_USECS_ISO (maxp);
                bandwidth = stream->usecs * 8;
                bandwidth /= interval;

        } else {
                u32             addr;
                int             think_time;
                int             hs_transfers;

                addr = dev->ttport << 24;
                if (!ehci_is_TDI(ehci)
                                || (dev->tt->hub !=
                                        ehci_to_hcd(ehci)->self.root_hub))
                        addr |= dev->tt->hub->devnum << 16;
                addr |= epnum << 8;
                addr |= dev->devnum;
                stream->usecs = HS_USECS_ISO (maxp);
                think_time = dev->tt ? dev->tt->think_time : 0;
                stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
                                dev->speed, is_input, 1, maxp));
                hs_transfers = max (1u, (maxp + 187) / 188);
                if (is_input) {
                        u32     tmp;

                        addr |= 1 << 31;
                        stream->c_usecs = stream->usecs;
                        stream->usecs = HS_USECS_ISO (1);
                        stream->raw_mask = 1;

                        /* c-mask as specified in USB 2.0 11.18.4 3.c */
                        tmp = (1 << (hs_transfers + 2)) - 1;
                        stream->raw_mask |= tmp << (8 + 2);
                } else
                        stream->raw_mask = smask_out [hs_transfers - 1];
                bandwidth = stream->usecs + stream->c_usecs;
                bandwidth /= interval << 3;

                /* stream->splits gets created from raw_mask later */
                stream->address = cpu_to_hc32(ehci, addr);
        }
        stream->bandwidth = bandwidth;

        stream->udev = dev;

        stream->bEndpointAddress = is_input | epnum;
        stream->interval = interval;
        stream->maxp = maxp;
}
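
/*
 * Worked example (illustrative only) of the bandwidth figures above:
 * a highspeed endpoint with interval 4 (uframes) charges
 * usecs * 8 / 4 per frame, while a fullspeed endpoint with interval
 * 1 (frames) charges (usecs + c_usecs) / 8; the "interval << 3"
 * converts a frame-based interval into uframes.
 */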

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
        unsigned                epnum;
        struct ehci_iso_stream  *stream;
        struct usb_host_endpoint *ep;
        unsigned long           flags;

        epnum = usb_pipeendpoint (urb->pipe);
        if (usb_pipein(urb->pipe))
                ep = urb->dev->ep_in[epnum];
        else
                ep = urb->dev->ep_out[epnum];

        spin_lock_irqsave (&ehci->lock, flags);
        stream = ep->hcpriv;

        if (unlikely (stream == NULL)) {
                stream = iso_stream_alloc(GFP_ATOMIC);
                if (likely (stream != NULL)) {
                        ep->hcpriv = stream;
                        stream->ep = ep;
                        iso_stream_init(ehci, stream, urb->dev, urb->pipe,
                                        urb->interval);
                }

        /* if dev->ep [epnum] is a QH, hw is set */
        } else if (unlikely (stream->hw != NULL)) {
                ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
                        urb->dev->devpath, epnum,
                        usb_pipein(urb->pipe) ? "in" : "out");
                stream = NULL;
        }

        spin_unlock_irqrestore (&ehci->lock, flags);
        return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
        struct ehci_iso_sched   *iso_sched;
        int                     size = sizeof *iso_sched;

        size += packets * sizeof (struct ehci_iso_packet);
        iso_sched = kzalloc(size, mem_flags);
        if (likely (iso_sched != NULL)) {
                INIT_LIST_HEAD (&iso_sched->td_list);
        }
        return iso_sched;
}
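
/*
 * Sizing sketch (illustrative only): struct ehci_iso_sched ends with
 * a zero-length packet[] array, so a 10-packet URB allocates
 *
 *      sizeof(struct ehci_iso_sched)
 *              + 10 * sizeof(struct ehci_iso_packet)
 *
 * in a single kzalloc(), after which packet[0] through packet[9]
 * are valid.
 */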

static inline void
itd_sched_init(
        struct ehci_hcd         *ehci,
        struct ehci_iso_sched   *iso_sched,
        struct ehci_iso_stream  *stream,
        struct urb              *urb
)
{
        unsigned        i;
        dma_addr_t      dma = urb->transfer_dma;

        /* how many uframes are needed for these transfers */
        iso_sched->span = urb->number_of_packets * stream->interval;

        /* figure out per-uframe itd fields that we'll need later
         * when we fit new itds into the schedule.
         */
        for (i = 0; i < urb->number_of_packets; i++) {
                struct ehci_iso_packet  *uframe = &iso_sched->packet [i];
                unsigned                length;
                dma_addr_t              buf;
                u32                     trans;

                length = urb->iso_frame_desc [i].length;
                buf = dma + urb->iso_frame_desc [i].offset;

                trans = EHCI_ISOC_ACTIVE;
                trans |= buf & 0x0fff;
                if (unlikely (((i + 1) == urb->number_of_packets))
                                && !(urb->transfer_flags & URB_NO_INTERRUPT))
                        trans |= EHCI_ITD_IOC;
                trans |= length << 16;
                uframe->transaction = cpu_to_hc32(ehci, trans);

                /* might need to cross a buffer page within a uframe */
                uframe->bufp = (buf & ~(u64)0x0fff);
                buf += length;
                if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
                        uframe->cross = 1;
        }
}
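
/*
 * Layout sketch (illustrative only) of the hw_transaction dword built
 * above, per EHCI spec table 3-3: bits 31:28 hold status (ACTIVE
 * here), bits 27:16 the transaction length, bit 15 IOC (set only on
 * the last packet unless URB_NO_INTERRUPT was requested), bits 14:12
 * the page select (filled in later, when the itd is patched into the
 * schedule), and bits 11:0 the buffer offset within its 4K page.
 */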

static void
iso_sched_free (
        struct ehci_iso_stream  *stream,
        struct ehci_iso_sched   *iso_sched
)
{
        if (!iso_sched)
                return;
        // caller must hold ehci->lock!
        list_splice (&iso_sched->td_list, &stream->free_list);
        kfree (iso_sched);
}

static int
itd_urb_transaction (
        struct ehci_iso_stream  *stream,
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        gfp_t                   mem_flags
)
{
        struct ehci_itd         *itd;
        dma_addr_t              itd_dma;
        int                     i;
        unsigned                num_itds;
        struct ehci_iso_sched   *sched;
        unsigned long           flags;

        sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
        if (unlikely (sched == NULL))
                return -ENOMEM;

        itd_sched_init(ehci, sched, stream, urb);

        if (urb->interval < 8)
                num_itds = 1 + (sched->span + 7) / 8;
        else
                num_itds = urb->number_of_packets;

        /* allocate/init ITDs */
        spin_lock_irqsave (&ehci->lock, flags);
        for (i = 0; i < num_itds; i++) {

                /*
                 * Use iTDs from the free list, but not iTDs that may
                 * still be in use by the hardware.
                 */
                if (likely(!list_empty(&stream->free_list))) {
                        itd = list_first_entry(&stream->free_list,
                                        struct ehci_itd, itd_list);
                        if (itd->frame == ehci->now_frame)
                                goto alloc_itd;
                        list_del (&itd->itd_list);
                        itd_dma = itd->itd_dma;
                } else {
 alloc_itd:
                        spin_unlock_irqrestore (&ehci->lock, flags);
                        itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
                                        &itd_dma);
                        spin_lock_irqsave (&ehci->lock, flags);
                        if (!itd) {
                                iso_sched_free(stream, sched);
                                spin_unlock_irqrestore(&ehci->lock, flags);
                                return -ENOMEM;
                        }
                }

                memset (itd, 0, sizeof *itd);
                itd->itd_dma = itd_dma;
                list_add (&itd->itd_list, &sched->td_list);
        }
        spin_unlock_irqrestore (&ehci->lock, flags);

        /* temporarily store schedule info in hcpriv */
        urb->hcpriv = sched;
        urb->error_count = 0;
        return 0;
}

/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
        struct ehci_hcd         *ehci,
        u32                     mod,
        u32                     uframe,
        u8                      usecs,
        u32                     period
)
{
        uframe %= period;
        do {
                /* can't commit more than uframe_periodic_max usec */
                if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
                                > (ehci->uframe_periodic_max - usecs))
                        return 0;

                /* we know urb->interval is 2^N uframes */
                uframe += period;
        } while (uframe < mod);
        return 1;
}
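
/*
 * Worked example (illustrative only): for a period of 4 uframes and
 * mod = periodic_size * 8, the loop above probes uframes (uframe % 4),
 * (uframe % 4) + 4, (uframe % 4) + 8, ... once around the whole ring,
 * so a slot is accepted only if every recurrence leaves at least
 * "usecs" of headroom under uframe_periodic_max.
 */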

static inline int
sitd_slot_ok (
        struct ehci_hcd         *ehci,
        u32                     mod,
        struct ehci_iso_stream  *stream,
        u32                     uframe,
        struct ehci_iso_sched   *sched,
        u32                     period_uframes
)
{
        u32                     mask, tmp;
        u32                     frame, uf;

        mask = stream->raw_mask << (uframe & 7);

        /* for IN, don't wrap CSPLIT into the next frame */
        if (mask & ~0xffff)
                return 0;

        /* check bandwidth */
        uframe %= period_uframes;
        frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
        /* The tt's fullspeed bus bandwidth must be available.
         * tt_available scheduling guarantees 10+% for control/bulk.
         */
        uf = uframe & 7;
        if (!tt_available(ehci, period_uframes >> 3,
                        stream->udev, frame, uf, stream->tt_usecs))
                return 0;
#else
        /* tt must be idle for start(s), any gap, and csplit.
         * assume scheduling slop leaves 10+% for control/bulk.
         */
        if (!tt_no_collision(ehci, period_uframes >> 3,
                        stream->udev, frame, mask))
                return 0;
#endif

        /* this multi-pass logic is simple, but performance may
         * suffer when the schedule data isn't cached.
         */
        do {
                u32             max_used;

                frame = uframe >> 3;
                uf = uframe & 7;

                /* check starts (OUT uses more than one) */
                max_used = ehci->uframe_periodic_max - stream->usecs;
                for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
                        if (periodic_usecs (ehci, frame, uf) > max_used)
                                return 0;
                }

                /* for IN, check CSPLIT */
                if (stream->c_usecs) {
                        uf = uframe & 7;
                        max_used = ehci->uframe_periodic_max - stream->c_usecs;
                        do {
                                tmp = 1 << uf;
                                tmp <<= 8;
                                if ((stream->raw_mask & tmp) == 0)
                                        continue;
                                if (periodic_usecs (ehci, frame, uf)
                                                > max_used)
                                        return 0;
                        } while (++uf < 8);
                }

                /* we know urb->interval is 2^N uframes */
                uframe += period_uframes;
        } while (uframe < mod);

        stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
        return 1;
}
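
/*
 * Mask sketch (illustrative only): an IN stream with one downstream
 * transfer gets raw_mask 0x1c01 from iso_stream_init() (ssplit in
 * bit 0, csplits in bits 10..12).  Shifted to start at uframe 5 it
 * becomes 0x38020, which spills past bit 15, so the "mask & ~0xffff"
 * test above rejects the slot rather than wrap a CSPLIT into the
 * next frame.
 */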

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP   80      /* microframes */

static int
iso_stream_schedule (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct ehci_iso_stream  *stream
)
{
        u32                     now, next, start, period, span;
        int                     status;
        unsigned                mod = ehci->periodic_size << 3;
        struct ehci_iso_sched   *sched = urb->hcpriv;

        period = urb->interval;
        span = sched->span;
        if (!stream->highspeed) {
                period <<= 3;
                span <<= 3;
        }

        if (span > mod - SCHEDULE_SLOP) {
                ehci_dbg (ehci, "iso request %p too long\n", urb);
                status = -EFBIG;
                goto fail;
        }

        now = ehci_read_frame_index(ehci) & (mod - 1);

        /* Typical case: reuse current schedule, stream is still active.
         * Hopefully there are no gaps from the host falling behind
         * (irq delays etc), but if there are we'll take the next
         * slot in the schedule, implicitly assuming URB_ISO_ASAP.
         */
        if (likely (!list_empty (&stream->td_list))) {
                u32     excess;

                /* For high speed devices, allow scheduling within the
                 * isochronous scheduling threshold.  For full speed devices
                 * and Intel PCI-based controllers, don't (work around for
                 * Intel ICH9 bug).
                 */
                if (!stream->highspeed && ehci->fs_i_thresh)
                        next = now + ehci->i_thresh;
                else
                        next = now;

                /* Fell behind (by up to twice the slop amount)?
                 * We decide based on the time of the last currently-scheduled
                 * slot, not the time of the next available slot.
                 */
1415                excess = (stream->next_uframe - period - next) & (mod - 1);
1416                if (excess >= mod - 2 * SCHEDULE_SLOP)
1417                        start = next + excess - mod + period *
1418                                        DIV_ROUND_UP(mod - excess, period);
1419                else
1420                        start = next + excess + period;
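                /*
                 * Illustrative numbers: with mod = 2048, period = 8 and
                 * next = 1000, an on-time stream (next_uframe = 1004)
                 * gives excess = (1004 - 8 - 1000) & 2047 = 2044; that
                 * wrapped, so start = 1000 + 2044 - 2048 + 8 * 1 = 1004
                 * and the stream keeps its slot.  A stream that fell
                 * behind (next_uframe = 980) gives excess = 2020 and
                 * start = 1000 + 2020 - 2048 + 8 * 4 = 1004, the first
                 * slot in phase with the old schedule that isn't past.
                 */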
1421                if (start - now >= mod) {
1422                        ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
1423                                        urb, start - now - period, period,
1424                                        mod);
1425                        status = -EFBIG;
1426                        goto fail;
1427                }
1428        }
1429
1430        /* need to schedule; when's the next (u)frame we could start?
1431         * this slop is bigger than ehci->i_thresh requires; scheduling
1432         * itself isn't free and the slop should cover reasonably slow
1433         * cpus.  it can also help high bandwidth if the dma and irq
1434         * loads don't jump until after the queue is primed.
1435         */
1436        else {
1437                int done = 0;
1438                start = SCHEDULE_SLOP + (now & ~0x07);
1439
1440                /* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
1441
1442                /* find a uframe slot with enough bandwidth.
1443                 * Early uframes are more precious because full-speed
1444                 * iso IN transfers can't use late uframes,
1445                 * and therefore early uframes should be allocated last.
1446                 */
1447                next = start;
1448                start += period;
1449                do {
1450                        start--;
1451                        /* check schedule: enough space? */
1452                        if (stream->highspeed) {
1453                                if (itd_slot_ok(ehci, mod, start,
1454                                                stream->usecs, period))
1455                                        done = 1;
1456                        } else {
1457                                if ((start % 8) >= 6)
1458                                        continue;
1459                                if (sitd_slot_ok(ehci, mod, stream,
1460                                                start, sched, period))
1461                                        done = 1;
1462                        }
1463                } while (start > next && !done);
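
                /*
                 * Worked example (illustrative): with now = 123 and
                 * period = 8, start begins at 80 + (123 & ~7) = 200 and
                 * the loop probes candidates 207 down to 200.  At full
                 * speed, 207 and 206 are skipped ((start % 8) >= 6) so
                 * the split transaction can finish within its frame.
                 */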
1464
1465                /* no room in the schedule */
1466                if (!done) {
1467                        ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
1468                                urb, now, now + mod);
1469                        status = -ENOSPC;
1470                        goto fail;
1471                }
1472        }
1473
1474        /* Tried to schedule too far into the future? */
1475        if (unlikely(start - now + span - period
1476                                >= mod - 2 * SCHEDULE_SLOP)) {
1477                ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
1478                                urb, start - now, span - period,
1479                                mod - 2 * SCHEDULE_SLOP);
1480                status = -EFBIG;
1481                goto fail;
1482        }
1483
1484        stream->next_uframe = start & (mod - 1);
1485
1486        /* report high speed start in uframes; full speed, in frames */
1487        urb->start_frame = stream->next_uframe;
1488        if (!stream->highspeed)
1489                urb->start_frame >>= 3;
1490
1491        /* Make sure scan_isoc() sees these */
1492        if (ehci->isoc_count == 0)
1493                ehci->next_frame = now >> 3;
1494        return 0;
1495
1496 fail:
1497        iso_sched_free(stream, sched);
1498        urb->hcpriv = NULL;
1499        return status;
1500}
1501
1502/*-------------------------------------------------------------------------*/
1503
1504static inline void
1505itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1506                struct ehci_itd *itd)
1507{
1508        int i;
1509
1510        /* it's been recently zeroed */
1511        itd->hw_next = EHCI_LIST_END(ehci);
1512        itd->hw_bufp [0] = stream->buf0;
1513        itd->hw_bufp [1] = stream->buf1;
1514        itd->hw_bufp [2] = stream->buf2;
1515
1516        for (i = 0; i < 8; i++)
1517                itd->index[i] = -1;
1518
1519        /* All other fields are filled when scheduling */
1520}
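
    /*
     * index[] holds -1 for uframes this iTD doesn't use: itd_patch()
     * fills in real packet indices, and itd_complete() skips the -1
     * slots when it scans the eight per-uframe transactions.
     */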
1521
1522static inline void
1523itd_patch(
1524        struct ehci_hcd         *ehci,
1525        struct ehci_itd         *itd,
1526        struct ehci_iso_sched   *iso_sched,
1527        unsigned                index,
1528        u16                     uframe
1529)
1530{
1531        struct ehci_iso_packet  *uf = &iso_sched->packet [index];
1532        unsigned                pg = itd->pg;
1533
1534        // BUG_ON (pg == 6 && uf->cross);
1535
1536        uframe &= 0x07;
1537        itd->index [uframe] = index;
1538
1539        itd->hw_transaction[uframe] = uf->transaction;
1540        itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1541        itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1542        itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1543
1544        /* iso_frame_desc[].offset must be strictly increasing */
1545        if (unlikely (uf->cross)) {
1546                u64     bufp = uf->bufp + 4096;
1547
1548                itd->pg = ++pg;
1549                itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1550                itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1551        }
1552}
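
    /*
     * Page-cross example (illustrative): uf->bufp is page-aligned (the
     * 12-bit offset travels in the transaction word) and each hw_bufp[]
     * slot addresses one 4 KiB page, selected per transaction by the PG
     * field written above (pg << 12).  A 1024-byte packet at offset
     * 0xe00 of page 0x12345000 ends at 0x12346200, so uf->cross is set
     * and the next slot is primed with bufp + 4096 = 0x12346000.
     */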
1553
1554static inline void
1555itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1556{
1557        union ehci_shadow       *prev = &ehci->pshadow[frame];
1558        __hc32                  *hw_p = &ehci->periodic[frame];
1559        union ehci_shadow       here = *prev;
1560        __hc32                  type = 0;
1561
1562        /* skip any iso nodes which might belong to previous microframes */
1563        while (here.ptr) {
1564                type = Q_NEXT_TYPE(ehci, *hw_p);
1565                if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1566                        break;
1567                prev = periodic_next_shadow(ehci, prev, type);
1568                hw_p = shadow_next_periodic(ehci, &here, type);
1569                here = *prev;
1570        }
1571
1572        itd->itd_next = here;
1573        itd->hw_next = *hw_p;
1574        prev->itd = itd;
1575        itd->frame = frame;
1576        wmb ();
1577        *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1578}
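
    /*
     * Two ordering guarantees matter here: the new iTD lands after any
     * iso nodes already queued but before the first QH or FSTN in the
     * frame's list, and the wmb() keeps the controller from seeing the
     * frame-list pointer before the iTD's own fields are committed.
     */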
1579
1580/* fit urb's itds into the selected schedule slot; activate as needed */
1581static void itd_link_urb(
1582        struct ehci_hcd         *ehci,
1583        struct urb              *urb,
1584        unsigned                mod,
1585        struct ehci_iso_stream  *stream
1586)
1587{
1588        int                     packet;
1589        unsigned                next_uframe, uframe, frame;
1590        struct ehci_iso_sched   *iso_sched = urb->hcpriv;
1591        struct ehci_itd         *itd;
1592
1593        next_uframe = stream->next_uframe & (mod - 1);
1594
1595        if (unlikely (list_empty(&stream->td_list))) {
1596                ehci_to_hcd(ehci)->self.bandwidth_allocated
1597                                += stream->bandwidth;
1598                ehci_vdbg (ehci,
1599                        "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
1600                        urb->dev->devpath, stream->bEndpointAddress & 0x0f,
1601                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
1602                        urb->interval,
1603                        next_uframe >> 3, next_uframe & 0x7);
1604        }
1605
1606        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1607                if (ehci->amd_pll_fix == 1)
1608                        usb_amd_quirk_pll_disable();
1609        }
1610
1611        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1612
1613        /* fill iTDs uframe by uframe */
1614        for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
1615                if (itd == NULL) {
1616                        /* ASSERT:  we have all necessary itds */
1617                        // BUG_ON (list_empty (&iso_sched->td_list));
1618
1619                        /* ASSERT:  no itds for this endpoint in this uframe */
1620
1621                        itd = list_entry (iso_sched->td_list.next,
1622                                        struct ehci_itd, itd_list);
1623                        list_move_tail (&itd->itd_list, &stream->td_list);
1624                        itd->stream = stream;
1625                        itd->urb = urb;
1626                        itd_init (ehci, stream, itd);
1627                }
1628
1629                uframe = next_uframe & 0x07;
1630                frame = next_uframe >> 3;
1631
1632                itd_patch(ehci, itd, iso_sched, packet, uframe);
1633
1634                next_uframe += stream->interval;
1635                next_uframe &= mod - 1;
1636                packet++;
1637
1638                /* link completed itds into the schedule */
1639                if (((next_uframe >> 3) != frame)
1640                                || packet == urb->number_of_packets) {
1641                        itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1642                        itd = NULL;
1643                }
1644        }
1645        stream->next_uframe = next_uframe;
1646
1647        /* don't need that schedule data any more */
1648        iso_sched_free (stream, iso_sched);
1649        urb->hcpriv = NULL;
1650
1651        ++ehci->isoc_count;
1652        enable_periodic(ehci);
1653}
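
    /*
     * Packing example (illustrative): a high-speed stream with interval
     * 1 uses every uframe, so a single iTD carries 8 packets and one
     * iTD is linked per frame; with interval 4 an iTD carries 2.  An
     * iTD is linked once next_uframe rolls into a new frame, or when
     * the urb runs out of packets, whichever comes first.
     */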
1654
1655#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1656
1657/* Process and recycle a completed ITD.  Return true iff its urb completed,
1658 * and hence its completion callback probably added things to the hardware
1659 * schedule.
1660 *
1661 * Note that we carefully avoid recycling this descriptor until after any
1662 * completion callback runs, so that it won't be reused quickly.  That is,
1663 * assuming (a) no more than two urbs per frame on this endpoint, and also
1664 * (b) only this endpoint's completions submit URBs.  It seems some silicon
1665 * corrupts things if you reuse completed descriptors very quickly...
1666 */
1667static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1668{
1669        struct urb                              *urb = itd->urb;
1670        struct usb_iso_packet_descriptor        *desc;
1671        u32                                     t;
1672        unsigned                                uframe;
1673        int                                     urb_index = -1;
1674        struct ehci_iso_stream                  *stream = itd->stream;
1675        struct usb_device                       *dev;
1676        bool                                    retval = false;
1677
1678        /* for each uframe with a packet */
1679        for (uframe = 0; uframe < 8; uframe++) {
1680                if (likely (itd->index[uframe] == -1))
1681                        continue;
1682                urb_index = itd->index[uframe];
1683                desc = &urb->iso_frame_desc [urb_index];
1684
1685                t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
1686                itd->hw_transaction [uframe] = 0;
1687
1688                /* report transfer status */
1689                if (unlikely (t & ISO_ERRS)) {
1690                        urb->error_count++;
1691                        if (t & EHCI_ISOC_BUF_ERR)
1692                                desc->status = usb_pipein (urb->pipe)
1693                                        ? -ENOSR  /* hc couldn't read */
1694                                        : -ECOMM; /* hc couldn't write */
1695                        else if (t & EHCI_ISOC_BABBLE)
1696                                desc->status = -EOVERFLOW;
1697                        else /* (t & EHCI_ISOC_XACTERR) */
1698                                desc->status = -EPROTO;
1699
1700                        /* HC need not update length with this error */
1701                        if (!(t & EHCI_ISOC_BABBLE)) {
1702                                desc->actual_length = EHCI_ITD_LENGTH(t);
1703                                urb->actual_length += desc->actual_length;
1704                        }
1705                } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
1706                        desc->status = 0;
1707                        desc->actual_length = EHCI_ITD_LENGTH(t);
1708                        urb->actual_length += desc->actual_length;
1709                } else {
1710                        /* URB was too late */
1711                        desc->status = -EXDEV;
1712                }
1713        }
1714
1715        /* handle completion now? */
1716        if (likely ((urb_index + 1) != urb->number_of_packets))
1717                goto done;
1718
1719        /* ASSERT: it's really the last itd for this urb
1720        list_for_each_entry (itd, &stream->td_list, itd_list)
1721                BUG_ON (itd->urb == urb);
1722         */
1723
1724        /* give urb back to the driver; completion often (re)submits */
1725        dev = urb->dev;
1726        ehci_urb_done(ehci, urb, 0);
1727        retval = true;
1728        urb = NULL;
1729
1730        --ehci->isoc_count;
1731        disable_periodic(ehci);
1732
1733        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1734        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1735                if (ehci->amd_pll_fix == 1)
1736                        usb_amd_quirk_pll_enable();
1737        }
1738
1739        if (unlikely(list_is_singular(&stream->td_list))) {
1740                ehci_to_hcd(ehci)->self.bandwidth_allocated
1741                                -= stream->bandwidth;
1742                ehci_vdbg (ehci,
1743                        "deschedule devp %s ep%d%s-iso\n",
1744                        dev->devpath, stream->bEndpointAddress & 0x0f,
1745                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
1746        }
1747
1748done:
1749        itd->urb = NULL;
1750
1751        /* Add to the end of the free list for later reuse */
1752        list_move_tail(&itd->itd_list, &stream->free_list);
1753
1754        /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1755        if (list_empty(&stream->td_list)) {
1756                list_splice_tail_init(&stream->free_list,
1757                                &ehci->cached_itd_list);
1758                start_free_itds(ehci);
1759        }
1760
1761        return retval;
1762}
1763
1764/*-------------------------------------------------------------------------*/
1765
1766static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1767        gfp_t mem_flags)
1768{
1769        int                     status = -EINVAL;
1770        unsigned long           flags;
1771        struct ehci_iso_stream  *stream;
1772
1773        /* Get iso_stream head */
1774        stream = iso_stream_find (ehci, urb);
1775        if (unlikely (stream == NULL)) {
1776                ehci_dbg (ehci, "can't get iso stream\n");
1777                return -ENOMEM;
1778        }
1779        if (unlikely (urb->interval != stream->interval)) {
1780                ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
1781                        stream->interval, urb->interval);
1782                goto done;
1783        }
1784
1785#ifdef EHCI_URB_TRACE
1786        ehci_dbg (ehci,
1787                "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1788                __func__, urb->dev->devpath, urb,
1789                usb_pipeendpoint (urb->pipe),
1790                usb_pipein (urb->pipe) ? "in" : "out",
1791                urb->transfer_buffer_length,
1792                urb->number_of_packets, urb->interval,
1793                stream);
1794#endif
1795
1796        /* allocate ITDs w/o locking anything */
1797        status = itd_urb_transaction (stream, ehci, urb, mem_flags);
1798        if (unlikely (status < 0)) {
1799                ehci_dbg (ehci, "can't init itds\n");
1800                goto done;
1801        }
1802
1803        /* schedule ... need to lock */
1804        spin_lock_irqsave (&ehci->lock, flags);
1805        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1806                status = -ESHUTDOWN;
1807                goto done_not_linked;
1808        }
1809        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1810        if (unlikely(status))
1811                goto done_not_linked;
1812        status = iso_stream_schedule(ehci, urb, stream);
1813        if (likely (status == 0))
1814                itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1815        else
1816                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1817 done_not_linked:
1818        spin_unlock_irqrestore (&ehci->lock, flags);
1819 done:
1820        return status;
1821}
1822
1823/*-------------------------------------------------------------------------*/
1824
1825/*
1826 * "Split ISO TDs" ... used for USB 1.1 devices going through the
1827 * TTs in USB 2.0 hubs.  These need microframe scheduling.
1828 */
1829
1830static inline void
1831sitd_sched_init(
1832        struct ehci_hcd         *ehci,
1833        struct ehci_iso_sched   *iso_sched,
1834        struct ehci_iso_stream  *stream,
1835        struct urb              *urb
1836)
1837{
1838        unsigned        i;
1839        dma_addr_t      dma = urb->transfer_dma;
1840
1841        /* how many frames are needed for these transfers */
1842        iso_sched->span = urb->number_of_packets * stream->interval;
1843
1844        /* figure out per-frame sitd fields that we'll need later
1845         * when we fit new sitds into the schedule.
1846         */
1847        for (i = 0; i < urb->number_of_packets; i++) {
1848                struct ehci_iso_packet  *packet = &iso_sched->packet [i];
1849                unsigned                length;
1850                dma_addr_t              buf;
1851                u32                     trans;
1852
1853                length = urb->iso_frame_desc [i].length & 0x03ff;
1854                buf = dma + urb->iso_frame_desc [i].offset;
1855
1856                trans = SITD_STS_ACTIVE;
1857                if (((i + 1) == urb->number_of_packets)
1858                                && !(urb->transfer_flags & URB_NO_INTERRUPT))
1859                        trans |= SITD_IOC;
1860                trans |= length << 16;
1861                packet->transaction = cpu_to_hc32(ehci, trans);
1862
1863                /* might need to cross a buffer page within a td */
1864                packet->bufp = buf;
1865                packet->buf1 = (buf + length) & ~0x0fff;
1866                if (packet->buf1 != (buf & ~(u64)0x0fff))
1867                        packet->cross = 1;
1868
1869                /* OUT uses multiple start-splits */
1870                if (stream->bEndpointAddress & USB_DIR_IN)
1871                        continue;
1872                length = (length + 187) / 188;
1873                if (length > 1) /* BEGIN vs ALL */
1874                        length |= 1 << 3;
1875                packet->buf1 |= length;
1876        }
1877}
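
    /*
     * Start-split example (illustrative): a 400-byte full-speed OUT
     * packet needs DIV_ROUND_UP(400, 188) = 3 start-splits, so T-count
     * is 3 and, being more than one, TP is set to BEGIN (the 1 << 3
     * above); a packet of 188 bytes or less gets T-count 1, TP ALL.
     */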
1878
1879static int
1880sitd_urb_transaction (
1881        struct ehci_iso_stream  *stream,
1882        struct ehci_hcd         *ehci,
1883        struct urb              *urb,
1884        gfp_t                   mem_flags
1885)
1886{
1887        struct ehci_sitd        *sitd;
1888        dma_addr_t              sitd_dma;
1889        int                     i;
1890        struct ehci_iso_sched   *iso_sched;
1891        unsigned long           flags;
1892
1893        iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1894        if (iso_sched == NULL)
1895                return -ENOMEM;
1896
1897        sitd_sched_init(ehci, iso_sched, stream, urb);
1898
1899        /* allocate/init sITDs */
1900        spin_lock_irqsave (&ehci->lock, flags);
1901        for (i = 0; i < urb->number_of_packets; i++) {
1902
1903                /* NOTE:  for now, we don't try to handle wraparound cases
1904                 * for IN (using sitd->hw_backpointer, like a FSTN), which
1905                 * means we never need two sitds for full speed packets.
1906                 */
1907
1908                /*
1909                 * Use siTDs from the free list, but not siTDs that may
1910                 * still be in use by the hardware.
1911                 */
1912                if (likely(!list_empty(&stream->free_list))) {
1913                        sitd = list_first_entry(&stream->free_list,
1914                                         struct ehci_sitd, sitd_list);
1915                        if (sitd->frame == ehci->now_frame)
1916                                goto alloc_sitd;
1917                        list_del (&sitd->sitd_list);
1918                        sitd_dma = sitd->sitd_dma;
1919                } else {
1920 alloc_sitd:
1921                        spin_unlock_irqrestore (&ehci->lock, flags);
1922                        sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
1923                                        &sitd_dma);
1924                        spin_lock_irqsave (&ehci->lock, flags);
1925                        if (!sitd) {
1926                                iso_sched_free(stream, iso_sched);
1927                                spin_unlock_irqrestore(&ehci->lock, flags);
1928                                return -ENOMEM;
1929                        }
1930                }
1931
1932                memset (sitd, 0, sizeof *sitd);
1933                sitd->sitd_dma = sitd_dma;
1934                list_add (&sitd->sitd_list, &iso_sched->td_list);
1935        }
1936
1937        /* temporarily store schedule info in hcpriv */
1938        urb->hcpriv = iso_sched;
1939        urb->error_count = 0;
1940
1941        spin_unlock_irqrestore (&ehci->lock, flags);
1942        return 0;
1943}
1944
1945/*-------------------------------------------------------------------------*/
1946
1947static inline void
1948sitd_patch(
1949        struct ehci_hcd         *ehci,
1950        struct ehci_iso_stream  *stream,
1951        struct ehci_sitd        *sitd,
1952        struct ehci_iso_sched   *iso_sched,
1953        unsigned                index
1954)
1955{
1956        struct ehci_iso_packet  *uf = &iso_sched->packet [index];
1957        u64                     bufp = uf->bufp;
1958
1959        sitd->hw_next = EHCI_LIST_END(ehci);
1960        sitd->hw_fullspeed_ep = stream->address;
1961        sitd->hw_uframe = stream->splits;
1962        sitd->hw_results = uf->transaction;
1963        sitd->hw_backpointer = EHCI_LIST_END(ehci);
1964
1966        sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
1967        sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
1968
1969        sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
1970        if (uf->cross)
1971                bufp += 4096;
1972        sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
1973        sitd->index = index;
1974}
1975
1976static inline void
1977sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
1978{
1979        /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
1980        sitd->sitd_next = ehci->pshadow [frame];
1981        sitd->hw_next = ehci->periodic [frame];
1982        ehci->pshadow [frame].sitd = sitd;
1983        sitd->frame = frame;
1984        wmb ();
1985        ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
1986}
1987
1988/* fit urb's sitds into the selected schedule slot; activate as needed */
1989static void sitd_link_urb(
1990        struct ehci_hcd         *ehci,
1991        struct urb              *urb,
1992        unsigned                mod,
1993        struct ehci_iso_stream  *stream
1994)
1995{
1996        int                     packet;
1997        unsigned                next_uframe;
1998        struct ehci_iso_sched   *sched = urb->hcpriv;
1999        struct ehci_sitd        *sitd;
2000
2001        next_uframe = stream->next_uframe;
2002
2003        if (list_empty(&stream->td_list)) {
2004                /* usbfs ignores TT bandwidth */
2005                ehci_to_hcd(ehci)->self.bandwidth_allocated
2006                                += stream->bandwidth;
2007                ehci_vdbg (ehci,
2008                        "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
2009                        urb->dev->devpath, stream->bEndpointAddress & 0x0f,
2010                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
2011                        (next_uframe >> 3) & (ehci->periodic_size - 1),
2012                        stream->interval, hc32_to_cpu(ehci, stream->splits));
2013        }
2014
2015        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2016                if (ehci->amd_pll_fix == 1)
2017                        usb_amd_quirk_pll_disable();
2018        }
2019
2020        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2021
2022        /* fill sITDs frame by frame */
2023        for (packet = 0, sitd = NULL;
2024                        packet < urb->number_of_packets;
2025                        packet++) {
2026
2027                /* ASSERT:  we have all necessary sitds */
2028                BUG_ON (list_empty (&sched->td_list));
2029
2030                /* ASSERT:  no sitds for this endpoint in this frame */
2031
2032                sitd = list_entry (sched->td_list.next,
2033                                struct ehci_sitd, sitd_list);
2034                list_move_tail (&sitd->sitd_list, &stream->td_list);
2035                sitd->stream = stream;
2036                sitd->urb = urb;
2037
2038                sitd_patch(ehci, stream, sitd, sched, packet);
2039                sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2040                                sitd);
2041
2042                next_uframe += stream->interval << 3;
2043        }
2044        stream->next_uframe = next_uframe & (mod - 1);
2045
2046        /* don't need that schedule data any more */
2047        iso_sched_free (stream, sched);
2048        urb->hcpriv = NULL;
2049
2050        ++ehci->isoc_count;
2051        enable_periodic(ehci);
2052}
2053
2054/*-------------------------------------------------------------------------*/
2055
2056#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2057                                | SITD_STS_XACT | SITD_STS_MMF)
2058
2059/* Process and recycle a completed SITD.  Return true iff its urb completed,
2060 * and hence its completion callback probably added things to the hardware
2061 * schedule.
2062 *
2063 * Note that we carefully avoid recycling this descriptor until after any
2064 * completion callback runs, so that it won't be reused quickly.  That is,
2065 * assuming (a) no more than two urbs per frame on this endpoint, and also
2066 * (b) only this endpoint's completions submit URBs.  It seems some silicon
2067 * corrupts things if you reuse completed descriptors very quickly...
2068 */
2069static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2070{
2071        struct urb                              *urb = sitd->urb;
2072        struct usb_iso_packet_descriptor        *desc;
2073        u32                                     t;
2074        int                                     urb_index = -1;
2075        struct ehci_iso_stream                  *stream = sitd->stream;
2076        struct usb_device                       *dev;
2077        bool                                    retval = false;
2078
2079        urb_index = sitd->index;
2080        desc = &urb->iso_frame_desc [urb_index];
2081        t = hc32_to_cpup(ehci, &sitd->hw_results);
2082
2083        /* report transfer status */
2084        if (t & SITD_ERRS) {
2085                urb->error_count++;
2086                if (t & SITD_STS_DBE)
2087                        desc->status = usb_pipein (urb->pipe)
2088                                ? -ENOSR  /* hc couldn't read */
2089                                : -ECOMM; /* hc couldn't write */
2090                else if (t & SITD_STS_BABBLE)
2091                        desc->status = -EOVERFLOW;
2092                else /* XACT, MMF, etc */
2093                        desc->status = -EPROTO;
2094        } else {
2095                desc->status = 0;
2096                desc->actual_length = desc->length - SITD_LENGTH(t);
2097                urb->actual_length += desc->actual_length;
2098        }
2099
2100        /* handle completion now? */
2101        if ((urb_index + 1) != urb->number_of_packets)
2102                goto done;
2103
2104        /* ASSERT: it's really the last sitd for this urb
2105        list_for_each_entry (sitd, &stream->td_list, sitd_list)
2106                BUG_ON (sitd->urb == urb);
2107         */
2108
2109        /* give urb back to the driver; completion often (re)submits */
2110        dev = urb->dev;
2111        ehci_urb_done(ehci, urb, 0);
2112        retval = true;
2113        urb = NULL;
2114
2115        --ehci->isoc_count;
2116        disable_periodic(ehci);
2117
2118        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2119        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2120                if (ehci->amd_pll_fix == 1)
2121                        usb_amd_quirk_pll_enable();
2122        }
2123
2124        if (list_is_singular(&stream->td_list)) {
2125                ehci_to_hcd(ehci)->self.bandwidth_allocated
2126                                -= stream->bandwidth;
2127                ehci_vdbg (ehci,
2128                        "deschedule devp %s ep%d%s-iso\n",
2129                        dev->devpath, stream->bEndpointAddress & 0x0f,
2130                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2131        }
2132
2133done:
2134        sitd->urb = NULL;
2135
2136        /* Add to the end of the free list for later reuse */
2137        list_move_tail(&sitd->sitd_list, &stream->free_list);
2138
2139        /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
2140        if (list_empty(&stream->td_list)) {
2141                list_splice_tail_init(&stream->free_list,
2142                                &ehci->cached_sitd_list);
2143                start_free_itds(ehci);
2144        }
2145
2146        return retval;
2147}
2148
2149
2150static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
2151        gfp_t mem_flags)
2152{
2153        int                     status = -EINVAL;
2154        unsigned long           flags;
2155        struct ehci_iso_stream  *stream;
2156
2157        /* Get iso_stream head */
2158        stream = iso_stream_find (ehci, urb);
2159        if (stream == NULL) {
2160                ehci_dbg (ehci, "can't get iso stream\n");
2161                return -ENOMEM;
2162        }
2163        if (urb->interval != stream->interval) {
2164                ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
2165                        stream->interval, urb->interval);
2166                goto done;
2167        }
2168
2169#ifdef EHCI_URB_TRACE
2170        ehci_dbg (ehci,
2171                "submit %p dev%s ep%d%s-iso len %d\n",
2172                urb, urb->dev->devpath,
2173                usb_pipeendpoint (urb->pipe),
2174                usb_pipein (urb->pipe) ? "in" : "out",
2175                urb->transfer_buffer_length);
2176#endif
2177
2178        /* allocate SITDs */
2179        status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
2180        if (status < 0) {
2181                ehci_dbg (ehci, "can't init sitds\n");
2182                goto done;
2183        }
2184
2185        /* schedule ... need to lock */
2186        spin_lock_irqsave (&ehci->lock, flags);
2187        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2188                status = -ESHUTDOWN;
2189                goto done_not_linked;
2190        }
2191        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2192        if (unlikely(status))
2193                goto done_not_linked;
2194        status = iso_stream_schedule(ehci, urb, stream);
2195        if (status == 0)
2196                sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
2197        else
2198                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2199 done_not_linked:
2200        spin_unlock_irqrestore (&ehci->lock, flags);
2201 done:
2202        return status;
2203}
2204
2205/*-------------------------------------------------------------------------*/
2206
2207static void scan_isoc(struct ehci_hcd *ehci)
2208{
2209        unsigned        uf, now_frame, frame;
2210        unsigned        fmask = ehci->periodic_size - 1;
2211        bool            modified, live;
2212
2213        /*
2214         * When running, scan from last scan point up to "now"
2215         * else clean up by scanning everything that's left.
2216         * Touches as few pages as possible:  cache-friendly.
2217         */
2218        if (ehci->rh_state >= EHCI_RH_RUNNING) {
2219                uf = ehci_read_frame_index(ehci);
2220                now_frame = (uf >> 3) & fmask;
2221                live = true;
2222        } else  {
2223                now_frame = (ehci->next_frame - 1) & fmask;
2224                live = false;
2225        }
2226        ehci->now_frame = now_frame;
2227
2228        frame = ehci->next_frame;
2229        for (;;) {
2230                union ehci_shadow       q, *q_p;
2231                __hc32                  type, *hw_p;
2232
2233restart:
2234                /* scan each element in frame's queue for completions */
2235                q_p = &ehci->pshadow [frame];
2236                hw_p = &ehci->periodic [frame];
2237                q.ptr = q_p->ptr;
2238                type = Q_NEXT_TYPE(ehci, *hw_p);
2239                modified = false;
2240
2241                while (q.ptr != NULL) {
2242                        switch (hc32_to_cpu(ehci, type)) {
2243                        case Q_TYPE_ITD:
2244                                /* If this ITD is still active, leave it for
2245                                 * later processing ... check the next entry.
2246                                 * No need to check for activity unless the
2247                                 * frame is current.
2248                                 */
2249                                if (frame == now_frame && live) {
2250                                        rmb();
2251                                        for (uf = 0; uf < 8; uf++) {
2252                                                if (q.itd->hw_transaction[uf] &
2253                                                            ITD_ACTIVE(ehci))
2254                                                        break;
2255                                        }
2256                                        if (uf < 8) {
2257                                                q_p = &q.itd->itd_next;
2258                                                hw_p = &q.itd->hw_next;
2259                                                type = Q_NEXT_TYPE(ehci,
2260                                                        q.itd->hw_next);
2261                                                q = *q_p;
2262                                                break;
2263                                        }
2264                                }
2265
2266                                /* Take finished ITDs out of the schedule
2267                                 * and process them:  recycle, maybe report
2268                                 * URB completion.  HC won't cache the
2269                                 * pointer for much longer, if at all.
2270                                 */
2271                                *q_p = q.itd->itd_next;
2272                                if (!ehci->use_dummy_qh ||
2273                                    q.itd->hw_next != EHCI_LIST_END(ehci))
2274                                        *hw_p = q.itd->hw_next;
2275                                else
2276                                        *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2277                                type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2278                                wmb();
2279                                modified = itd_complete (ehci, q.itd);
2280                                q = *q_p;
2281                                break;
2282                        case Q_TYPE_SITD:
2283                                /* If this SITD is still active, leave it for
2284                                 * later processing ... check the next entry.
2285                                 * No need to check for activity unless the
2286                                 * frame is current.
2287                                 */
2288                                if (((frame == now_frame) ||
2289                                     (((frame + 1) & fmask) == now_frame))
2290                                    && live
2291                                    && (q.sitd->hw_results &
2292                                        SITD_ACTIVE(ehci))) {
2293
2294                                        q_p = &q.sitd->sitd_next;
2295                                        hw_p = &q.sitd->hw_next;
2296                                        type = Q_NEXT_TYPE(ehci,
2297                                                        q.sitd->hw_next);
2298                                        q = *q_p;
2299                                        break;
2300                                }
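
                                /*
                                 * The window is two frames wide: a
                                 * split transaction whose SSPLIT went
                                 * out late in one frame may still be
                                 * finishing its complete-splits early
                                 * in the next.
                                 */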
2301
2302                                /* Take finished SITDs out of the schedule
2303                                 * and process them:  recycle, maybe report
2304                                 * URB completion.
2305                                 */
2306                                *q_p = q.sitd->sitd_next;
2307                                if (!ehci->use_dummy_qh ||
2308                                    q.sitd->hw_next != EHCI_LIST_END(ehci))
2309                                        *hw_p = q.sitd->hw_next;
2310                                else
2311                                        *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2312                                type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2313                                wmb();
2314                                modified = sitd_complete (ehci, q.sitd);
2315                                q = *q_p;
2316                                break;
2317                        default:
2318                                ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2319                                        type, frame, q.ptr);
2320                                // BUG ();
2321                                /* FALL THROUGH */
2322                        case Q_TYPE_QH:
2323                        case Q_TYPE_FSTN:
2324                                /* End of the iTDs and siTDs */
2325                                q.ptr = NULL;
2326                                break;
2327                        }
2328
2329                        /* assume completion callbacks modify the queue */
2330                        if (unlikely(modified && ehci->isoc_count > 0))
2331                                goto restart;
2332                }
2333
2334                /* Stop when we have reached the current frame */
2335                if (frame == now_frame)
2336                        break;
2337                frame = (frame + 1) & fmask;
2338        }
2339        ehci->next_frame = now_frame;
2340}
2341