linux/drivers/usb/host/ehci-sched.c
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
                __hc32 tag)
{
        switch (hc32_to_cpu(ehci, tag)) {
        case Q_TYPE_QH:
                return &periodic->qh->qh_next;
        case Q_TYPE_FSTN:
                return &periodic->fstn->fstn_next;
        case Q_TYPE_ITD:
                return &periodic->itd->itd_next;
        // case Q_TYPE_SITD:
        default:
                return &periodic->sitd->sitd_next;
        }
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
                __hc32 tag)
{
        switch (hc32_to_cpu(ehci, tag)) {
        /* our ehci_shadow.qh is actually software part */
        case Q_TYPE_QH:
                return &periodic->qh->hw->hw_next;
        /* others are hw parts */
        default:
                return periodic->hw_next;
        }
}
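
/*
 * Background note: ehci->periodic[] is the dma-visible hardware frame
 * list, while ehci->pshadow[] mirrors it with cpu pointers.  The two
 * lists stay in sync and are walked in lockstep, which is why
 * periodic_unlink() below advances a hardware pointer and a shadow
 * pointer together, using the two helpers above.
 */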

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
        union ehci_shadow       *prev_p = &ehci->pshadow[frame];
        __hc32                  *hw_p = &ehci->periodic[frame];
        union ehci_shadow       here = *prev_p;

        /* find predecessor of "ptr"; hw and shadow lists are in sync */
        while (here.ptr && here.ptr != ptr) {
                prev_p = periodic_next_shadow(ehci, prev_p,
                                Q_NEXT_TYPE(ehci, *hw_p));
                hw_p = shadow_next_periodic(ehci, &here,
                                Q_NEXT_TYPE(ehci, *hw_p));
                here = *prev_p;
        }
        /* an interrupt entry (at list end) could have been shared */
        if (!here.ptr)
                return;

        /* update shadow and hardware lists ... the old "next" pointers
         * from ptr may still be in use, the caller updates them.
         */
        *prev_p = *periodic_next_shadow(ehci, &here,
                        Q_NEXT_TYPE(ehci, *hw_p));

        if (!ehci->use_dummy_qh ||
            *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
                        != EHCI_LIST_END(ehci))
                *hw_p = *shadow_next_periodic(ehci, &here,
                                Q_NEXT_TYPE(ehci, *hw_p));
        else
                /* qh_dma is cpu-order; *hw_p is a __hc32 hardware field */
                *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
        __hc32                  *hw_p = &ehci->periodic [frame];
        union ehci_shadow       *q = &ehci->pshadow [frame];
        unsigned                usecs = 0;
        struct ehci_qh_hw       *hw;

        while (q->ptr) {
                switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
                case Q_TYPE_QH:
                        hw = q->qh->hw;
                        /* is it in the S-mask? */
                        if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
                                usecs += q->qh->usecs;
                        /* ... or C-mask? */
                        if (hw->hw_info2 & cpu_to_hc32(ehci,
                                        1 << (8 + uframe)))
                                usecs += q->qh->c_usecs;
                        hw_p = &hw->hw_next;
                        q = &q->qh->qh_next;
                        break;
                // case Q_TYPE_FSTN:
                default:
                        /* for "save place" FSTNs, count the relevant INTR
                         * bandwidth from the previous frame
                         */
                        if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
                                ehci_dbg (ehci, "ignoring FSTN cost ...\n");
                        }
                        hw_p = &q->fstn->hw_next;
                        q = &q->fstn->fstn_next;
                        break;
                case Q_TYPE_ITD:
                        if (q->itd->hw_transaction[uframe])
                                usecs += q->itd->stream->usecs;
                        hw_p = &q->itd->hw_next;
                        q = &q->itd->itd_next;
                        break;
                case Q_TYPE_SITD:
                        /* is it in the S-mask?  (count SPLIT, DATA) */
                        if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
                                        1 << uframe)) {
                                if (q->sitd->hw_fullspeed_ep &
                                                cpu_to_hc32(ehci, 1<<31))
                                        usecs += q->sitd->stream->usecs;
                                else    /* worst case for OUT start-split */
                                        usecs += HS_USECS_ISO (188);
                        }

                        /* ... C-mask?  (count CSPLIT, DATA) */
                        if (q->sitd->hw_uframe &
                                        cpu_to_hc32(ehci, 1 << (8 + uframe))) {
                                /* worst case for IN complete-split */
                                usecs += q->sitd->stream->c_usecs;
                        }

                        hw_p = &q->sitd->hw_next;
                        q = &q->sitd->sitd_next;
                        break;
                }
        }
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
        if (usecs > ehci->uframe_periodic_max)
                ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
                        frame * 8 + uframe, usecs);
#endif
        return usecs;
}
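
/*
 * Illustrative sketch (not part of the driver): how the QH case above
 * decodes hw_info2.  Bits 0..7 are the interrupt schedule mask (S-mask,
 * one bit per uframe) and bits 8..15 are the split completion mask
 * (C-mask); QH_SMASK and QH_CMASK name those fields.
 */
#if 0
static int qh_claims_uframe(struct ehci_hcd *ehci, struct ehci_qh *qh,
                unsigned uframe)
{
        u32 info2 = hc32_to_cpu(ehci, qh->hw->hw_info2);

        return (info2 & (1 << uframe))          /* start/interrupt xact */
                || (info2 & (1 << (8 + uframe)));       /* complete-split */
}
#endif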

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
        if (!dev1->tt || !dev2->tt)
                return 0;
        if (dev1->tt != dev2->tt)
                return 0;
        if (dev1->tt->multi)
                return dev1->ttport == dev2->ttport;
        else
                return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
        unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
        if (!smask) {
                ehci_err(ehci, "invalid empty smask!\n");
                /* uframe 7 can't have bw so this will indicate failure */
                return 7;
        }
        return ffs(smask) - 1;
}
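
/*
 * Worked example (illustrative only): an S-mask of 0x04 has its ssplit
 * in H-frame uframe 2, so ffs(0x04) - 1 == 2 and the full/low speed
 * transfer starts in B-frame uframe 2.
 */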

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
        int i;
        for (i=0; i<7; i++) {
                if (max_tt_usecs[i] < tt_usecs[i]) {
                        tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
                        tt_usecs[i] = max_tt_usecs[i];
                }
        }
}
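
/*
 * Worked example (illustrative only): starting from tt_usecs[] ==
 * { 0, 200, 0, ... }, uframe 1 exceeds max_tt_usecs[1] == 125, so the
 * loop clips it to 125 and carries the remaining 75 usecs into uframe 2,
 * yielding { 0, 125, 75, 0, ... } -- the TT simply keeps transferring
 * across the uframe boundary.
 */
#if 0
static void carryover_example(void)
{
        unsigned short tt_usecs[8] = { 0, 200, 0, 0, 0, 0, 0, 0 };

        carryover_tt_bandwidth(tt_usecs);
        /* now tt_usecs[1] == 125 and tt_usecs[2] == 75 */
}
#endif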

/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example two separate 100 usec transfers can start in the same uframe,
 * and the second one would "carry over" 75 usecs into the next uframe.
 */
static void
periodic_tt_usecs (
        struct ehci_hcd *ehci,
        struct usb_device *dev,
        unsigned frame,
        unsigned short tt_usecs[8]
)
{
        __hc32                  *hw_p = &ehci->periodic [frame];
        union ehci_shadow       *q = &ehci->pshadow [frame];
        unsigned char           uf;

        memset(tt_usecs, 0, 16);

        while (q->ptr) {
                switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
                case Q_TYPE_ITD:
                        hw_p = &q->itd->hw_next;
                        q = &q->itd->itd_next;
                        continue;
                case Q_TYPE_QH:
                        if (same_tt(dev, q->qh->dev)) {
                                uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
                                tt_usecs[uf] += q->qh->tt_usecs;
                        }
                        hw_p = &q->qh->hw->hw_next;
                        q = &q->qh->qh_next;
                        continue;
                case Q_TYPE_SITD:
                        if (same_tt(dev, q->sitd->urb->dev)) {
                                uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
                                tt_usecs[uf] += q->sitd->stream->tt_usecs;
                        }
                        hw_p = &q->sitd->hw_next;
                        q = &q->sitd->sitd_next;
                        continue;
                // case Q_TYPE_FSTN:
                default:
                        ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
                                        frame);
                        hw_p = &q->fstn->hw_next;
                        q = &q->fstn->fstn_next;
                }
        }

        carryover_tt_bandwidth(tt_usecs);

        if (max_tt_usecs[7] < tt_usecs[7])
                ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
                        frame, tt_usecs[7] - max_tt_usecs[7]);
}

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available (
        struct ehci_hcd         *ehci,
        unsigned                period,
        struct usb_device       *dev,
        unsigned                frame,
        unsigned                uframe,
        u16                     usecs
)
{
        if ((period == 0) || (uframe >= 7))     /* error */
                return 0;

        for (; frame < ehci->periodic_size; frame += period) {
                unsigned short tt_usecs[8];

                periodic_tt_usecs (ehci, dev, frame, tt_usecs);

                if (max_tt_usecs[uframe] <= tt_usecs[uframe])
                        return 0;

                /* special case for isoc transfers larger than 125us:
                 * the first and each subsequent fully used uframe
                 * must be empty, so as to not illegally delay
                 * already scheduled transactions
                 */
                if (125 < usecs) {
                        int ufs = (usecs / 125);
                        int i;
                        for (i = uframe; i < (uframe + ufs) && i < 8; i++)
                                if (0 < tt_usecs[i])
                                        return 0;
                }

                tt_usecs[uframe] += usecs;

                carryover_tt_bandwidth(tt_usecs);

                /* fail if the carryover pushed bw past the last uframe's limit */
                if (max_tt_usecs[7] < tt_usecs[7])
                        return 0;
        }

        return 1;
}
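
/*
 * Usage sketch (illustrative only; "udev" and the numbers are
 * hypothetical): before committing a 100 usec full/low speed transfer
 * that recurs every 8 frames, starting at frame 3 / uframe 1, a caller
 * would check the TT budget like this.
 */
#if 0
static int example_tt_check(struct ehci_hcd *ehci, struct usb_device *udev)
{
        /* nonzero means the slot fits within the max_tt_usecs[] limits */
        return tt_available(ehci, 8, udev, 3, 1, 100);
}
#endif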

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
        struct ehci_hcd         *ehci,
        unsigned                period,
        struct usb_device       *dev,
        unsigned                frame,
        u32                     uf_mask
)
{
        if (period == 0)        /* error */
                return 0;

        /* note bandwidth wastage:  split never follows csplit
         * (different dev or endpoint) until the next uframe.
         * calling convention doesn't make that distinction.
         */
        for (; frame < ehci->periodic_size; frame += period) {
                union ehci_shadow       here;
                __hc32                  type;
                struct ehci_qh_hw       *hw;

                here = ehci->pshadow [frame];
                type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
                while (here.ptr) {
                        switch (hc32_to_cpu(ehci, type)) {
                        case Q_TYPE_ITD:
                                type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
                                here = here.itd->itd_next;
                                continue;
                        case Q_TYPE_QH:
                                hw = here.qh->hw;
                                if (same_tt (dev, here.qh->dev)) {
                                        u32             mask;

                                        mask = hc32_to_cpu(ehci,
                                                        hw->hw_info2);
                                        /* "knows" no gap is needed */
                                        mask |= mask >> 8;
                                        if (mask & uf_mask)
                                                break;
                                }
                                type = Q_NEXT_TYPE(ehci, hw->hw_next);
                                here = here.qh->qh_next;
                                continue;
                        case Q_TYPE_SITD:
                                if (same_tt (dev, here.sitd->urb->dev)) {
                                        u16             mask;

                                        mask = hc32_to_cpu(ehci, here.sitd
                                                                ->hw_uframe);
                                        /* FIXME assumes no gap for IN! */
                                        mask |= mask >> 8;
                                        if (mask & uf_mask)
                                                break;
                                }
                                type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
                                here = here.sitd->sitd_next;
                                continue;
                        // case Q_TYPE_FSTN:
                        default:
                                ehci_dbg (ehci,
                                        "periodic frame %d bogus type %d\n",
                                        frame, type);
                        }

                        /* collision or error */
                        return 0;
                }
        }

        /* no collision */
        return 1;
}
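
/*
 * Worked example (illustrative only): an ssplit in uframe 1 whose
 * csplits land in uframes 3 and 4 passes uf_mask == 0x02 | 0x18 == 0x1a.
 * Each QH/siTD already scheduled on the same TT folds its C-mask down
 * onto its S-mask (mask |= mask >> 8); if that overlaps uf_mask, the
 * function reports a collision by returning 0.
 */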

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
        if (ehci->periodic_count++)
                return;

        /* Stop waiting to turn off the periodic schedule */
        ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

        /* Don't start the schedule until PSS is 0 */
        ehci_poll_PSS(ehci);
        turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
        if (--ehci->periodic_count)
                return;

        /* Don't turn off the schedule until PSS is 1 */
        ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        unsigned        i;
        unsigned        period = qh->period;

        dev_dbg (&qh->dev->dev,
                "link qh%d-%04x/%p start %d [%d/%d us]\n",
                period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
                        & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);

        /* high bandwidth, or otherwise every microframe */
        if (period == 0)
                period = 1;

        for (i = qh->start; i < ehci->periodic_size; i += period) {
                union ehci_shadow       *prev = &ehci->pshadow[i];
                __hc32                  *hw_p = &ehci->periodic[i];
                union ehci_shadow       here = *prev;
                __hc32                  type = 0;

                /* skip the iso nodes at list head */
                while (here.ptr) {
                        type = Q_NEXT_TYPE(ehci, *hw_p);
                        if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
                                break;
                        prev = periodic_next_shadow(ehci, prev, type);
                        hw_p = shadow_next_periodic(ehci, &here, type);
                        here = *prev;
                }

                /* sorting each branch by period (slow-->fast)
                 * enables sharing interior tree nodes
                 */
                while (here.ptr && qh != here.qh) {
                        if (qh->period > here.qh->period)
                                break;
                        prev = &here.qh->qh_next;
                        hw_p = &here.qh->hw->hw_next;
                        here = *prev;
                }
                /* link in this qh, unless some earlier pass did that */
                if (qh != here.qh) {
                        qh->qh_next = here;
                        if (here.qh)
                                qh->hw->hw_next = *hw_p;
                        wmb ();
                        prev->qh = qh;
                        *hw_p = QH_NEXT (ehci, qh->qh_dma);
                }
        }
        qh->qh_state = QH_STATE_LINKED;
        qh->xacterrs = 0;
        qh->exception = 0;

        /* update per-qh bandwidth for usbfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
                ? ((qh->usecs + qh->c_usecs) / qh->period)
                : (qh->usecs * 8);

        list_add(&qh->intr_node, &ehci->intr_qh_list);

        /* maybe enable periodic schedule processing */
        ++ehci->intr_count;
        enable_periodic(ehci);
}
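
/*
 * Illustration (illustrative only): a qh with period 4 and start 1
 * lands in pshadow[1], pshadow[5], pshadow[9], ... so the controller
 * encounters it every 4th frame.  Because each branch is sorted
 * slow-to-fast, a period-8 qh can point at a period-4 qh and share
 * everything downstream of it instead of duplicating those nodes.
 */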

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        unsigned        i;
        unsigned        period;

        /*
         * If qh is for a low/full-speed device, simply unlinking it
         * could interfere with an ongoing split transaction.  To unlink
         * it safely would require setting the QH_INACTIVATE bit and
         * waiting at least one frame, as described in EHCI 4.12.2.5.
         *
         * We won't bother with any of this.  Instead, we assume that the
         * only reason for unlinking an interrupt QH while the current URB
         * is still active is to dequeue all the URBs (flush the whole
         * endpoint queue).
         *
         * If rebalancing the periodic schedule is ever implemented, this
         * approach will no longer be valid.
         */

        /* high bandwidth, or otherwise part of every microframe */
        if ((period = qh->period) == 0)
                period = 1;

        for (i = qh->start; i < ehci->periodic_size; i += period)
                periodic_unlink (ehci, i, qh);

        /* update per-qh bandwidth for usbfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
                ? ((qh->usecs + qh->c_usecs) / qh->period)
                : (qh->usecs * 8);

        dev_dbg (&qh->dev->dev,
                "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
                qh->period,
                hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);

        /* qh->qh_next still "live" to HC */
        qh->qh_state = QH_STATE_UNLINK;
        qh->qh_next.ptr = NULL;

        if (ehci->qh_scan_next == qh)
                ehci->qh_scan_next = list_entry(qh->intr_node.next,
                                struct ehci_qh, intr_node);
        list_del(&qh->intr_node);
}

static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        if (qh->qh_state != QH_STATE_LINKED ||
                        list_empty(&qh->unlink_node))
                return;

        list_del_init(&qh->unlink_node);

        /*
         * TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event to
         * avoid an unnecessary CPU wakeup
         */
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        /* If the QH isn't linked then there's nothing we can do. */
        if (qh->qh_state != QH_STATE_LINKED)
                return;

        /* if the qh is waiting for unlink, cancel it now */
        cancel_unlink_wait_intr(ehci, qh);

        qh_unlink_periodic (ehci, qh);

        /* Make sure the unlinks are visible before starting the timer */
        wmb();

        /*
         * The EHCI spec doesn't say how long it takes the controller to
         * stop accessing an unlinked interrupt QH.  The timer delay is
         * 9 uframes; presumably that will be long enough.
         */
        qh->unlink_cycle = ehci->intr_unlink_cycle;

        /* New entries go at the end of the intr_unlink list */
        list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

        if (ehci->intr_unlinking)
                ;       /* Avoid recursive calls */
        else if (ehci->rh_state < EHCI_RH_RUNNING)
                ehci_handle_intr_unlinks(ehci);
        else if (ehci->intr_unlink.next == &qh->unlink_node) {
                ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
                ++ehci->intr_unlink_cycle;
        }
}

/*
 * It is common for only one intr URB to be scheduled on one qh, and
 * given that complete() is run in tasklet context, introduce some
 * delay to avoid unlinking the qh too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
                                   struct ehci_qh *qh)
{
        qh->unlink_cycle = ehci->intr_unlink_wait_cycle;

        /* New entries go at the end of the intr_unlink_wait list */
        list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

        if (ehci->rh_state < EHCI_RH_RUNNING)
                ehci_handle_start_intr_unlinks(ehci);
        else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
                ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
                ++ehci->intr_unlink_wait_cycle;
        }
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qh_hw       *hw = qh->hw;
        int                     rc;

        qh->qh_state = QH_STATE_IDLE;
        hw->hw_next = EHCI_LIST_END(ehci);

        if (!list_empty(&qh->qtd_list))
                qh_completions(ehci, qh);

        /* reschedule QH iff another request is queued */
        if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
                rc = qh_schedule(ehci, qh);
                if (rc == 0) {
                        qh_refresh(ehci, qh);
                        qh_link_periodic(ehci, qh);
                }

                /* An error here likely indicates handshake failure
                 * or no space left in the schedule.  Neither fault
                 * should happen often ...
                 *
                 * FIXME kill the now-dysfunctional queued urbs
                 */
                else {
                        ehci_err(ehci, "can't reschedule qh %p, err %d\n",
                                        qh, rc);
                }
        }

        /* maybe turn off periodic schedule */
        --ehci->intr_count;
        disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period (
        struct ehci_hcd *ehci,
        unsigned        frame,
        unsigned        uframe,
        unsigned        period,
        unsigned        usecs
) {
        int             claimed;

        /* complete split running into next frame?
         * given FSTN support, we could sometimes check...
         */
        if (uframe >= 8)
                return 0;

        /* convert "usecs we need" to "max already claimed" */
        usecs = ehci->uframe_periodic_max - usecs;

        /* we "know" 2 and 4 uframe intervals were rejected; so
         * for period 0, check _every_ microframe in the schedule.
         */
        if (unlikely (period == 0)) {
                do {
                        for (uframe = 0; uframe < 7; uframe++) {
                                claimed = periodic_usecs (ehci, frame, uframe);
                                if (claimed > usecs)
                                        return 0;
                        }
                } while ((frame += 1) < ehci->periodic_size);

        /* just check the specified uframe, at that period */
        } else {
                do {
                        claimed = periodic_usecs (ehci, frame, uframe);
                        if (claimed > usecs)
                                return 0;
                } while ((frame += period) < ehci->periodic_size);
        }

        // success!
        return 1;
}
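
/*
 * Worked example (illustrative only): with uframe_periodic_max == 100
 * (the default 80% of a 125 usec uframe), a request for 30 usecs turns
 * into "fail if more than 70 usecs are already claimed", and the loop
 * then probes every period-th frame at the chosen uframe.
 */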

static int check_intr_schedule (
        struct ehci_hcd         *ehci,
        unsigned                frame,
        unsigned                uframe,
        const struct ehci_qh    *qh,
        __hc32                  *c_maskp
)
{
        int             retval = -ENOSPC;
        u8              mask = 0;

        if (qh->c_usecs && uframe >= 6)         /* FSTN territory? */
                goto done;

        if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
                goto done;
        if (!qh->c_usecs) {
                retval = 0;
                *c_maskp = 0;
                goto done;
        }

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
        if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
                                qh->tt_usecs)) {
                unsigned i;

                /* TODO : this may need FSTN for SSPLIT in uframe 5. */
                for (i=uframe+1; i<8 && i<uframe+4; i++)
                        if (!check_period (ehci, frame, i,
                                                qh->period, qh->c_usecs))
                                goto done;
                        else
                                mask |= 1 << i;

                retval = 0;

                *c_maskp = cpu_to_hc32(ehci, mask << 8);
        }
#else
        /* Make sure this tt's buffer is also available for CSPLITs.
         * We pessimize a bit; probably the typical full speed case
         * doesn't need the second CSPLIT.
         *
         * NOTE:  both SPLIT and CSPLIT could be checked in just
         * one smart pass...
         */
        mask = 0x03 << (uframe + qh->gap_uf);
        *c_maskp = cpu_to_hc32(ehci, mask << 8);

        mask |= 1 << uframe;
        if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
                if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
                                        qh->period, qh->c_usecs))
                        goto done;
                if (!check_period (ehci, frame, uframe + qh->gap_uf,
                                        qh->period, qh->c_usecs))
                        goto done;
                retval = 0;
        }
#endif
done:
        return retval;
}
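
/*
 * Worked example (illustrative only, CONFIG_USB_EHCI_TT_NEWSCHED case):
 * for an ssplit in uframe 0, the loop above tries csplits in uframes
 * 1..3; if they all fit, mask == 0x0e and *c_maskp encodes 0x0e00, the
 * C-mask bits for uframes 1-3 in hw_info2 format.
 */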

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        int             status;
        unsigned        uframe;
        __hc32          c_mask;
        unsigned        frame;          /* 0..(qh->period - 1), or NO_FRAME */
        struct ehci_qh_hw       *hw = qh->hw;

        hw->hw_next = EHCI_LIST_END(ehci);
        frame = qh->start;

        /* reuse the previous schedule slots, if we can */
        if (frame < qh->period) {
                uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
                status = check_intr_schedule (ehci, frame, --uframe,
                                qh, &c_mask);
        } else {
                uframe = 0;
                c_mask = 0;
                status = -ENOSPC;
        }

        /* else scan the schedule to find a group of slots such that all
         * uframes have enough periodic bandwidth available.
         */
        if (status) {
                /* "normal" case, uframing flexible except with splits */
                if (qh->period) {
                        int             i;

                        for (i = qh->period; status && i > 0; --i) {
                                frame = ++ehci->random_frame % qh->period;
                                for (uframe = 0; uframe < 8; uframe++) {
                                        status = check_intr_schedule (ehci,
                                                        frame, uframe, qh,
                                                        &c_mask);
                                        if (status == 0)
                                                break;
                                }
                        }

                /* qh->period == 0 means every uframe */
                } else {
                        frame = 0;
                        status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
                }
                if (status)
                        goto done;
                qh->start = frame;

                /* reset S-frame and (maybe) C-frame masks */
                hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
                hw->hw_info2 |= qh->period
                        ? cpu_to_hc32(ehci, 1 << uframe)
                        : cpu_to_hc32(ehci, QH_SMASK);
                hw->hw_info2 |= c_mask;
        } else
                ehci_dbg (ehci, "reused qh %p schedule\n", qh);

done:
        return status;
}
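
/*
 * Illustration (hypothetical values): a period-8 qh that was previously
 * scheduled keeps qh->start < qh->period across a reschedule, so the
 * old uframe is recovered from the S-mask with ffs() - 1 and re-checked
 * before falling back to the random-ish first-fit scan above.
 */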

static int intr_submit (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
        gfp_t                   mem_flags
) {
        unsigned                epnum;
        unsigned long           flags;
        struct ehci_qh          *qh;
        int                     status;
        struct list_head        empty;

        /* get endpoint and transfer/schedule data */
        epnum = urb->ep->desc.bEndpointAddress;

        spin_lock_irqsave (&ehci->lock, flags);

        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
                status = -ESHUTDOWN;
                goto done_not_linked;
        }
        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
        if (unlikely(status))
                goto done_not_linked;

        /* get qh and force any scheduling errors */
        INIT_LIST_HEAD (&empty);
        qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
        if (qh == NULL) {
                status = -ENOMEM;
                goto done;
        }
        if (qh->qh_state == QH_STATE_IDLE) {
                if ((status = qh_schedule (ehci, qh)) != 0)
                        goto done;
        }

        /* then queue the urb's tds to the qh */
        qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
        BUG_ON (qh == NULL);

        /* stuff into the periodic schedule */
        if (qh->qh_state == QH_STATE_IDLE) {
                qh_refresh(ehci, qh);
                qh_link_periodic(ehci, qh);
        } else {
                /* cancel unlink wait for the qh */
                cancel_unlink_wait_intr(ehci, qh);
        }

        /* ... update usbfs periodic stats */
        ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
        if (unlikely(status))
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
        spin_unlock_irqrestore (&ehci->lock, flags);
        if (status)
                qtd_list_free (ehci, urb, qtd_list);

        return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
        struct ehci_qh          *qh;

        list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
                        intr_node) {

                /* clean any finished work for this qh */
                if (!list_empty(&qh->qtd_list)) {
                        int temp;

                        /*
                         * Unlinks could happen here; completion reporting
                         * drops the lock.  That's why ehci->qh_scan_next
                         * always holds the next qh to scan; if the next qh
                         * gets unlinked then ehci->qh_scan_next is adjusted
                         * in qh_unlink_periodic().
                         */
                        temp = qh_completions(ehci, qh);
                        if (unlikely(temp))
                                start_unlink_intr(ehci, qh);
                        else if (unlikely(list_empty(&qh->qtd_list) &&
                                        qh->qh_state == QH_STATE_LINKED))
                                start_unlink_intr_wait(ehci, qh);
                }
        }
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
        struct ehci_iso_stream *stream;

        stream = kzalloc(sizeof *stream, mem_flags);
        if (likely (stream != NULL)) {
                INIT_LIST_HEAD(&stream->td_list);
                INIT_LIST_HEAD(&stream->free_list);
                stream->next_uframe = -1;
        }
        return stream;
}

static void
iso_stream_init (
        struct ehci_hcd         *ehci,
        struct ehci_iso_stream  *stream,
        struct usb_device       *dev,
        int                     pipe,
        unsigned                interval
)
{
        static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

        u32                     buf1;
        unsigned                epnum, maxp;
        int                     is_input;
        long                    bandwidth;

        /*
         * this might be a "high bandwidth" highspeed endpoint,
         * as encoded in the ep descriptor's wMaxPacket field
         */
        epnum = usb_pipeendpoint (pipe);
        is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
        maxp = usb_maxpacket(dev, pipe, !is_input);
        if (is_input) {
                buf1 = (1 << 11);
        } else {
                buf1 = 0;
        }

        /* knows about ITD vs SITD */
        if (dev->speed == USB_SPEED_HIGH) {
                unsigned multi = hb_mult(maxp);

                stream->highspeed = 1;

                maxp = max_packet(maxp);
                buf1 |= maxp;
                maxp *= multi;

                stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
                stream->buf1 = cpu_to_hc32(ehci, buf1);
                stream->buf2 = cpu_to_hc32(ehci, multi);

                /* usbfs wants to report the average usecs per frame tied up
                 * when transfers on this endpoint are scheduled ...
                 */
                stream->usecs = HS_USECS_ISO (maxp);
                bandwidth = stream->usecs * 8;
                bandwidth /= interval;

        } else {
                u32             addr;
                int             think_time;
                int             hs_transfers;

                addr = dev->ttport << 24;
                if (!ehci_is_TDI(ehci)
                                || (dev->tt->hub !=
                                        ehci_to_hcd(ehci)->self.root_hub))
                        addr |= dev->tt->hub->devnum << 16;
                addr |= epnum << 8;
                addr |= dev->devnum;
                stream->usecs = HS_USECS_ISO (maxp);
                think_time = dev->tt ? dev->tt->think_time : 0;
                stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
                                dev->speed, is_input, 1, maxp));
                hs_transfers = max (1u, (maxp + 187) / 188);
                if (is_input) {
                        u32     tmp;

                        addr |= 1 << 31;
                        stream->c_usecs = stream->usecs;
                        stream->usecs = HS_USECS_ISO (1);
                        stream->raw_mask = 1;

                        /* c-mask as specified in USB 2.0 11.18.4 3.c */
                        tmp = (1 << (hs_transfers + 2)) - 1;
                        stream->raw_mask |= tmp << (8 + 2);
                } else
                        stream->raw_mask = smask_out [hs_transfers - 1];
                bandwidth = stream->usecs + stream->c_usecs;
                bandwidth /= interval << 3;

                /* stream->splits gets created from raw_mask later */
                stream->address = cpu_to_hc32(ehci, addr);
        }
        stream->bandwidth = bandwidth;

        stream->udev = dev;

        stream->bEndpointAddress = is_input | epnum;
        stream->interval = interval;
        stream->maxp = maxp;
}
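
/*
 * Worked example (illustrative only): a high speed iso IN endpoint with
 * wMaxPacketSize 0x0c00 encodes maxp 1024 with a high-bandwidth
 * multiplier of 2, so buf1 == (1 << 11) | 1024, buf2 == 2, and usecs is
 * HS_USECS_ISO(2048) for both transactions in the uframe.
 */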

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
        unsigned                epnum;
        struct ehci_iso_stream  *stream;
        struct usb_host_endpoint *ep;
        unsigned long           flags;

        epnum = usb_pipeendpoint (urb->pipe);
        if (usb_pipein(urb->pipe))
                ep = urb->dev->ep_in[epnum];
        else
                ep = urb->dev->ep_out[epnum];

        spin_lock_irqsave (&ehci->lock, flags);
        stream = ep->hcpriv;

        if (unlikely (stream == NULL)) {
                stream = iso_stream_alloc(GFP_ATOMIC);
                if (likely (stream != NULL)) {
                        ep->hcpriv = stream;
                        stream->ep = ep;
                        iso_stream_init(ehci, stream, urb->dev, urb->pipe,
                                        urb->interval);
                }

        /* if dev->ep [epnum] is a QH, hw is set */
        } else if (unlikely (stream->hw != NULL)) {
                ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
                        urb->dev->devpath, epnum,
                        usb_pipein(urb->pipe) ? "in" : "out");
                stream = NULL;
        }

        spin_unlock_irqrestore (&ehci->lock, flags);
        return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
        struct ehci_iso_sched   *iso_sched;
        int                     size = sizeof *iso_sched;

        size += packets * sizeof (struct ehci_iso_packet);
        iso_sched = kzalloc(size, mem_flags);
        if (likely (iso_sched != NULL)) {
                INIT_LIST_HEAD (&iso_sched->td_list);
        }
        return iso_sched;
}

static inline void
itd_sched_init(
        struct ehci_hcd         *ehci,
        struct ehci_iso_sched   *iso_sched,
        struct ehci_iso_stream  *stream,
        struct urb              *urb
)
{
        unsigned        i;
        dma_addr_t      dma = urb->transfer_dma;

        /* how many uframes are needed for these transfers */
        iso_sched->span = urb->number_of_packets * stream->interval;

        /* figure out per-uframe itd fields that we'll need later
         * when we fit new itds into the schedule.
         */
        for (i = 0; i < urb->number_of_packets; i++) {
                struct ehci_iso_packet  *uframe = &iso_sched->packet [i];
                unsigned                length;
                dma_addr_t              buf;
                u32                     trans;

                length = urb->iso_frame_desc [i].length;
                buf = dma + urb->iso_frame_desc [i].offset;

                trans = EHCI_ISOC_ACTIVE;
                trans |= buf & 0x0fff;
                if (unlikely (((i + 1) == urb->number_of_packets))
                                && !(urb->transfer_flags & URB_NO_INTERRUPT))
                        trans |= EHCI_ITD_IOC;
                trans |= length << 16;
                uframe->transaction = cpu_to_hc32(ehci, trans);

                /* might need to cross a buffer page within a uframe */
                uframe->bufp = (buf & ~(u64)0x0fff);
                buf += length;
                if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
                        uframe->cross = 1;
        }
}
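
/*
 * Worked example (illustrative only): a 512 byte packet at buffer offset
 * 0x100 packs as trans == EHCI_ISOC_ACTIVE | (512 << 16) | 0x100, with
 * EHCI_ITD_IOC OR-ed in on the final packet when URB_NO_INTERRUPT is
 * clear; bits 11:0 carry the page offset, bits 27:16 the length.
 */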

static void
iso_sched_free (
        struct ehci_iso_stream  *stream,
        struct ehci_iso_sched   *iso_sched
)
{
        if (!iso_sched)
                return;
        // caller must hold ehci->lock!
        list_splice (&iso_sched->td_list, &stream->free_list);
        kfree (iso_sched);
}

static int
itd_urb_transaction (
        struct ehci_iso_stream  *stream,
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        gfp_t                   mem_flags
)
{
        struct ehci_itd         *itd;
        dma_addr_t              itd_dma;
        int                     i;
        unsigned                num_itds;
        struct ehci_iso_sched   *sched;
        unsigned long           flags;

        sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
        if (unlikely (sched == NULL))
                return -ENOMEM;

        itd_sched_init(ehci, sched, stream, urb);

        if (urb->interval < 8)
                num_itds = 1 + (sched->span + 7) / 8;
        else
                num_itds = urb->number_of_packets;

        /* allocate/init ITDs */
        spin_lock_irqsave (&ehci->lock, flags);
        for (i = 0; i < num_itds; i++) {

                /*
                 * Use iTDs from the free list, but not iTDs that may
                 * still be in use by the hardware.
                 */
                if (likely(!list_empty(&stream->free_list))) {
                        itd = list_first_entry(&stream->free_list,
                                        struct ehci_itd, itd_list);
                        if (itd->frame == ehci->now_frame)
                                goto alloc_itd;
                        list_del (&itd->itd_list);
                        itd_dma = itd->itd_dma;
                } else {
 alloc_itd:
                        spin_unlock_irqrestore (&ehci->lock, flags);
                        itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
                                        &itd_dma);
                        spin_lock_irqsave (&ehci->lock, flags);
                        if (!itd) {
                                iso_sched_free(stream, sched);
                                spin_unlock_irqrestore(&ehci->lock, flags);
                                return -ENOMEM;
                        }
                }

                memset (itd, 0, sizeof *itd);
                itd->itd_dma = itd_dma;
                itd->frame = 9999;              /* an invalid value */
                list_add (&itd->itd_list, &sched->td_list);
        }
        spin_unlock_irqrestore (&ehci->lock, flags);

        /* temporarily store schedule info in hcpriv */
        urb->hcpriv = sched;
        urb->error_count = 0;
        return 0;
}

/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
        struct ehci_hcd         *ehci,
        u32                     mod,
        u32                     uframe,
        u8                      usecs,
        u32                     period
)
{
        uframe %= period;
        do {
                /* can't commit more than uframe_periodic_max usec */
                if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
                                > (ehci->uframe_periodic_max - usecs))
                        return 0;

                /* we know urb->interval is 2^N uframes */
                uframe += period;
        } while (uframe < mod);
        return 1;
}
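
/*
 * Worked example (illustrative only): with mod == periodic_size << 3 and
 * period == 8 uframes (a one-frame interval), "uframe %= period" picks
 * the slot within the first frame, and the loop then probes that same
 * uframe in every frame of the schedule before the iTD may claim it.
 */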

static inline int
sitd_slot_ok (
        struct ehci_hcd         *ehci,
        u32                     mod,
        struct ehci_iso_stream  *stream,
        u32                     uframe,
        struct ehci_iso_sched   *sched,
        u32                     period_uframes
)
{
        u32                     mask, tmp;
        u32                     frame, uf;

        mask = stream->raw_mask << (uframe & 7);

        /* for IN, don't wrap CSPLIT into the next frame */
        if (mask & ~0xffff)
                return 0;

        /* check bandwidth */
        uframe %= period_uframes;
        frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
        /* The tt's fullspeed bus bandwidth must be available.
         * tt_available scheduling guarantees 10+% for control/bulk.
         */
        uf = uframe & 7;
        if (!tt_available(ehci, period_uframes >> 3,
                        stream->udev, frame, uf, stream->tt_usecs))
                return 0;
#else
        /* tt must be idle for start(s), any gap, and csplit.
         * assume scheduling slop leaves 10+% for control/bulk.
         */
        if (!tt_no_collision(ehci, period_uframes >> 3,
                        stream->udev, frame, mask))
                return 0;
#endif

        /* this multi-pass logic is simple, but performance may
         * suffer when the schedule data isn't cached.
         */
        do {
                u32             max_used;

                frame = uframe >> 3;
                uf = uframe & 7;

                /* check starts (OUT uses more than one) */
                max_used = ehci->uframe_periodic_max - stream->usecs;
                for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
                        if (periodic_usecs (ehci, frame, uf) > max_used)
                                return 0;
                }

                /* for IN, check CSPLIT */
                if (stream->c_usecs) {
                        uf = uframe & 7;
                        max_used = ehci->uframe_periodic_max - stream->c_usecs;
                        do {
                                tmp = 1 << uf;
                                tmp <<= 8;
                                if ((stream->raw_mask & tmp) == 0)
                                        continue;
                                if (periodic_usecs (ehci, frame, uf)
                                                > max_used)
                                        return 0;
                        } while (++uf < 8);
                }

                /* we know urb->interval is 2^N uframes */
                uframe += period_uframes;
        } while (uframe < mod);

        stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
        return 1;
}
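
/*
 * Worked example (illustrative only): a full speed iso OUT needing two
 * start-splits uses raw_mask == smask_out[1] == 0x03; placed at uframe 1
 * that shifts to 0x06, i.e. SSPLITs in uframes 1 and 2 of the H-frame.
 */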

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULING_DELAY        40      /* microframes */
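
/*
 * Context (hedged estimate): 40 uframes is 5 msec of slack past "now"
 * before the first transfer is placed, comfortably beyond typical irq
 * latency while costing little queue depth.
 */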
1365
1366static int
1367iso_stream_schedule (
1368        struct ehci_hcd         *ehci,
1369        struct urb              *urb,
1370        struct ehci_iso_stream  *stream
1371)
1372{
1373        u32                     now, base, next, start, period, span;
1374        int                     status;
1375        unsigned                mod = ehci->periodic_size << 3;
1376        struct ehci_iso_sched   *sched = urb->hcpriv;
1377
1378        period = urb->interval;
1379        span = sched->span;
1380        if (!stream->highspeed) {
1381                period <<= 3;
1382                span <<= 3;
1383        }
1384
1385        now = ehci_read_frame_index(ehci) & (mod - 1);
1386
1387        /* Typical case: reuse current schedule, stream is still active.
1388         * Hopefully there are no gaps from the host falling behind
1389         * (irq delays etc).  If there are, the behavior depends on
1390         * whether URB_ISO_ASAP is set.
1391         */
1392        if (likely (!list_empty (&stream->td_list))) {
1393
1394                /* Take the isochronous scheduling threshold into account */
1395                if (ehci->i_thresh)
1396                        next = now + ehci->i_thresh;    /* uframe cache */
1397                else
1398                        next = (now + 2 + 7) & ~0x07;   /* full frame cache */
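                /*
                 * Note: i_thresh reflects the isochronous scheduling
                 * threshold from HCCPARAMS.  Nonzero means the HC may
                 * cache up to that many uframes ahead; zero means it may
                 * cache a full frame, so "now + 2" is rounded up to a
                 * frame boundary instead.
                 */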
1399
1400                /*
1401                 * Use ehci->last_iso_frame as the base.  There can't be any
1402                 * TDs scheduled for earlier than that.
1403                 */
1404                base = ehci->last_iso_frame << 3;
1405                next = (next - base) & (mod - 1);
1406                start = (stream->next_uframe - base) & (mod - 1);
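                /*
                 * Note: "next" and "start" are now distances from "base"
                 * along the circular schedule, so unsigned subtract-and-
                 * mask compares wrap-safely; e.g. with mod = 2048,
                 * base = 2040 and next_uframe = 8,
                 * start = (8 - 2040) & 2047 = 16.
                 */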
1407
1408                /* Is the schedule already full? */
1409                if (unlikely(start < period)) {
1410                        ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
1411                                        urb, stream->next_uframe, base,
1412                                        period, mod);
1413                        status = -ENOSPC;
1414                        goto fail;
1415                }
1416
1417                /* Behind the scheduling threshold? */
1418                if (unlikely(start < next)) {
1419                        unsigned now2 = (now - base) & (mod - 1);
1420
1421                        /* URB_ISO_ASAP: Round up to the first available slot */
1422                        if (urb->transfer_flags & URB_ISO_ASAP)
1423                                start += (next - start + period - 1) & -period;
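                        /*
                         * Note: period is a power of two (see the 2^N
                         * comment in sitd_slot_ok), so this rounds the gap
                         * "next - start" up to the next multiple of period,
                         * e.g. gap 5, period 8 --> (5 + 7) & ~7 = 8.
                         */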
1424
1425                        /*
1426                         * Not ASAP: Use the next slot in the stream,
1427                         * no matter what.
1428                         */
1429                        else if (start + span - period < now2) {
1430                                ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
1431                                                urb, start + base,
1432                                                span - period, now2 + base);
1433                        }
1434                }
1435
1436                start += base;
1437        }
1438
1439        /* Need to schedule; when's the next (u)frame we could start?
1440         * The delay is bigger than ehci->i_thresh requires: scheduling
1441         * itself isn't free, and the slack should accommodate reasonably
1442         * slow CPUs.  It can also help high-bandwidth streams, since the
1443         * DMA and IRQ loads don't jump until after the queue is primed.
1444         */
1445        else {
1446                int done = 0;
1447
1448                base = now & ~0x07;
1449                start = base + SCHEDULING_DELAY;
1450
1451                /* find a uframe slot with enough bandwidth.
1452                 * Early uframes are more precious because full-speed
1453         * iso IN transfers can't use late uframes, so the early
1454         * uframes should be allocated last.
1455                 */
1456                next = start;
1457                start += period;
1458                do {
1459                        start--;
1460                        /* check schedule: enough space? */
1461                        if (stream->highspeed) {
1462                                if (itd_slot_ok(ehci, mod, start,
1463                                                stream->usecs, period))
1464                                        done = 1;
1465                        } else {
1466                                if ((start % 8) >= 6)
1467                                        continue;
1468                                if (sitd_slot_ok(ehci, mod, stream,
1469                                                start, sched, period))
1470                                        done = 1;
1471                        }
1472                } while (start > next && !done);
1473
1474                /* no room in the schedule */
1475                if (!done) {
1476                        ehci_dbg(ehci, "iso sched full %p\n", urb);
1477                        status = -ENOSPC;
1478                        goto fail;
1479                }
1480        }
1481
1482        /* Tried to schedule too far into the future? */
1483        if (unlikely(start - base + span - period >= mod)) {
1484                ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
1485                                urb, start - base, span - period, mod);
1486                status = -EFBIG;
1487                goto fail;
1488        }
1489
1490        stream->next_uframe = start & (mod - 1);
1491
1492        /* report high speed start in uframes; full speed, in frames */
1493        urb->start_frame = stream->next_uframe;
1494        if (!stream->highspeed)
1495                urb->start_frame >>= 3;
1496
1497        /* Make sure scan_isoc() sees these */
1498        if (ehci->isoc_count == 0)
1499                ehci->last_iso_frame = now >> 3;
1500        return 0;
1501
1502 fail:
1503        iso_sched_free(stream, sched);
1504        urb->hcpriv = NULL;
1505        return status;
1506}
1507
1508/*-------------------------------------------------------------------------*/
1509
1510static inline void
1511itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1512                struct ehci_itd *itd)
1513{
1514        int i;
1515
1516        /* it's been recently zeroed */
1517        itd->hw_next = EHCI_LIST_END(ehci);
1518        itd->hw_bufp [0] = stream->buf0;
1519        itd->hw_bufp [1] = stream->buf1;
1520        itd->hw_bufp [2] = stream->buf2;
1521
1522        for (i = 0; i < 8; i++)
1523                itd->index[i] = -1;
1524
1525        /* All other fields are filled when scheduling */
1526}
1527
1528static inline void
1529itd_patch(
1530        struct ehci_hcd         *ehci,
1531        struct ehci_itd         *itd,
1532        struct ehci_iso_sched   *iso_sched,
1533        unsigned                index,
1534        u16                     uframe
1535)
1536{
1537        struct ehci_iso_packet  *uf = &iso_sched->packet [index];
1538        unsigned                pg = itd->pg;
1539
1540        // BUG_ON (pg == 6 && uf->cross);
1541
1542        uframe &= 0x07;
1543        itd->index [uframe] = index;
1544
1545        itd->hw_transaction[uframe] = uf->transaction;
1546        itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1547        itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1548        itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1549
1550        /* iso_frame_desc[].offset must be strictly increasing */
1551        if (unlikely (uf->cross)) {
1552                u64     bufp = uf->bufp + 4096;
1553
1554                itd->pg = ++pg;
1555                itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1556                itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1557        }
1558}
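
/*
 * Note on the "cross" case above: each hw_bufp[] slot maps one 4096-byte
 * page, so a packet that straddles a page boundary also gets the
 * following page.  On the scheduling side, the straddle test is just
 *
 *      cross = ((buf + length) & ~(u64)0x0fff) != (buf & ~(u64)0x0fff);
 *
 * as in sitd_sched_init() below.
 */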
1559
1560static inline void
1561itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1562{
1563        union ehci_shadow       *prev = &ehci->pshadow[frame];
1564        __hc32                  *hw_p = &ehci->periodic[frame];
1565        union ehci_shadow       here = *prev;
1566        __hc32                  type = 0;
1567
1568        /* skip any iso nodes which might belong to previous microframes */
1569        while (here.ptr) {
1570                type = Q_NEXT_TYPE(ehci, *hw_p);
1571                if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1572                        break;
1573                prev = periodic_next_shadow(ehci, prev, type);
1574                hw_p = shadow_next_periodic(ehci, &here, type);
1575                here = *prev;
1576        }
1577
1578        itd->itd_next = here;
1579        itd->hw_next = *hw_p;
1580        prev->itd = itd;
1581        itd->frame = frame;
1582        wmb ();
1583        *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1584}
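
/*
 * Note: itd_link() follows the usual lock-free publish order: the new
 * iTD is fully initialized (itd_next, hw_next) first, wmb() orders those
 * stores, and only then is the hardware-visible link *hw_p rewritten, so
 * the HC can never follow a half-built entry.
 */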
1585
1586/* fit urb's itds into the selected schedule slot; activate as needed */
1587static void itd_link_urb(
1588        struct ehci_hcd         *ehci,
1589        struct urb              *urb,
1590        unsigned                mod,
1591        struct ehci_iso_stream  *stream
1592)
1593{
1594        int                     packet;
1595        unsigned                next_uframe, uframe, frame;
1596        struct ehci_iso_sched   *iso_sched = urb->hcpriv;
1597        struct ehci_itd         *itd;
1598
1599        next_uframe = stream->next_uframe & (mod - 1);
1600
1601        if (unlikely (list_empty(&stream->td_list)))
1602                ehci_to_hcd(ehci)->self.bandwidth_allocated
1603                                += stream->bandwidth;
1604
1605        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1606                if (ehci->amd_pll_fix == 1)
1607                        usb_amd_quirk_pll_disable();
1608        }
1609
1610        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1611
1612        /* fill iTDs uframe by uframe */
1613        for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
1614                if (itd == NULL) {
1615                        /* ASSERT:  we have all necessary itds */
1616                        // BUG_ON (list_empty (&iso_sched->td_list));
1617
1618                        /* ASSERT:  no itds for this endpoint in this uframe */
1619
1620                        itd = list_entry (iso_sched->td_list.next,
1621                                        struct ehci_itd, itd_list);
1622                        list_move_tail (&itd->itd_list, &stream->td_list);
1623                        itd->stream = stream;
1624                        itd->urb = urb;
1625                        itd_init (ehci, stream, itd);
1626                }
1627
1628                uframe = next_uframe & 0x07;
1629                frame = next_uframe >> 3;
1630
1631                itd_patch(ehci, itd, iso_sched, packet, uframe);
1632
1633                next_uframe += stream->interval;
1634                next_uframe &= mod - 1;
1635                packet++;
1636
1637                /* link completed itds into the schedule */
1638                if (((next_uframe >> 3) != frame)
1639                                || packet == urb->number_of_packets) {
1640                        itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1641                        itd = NULL;
1642                }
1643        }
1644        stream->next_uframe = next_uframe;
1645
1646        /* don't need that schedule data any more */
1647        iso_sched_free (stream, iso_sched);
1648        urb->hcpriv = stream;
1649
1650        ++ehci->isoc_count;
1651        enable_periodic(ehci);
1652}
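
/*
 * Worked example for itd_link_urb() above: with interval = 2 uframes and
 * 8 packets starting at uframe 0 of frame F, packets 0..3 patch uframes
 * 0/2/4/6 of a single iTD; next_uframe then rolls into frame F+1, so
 * that iTD is linked into slot F and a fresh one is taken from the
 * schedule data for packets 4..7.
 */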
1653
1654#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1655
1656/* Process and recycle a completed ITD.  Return true iff its urb completed,
1657 * and hence its completion callback probably added things to the hardware
1658 * schedule.
1659 *
1660 * Note that we carefully avoid recycling this descriptor until after any
1661 * completion callback runs, so that it won't be reused quickly.  That is,
1662 * assuming (a) no more than two urbs per frame on this endpoint, and also
1663 * (b) only this endpoint's completions submit URBs.  It seems some silicon
1664 * corrupts things if you reuse completed descriptors very quickly...
1665 */
1666static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1667{
1668        struct urb                              *urb = itd->urb;
1669        struct usb_iso_packet_descriptor        *desc;
1670        u32                                     t;
1671        unsigned                                uframe;
1672        int                                     urb_index = -1;
1673        struct ehci_iso_stream                  *stream = itd->stream;
1674        struct usb_device                       *dev;
1675        bool                                    retval = false;
1676
1677        /* for each uframe with a packet */
1678        for (uframe = 0; uframe < 8; uframe++) {
1679                if (likely (itd->index[uframe] == -1))
1680                        continue;
1681                urb_index = itd->index[uframe];
1682                desc = &urb->iso_frame_desc [urb_index];
1683
1684                t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
1685                itd->hw_transaction [uframe] = 0;
1686
1687                /* report transfer status */
1688                if (unlikely (t & ISO_ERRS)) {
1689                        urb->error_count++;
1690                        if (t & EHCI_ISOC_BUF_ERR)
1691                                desc->status = usb_pipein (urb->pipe)
1692                                        ? -ENOSR  /* hc couldn't read */
1693                                        : -ECOMM; /* hc couldn't write */
1694                        else if (t & EHCI_ISOC_BABBLE)
1695                                desc->status = -EOVERFLOW;
1696                        else /* (t & EHCI_ISOC_XACTERR) */
1697                                desc->status = -EPROTO;
1698
1699                        /* HC need not update length with this error */
1700                        if (!(t & EHCI_ISOC_BABBLE)) {
1701                                desc->actual_length = EHCI_ITD_LENGTH(t);
1702                                urb->actual_length += desc->actual_length;
1703                        }
1704                } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
1705                        desc->status = 0;
1706                        desc->actual_length = EHCI_ITD_LENGTH(t);
1707                        urb->actual_length += desc->actual_length;
1708                } else {
1709                        /* URB was too late */
1710                        urb->error_count++;
1711                }
1712        }
1713
1714        /* handle completion now? */
1715        if (likely ((urb_index + 1) != urb->number_of_packets))
1716                goto done;
1717
1718        /* ASSERT: it's really the last itd for this urb
1719        list_for_each_entry (itd, &stream->td_list, itd_list)
1720                BUG_ON (itd->urb == urb);
1721         */
1722
1723        /* give urb back to the driver; completion often (re)submits */
1724        dev = urb->dev;
1725        ehci_urb_done(ehci, urb, 0);
1726        retval = true;
1727        urb = NULL;
1728
1729        --ehci->isoc_count;
1730        disable_periodic(ehci);
1731
1732        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1733        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1734                if (ehci->amd_pll_fix == 1)
1735                        usb_amd_quirk_pll_enable();
1736        }
1737
1738        if (unlikely(list_is_singular(&stream->td_list)))
1739                ehci_to_hcd(ehci)->self.bandwidth_allocated
1740                                -= stream->bandwidth;
1741
1742done:
1743        itd->urb = NULL;
1744
1745        /* Add to the end of the free list for later reuse */
1746        list_move_tail(&itd->itd_list, &stream->free_list);
1747
1748        /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1749        if (list_empty(&stream->td_list)) {
1750                list_splice_tail_init(&stream->free_list,
1751                                &ehci->cached_itd_list);
1752                start_free_itds(ehci);
1753        }
1754
1755        return retval;
1756}
1757
1758/*-------------------------------------------------------------------------*/
1759
1760static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1761        gfp_t mem_flags)
1762{
1763        int                     status = -EINVAL;
1764        unsigned long           flags;
1765        struct ehci_iso_stream  *stream;
1766
1767        /* Get iso_stream head */
1768        stream = iso_stream_find (ehci, urb);
1769        if (unlikely (stream == NULL)) {
1770                ehci_dbg (ehci, "can't get iso stream\n");
1771                return -ENOMEM;
1772        }
1773        if (unlikely (urb->interval != stream->interval)) {
1774                ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
1775                        stream->interval, urb->interval);
1776                goto done;
1777        }
1778
1779#ifdef EHCI_URB_TRACE
1780        ehci_dbg (ehci,
1781                "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1782                __func__, urb->dev->devpath, urb,
1783                usb_pipeendpoint (urb->pipe),
1784                usb_pipein (urb->pipe) ? "in" : "out",
1785                urb->transfer_buffer_length,
1786                urb->number_of_packets, urb->interval,
1787                stream);
1788#endif
1789
1790        /* allocate ITDs w/o locking anything */
1791        status = itd_urb_transaction (stream, ehci, urb, mem_flags);
1792        if (unlikely (status < 0)) {
1793                ehci_dbg (ehci, "can't init itds\n");
1794                goto done;
1795        }
1796
1797        /* schedule ... need to lock */
1798        spin_lock_irqsave (&ehci->lock, flags);
1799        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1800                status = -ESHUTDOWN;
1801                goto done_not_linked;
1802        }
1803        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1804        if (unlikely(status))
1805                goto done_not_linked;
1806        status = iso_stream_schedule(ehci, urb, stream);
1807        if (likely (status == 0))
1808                itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1809        else
1810                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1811 done_not_linked:
1812        spin_unlock_irqrestore (&ehci->lock, flags);
1813 done:
1814        return status;
1815}
1816
1817/*-------------------------------------------------------------------------*/
1818
1819/*
1820 * "Split ISO TDs" ... used for USB 1.1 devices going through the
1821 * TTs in USB 2.0 hubs.  These need microframe scheduling.
1822 */
1823
1824static inline void
1825sitd_sched_init(
1826        struct ehci_hcd         *ehci,
1827        struct ehci_iso_sched   *iso_sched,
1828        struct ehci_iso_stream  *stream,
1829        struct urb              *urb
1830)
1831{
1832        unsigned        i;
1833        dma_addr_t      dma = urb->transfer_dma;
1834
1835        /* how many frames are needed for these transfers */
1836        iso_sched->span = urb->number_of_packets * stream->interval;
1837
1838        /* figure out per-frame sitd fields that we'll need later
1839         * when we fit new sitds into the schedule.
1840         */
1841        for (i = 0; i < urb->number_of_packets; i++) {
1842                struct ehci_iso_packet  *packet = &iso_sched->packet [i];
1843                unsigned                length;
1844                dma_addr_t              buf;
1845                u32                     trans;
1846
1847                length = urb->iso_frame_desc [i].length & 0x03ff;
1848                buf = dma + urb->iso_frame_desc [i].offset;
1849
1850                trans = SITD_STS_ACTIVE;
1851                if (((i + 1) == urb->number_of_packets)
1852                                && !(urb->transfer_flags & URB_NO_INTERRUPT))
1853                        trans |= SITD_IOC;
1854                trans |= length << 16;
1855                packet->transaction = cpu_to_hc32(ehci, trans);
1856
1857                /* might need to cross a buffer page within a td */
1858                packet->bufp = buf;
1859                packet->buf1 = (buf + length) & ~0x0fff;
1860                if (packet->buf1 != (buf & ~(u64)0x0fff))
1861                        packet->cross = 1;
1862
1863                /* OUT uses multiple start-splits */
1864                if (stream->bEndpointAddress & USB_DIR_IN)
1865                        continue;
1866                length = (length + 187) / 188;
1867                if (length > 1) /* BEGIN vs ALL */
1868                        length |= 1 << 3;
1869                packet->buf1 |= length;
1870        }
1871}
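
/*
 * Worked example for the OUT start-split math above: one SSPLIT carries
 * at most 188 bytes, so a 600-byte OUT packet needs (600 + 187) / 188 = 4
 * start-splits; since that is more than one, the transaction-position
 * bits get BEGIN (the 1 << 3) rather than ALL.
 */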
1872
1873static int
1874sitd_urb_transaction (
1875        struct ehci_iso_stream  *stream,
1876        struct ehci_hcd         *ehci,
1877        struct urb              *urb,
1878        gfp_t                   mem_flags
1879)
1880{
1881        struct ehci_sitd        *sitd;
1882        dma_addr_t              sitd_dma;
1883        int                     i;
1884        struct ehci_iso_sched   *iso_sched;
1885        unsigned long           flags;
1886
1887        iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1888        if (iso_sched == NULL)
1889                return -ENOMEM;
1890
1891        sitd_sched_init(ehci, iso_sched, stream, urb);
1892
1893        /* allocate/init sITDs */
1894        spin_lock_irqsave (&ehci->lock, flags);
1895        for (i = 0; i < urb->number_of_packets; i++) {
1896
1897                /* NOTE:  for now, we don't try to handle wraparound cases
1898                 * for IN (using sitd->hw_backpointer, like a FSTN), which
1899                 * means we never need two sitds for full speed packets.
1900                 */
1901
1902                /*
1903                 * Use siTDs from the free list, but not siTDs that may
1904                 * still be in use by the hardware.
1905                 */
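                /*
                 * (Completed siTDs are appended at the tail of the free
                 * list, so if even the oldest entry was linked in the
                 * current frame it may still be cached by the HC; in that
                 * case a fresh siTD is allocated from the pool instead.)
                 */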
1906                if (likely(!list_empty(&stream->free_list))) {
1907                        sitd = list_first_entry(&stream->free_list,
1908                                         struct ehci_sitd, sitd_list);
1909                        if (sitd->frame == ehci->now_frame)
1910                                goto alloc_sitd;
1911                        list_del (&sitd->sitd_list);
1912                        sitd_dma = sitd->sitd_dma;
1913                } else {
1914 alloc_sitd:
1915                        spin_unlock_irqrestore (&ehci->lock, flags);
1916                        sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
1917                                        &sitd_dma);
1918                        spin_lock_irqsave (&ehci->lock, flags);
1919                        if (!sitd) {
1920                                iso_sched_free(stream, iso_sched);
1921                                spin_unlock_irqrestore(&ehci->lock, flags);
1922                                return -ENOMEM;
1923                        }
1924                }
1925
1926                memset (sitd, 0, sizeof *sitd);
1927                sitd->sitd_dma = sitd_dma;
1928                sitd->frame = 9999;             /* an invalid value */
1929                list_add (&sitd->sitd_list, &iso_sched->td_list);
1930        }
1931
1932        /* temporarily store schedule info in hcpriv */
1933        urb->hcpriv = iso_sched;
1934        urb->error_count = 0;
1935
1936        spin_unlock_irqrestore (&ehci->lock, flags);
1937        return 0;
1938}
1939
1940/*-------------------------------------------------------------------------*/
1941
1942static inline void
1943sitd_patch(
1944        struct ehci_hcd         *ehci,
1945        struct ehci_iso_stream  *stream,
1946        struct ehci_sitd        *sitd,
1947        struct ehci_iso_sched   *iso_sched,
1948        unsigned                index
1949)
1950{
1951        struct ehci_iso_packet  *uf = &iso_sched->packet [index];
1952        u64                     bufp = uf->bufp;
1953
1954        sitd->hw_next = EHCI_LIST_END(ehci);
1955        sitd->hw_fullspeed_ep = stream->address;
1956        sitd->hw_uframe = stream->splits;
1957        sitd->hw_results = uf->transaction;
1958        sitd->hw_backpointer = EHCI_LIST_END(ehci);
1959
1961        sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
1962        sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
1963
1964        sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
1965        if (uf->cross)
1966                bufp += 4096;
1967        sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
1968        sitd->index = index;
1969}
1970
1971static inline void
1972sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
1973{
1974        /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
1975        sitd->sitd_next = ehci->pshadow [frame];
1976        sitd->hw_next = ehci->periodic [frame];
1977        ehci->pshadow [frame].sitd = sitd;
1978        sitd->frame = frame;
1979        wmb ();
1980        ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
1981}
1982
1983/* fit urb's sitds into the selected schedule slot; activate as needed */
1984static void sitd_link_urb(
1985        struct ehci_hcd         *ehci,
1986        struct urb              *urb,
1987        unsigned                mod,
1988        struct ehci_iso_stream  *stream
1989)
1990{
1991        int                     packet;
1992        unsigned                next_uframe;
1993        struct ehci_iso_sched   *sched = urb->hcpriv;
1994        struct ehci_sitd        *sitd;
1995
1996        next_uframe = stream->next_uframe;
1997
1998        if (list_empty(&stream->td_list))
1999                /* usbfs ignores TT bandwidth */
2000                ehci_to_hcd(ehci)->self.bandwidth_allocated
2001                                += stream->bandwidth;
2002
2003        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2004                if (ehci->amd_pll_fix == 1)
2005                        usb_amd_quirk_pll_disable();
2006        }
2007
2008        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2009
2010        /* fill sITDs frame by frame */
2011        for (packet = 0, sitd = NULL;
2012                        packet < urb->number_of_packets;
2013                        packet++) {
2014
2015                /* ASSERT:  we have all necessary sitds */
2016                BUG_ON (list_empty (&sched->td_list));
2017
2018                /* ASSERT:  no sitds for this endpoint in this frame */
2019
2020                sitd = list_entry (sched->td_list.next,
2021                                struct ehci_sitd, sitd_list);
2022                list_move_tail (&sitd->sitd_list, &stream->td_list);
2023                sitd->stream = stream;
2024                sitd->urb = urb;
2025
2026                sitd_patch(ehci, stream, sitd, sched, packet);
2027                sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2028                                sitd);
2029
2030                next_uframe += stream->interval << 3;
2031        }
2032        stream->next_uframe = next_uframe & (mod - 1);
2033
2034        /* don't need that schedule data any more */
2035        iso_sched_free (stream, sched);
2036        urb->hcpriv = stream;
2037
2038        ++ehci->isoc_count;
2039        enable_periodic(ehci);
2040}
2041
2042/*-------------------------------------------------------------------------*/
2043
2044#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2045                                | SITD_STS_XACT | SITD_STS_MMF)
2046
2047/* Process and recycle a completed SITD.  Return true iff its urb completed,
2048 * and hence its completion callback probably added things to the hardware
2049 * schedule.
2050 *
2051 * Note that we carefully avoid recycling this descriptor until after any
2052 * completion callback runs, so that it won't be reused quickly.  That is,
2053 * assuming (a) no more than two urbs per frame on this endpoint, and also
2054 * (b) only this endpoint's completions submit URBs.  It seems some silicon
2055 * corrupts things if you reuse completed descriptors very quickly...
2056 */
2057static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2058{
2059        struct urb                              *urb = sitd->urb;
2060        struct usb_iso_packet_descriptor        *desc;
2061        u32                                     t;
2062        int                                     urb_index = -1;
2063        struct ehci_iso_stream                  *stream = sitd->stream;
2064        struct usb_device                       *dev;
2065        bool                                    retval = false;
2066
2067        urb_index = sitd->index;
2068        desc = &urb->iso_frame_desc [urb_index];
2069        t = hc32_to_cpup(ehci, &sitd->hw_results);
2070
2071        /* report transfer status */
2072        if (unlikely(t & SITD_ERRS)) {
2073                urb->error_count++;
2074                if (t & SITD_STS_DBE)
2075                        desc->status = usb_pipein (urb->pipe)
2076                                ? -ENOSR  /* hc couldn't read */
2077                                : -ECOMM; /* hc couldn't write */
2078                else if (t & SITD_STS_BABBLE)
2079                        desc->status = -EOVERFLOW;
2080                else /* XACT, MMF, etc */
2081                        desc->status = -EPROTO;
2082        } else if (unlikely(t & SITD_STS_ACTIVE)) {
2083                /* URB was too late */
2084                urb->error_count++;
2085        } else {
2086                desc->status = 0;
2087                desc->actual_length = desc->length - SITD_LENGTH(t);
2088                urb->actual_length += desc->actual_length;
2089        }
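        /*
         * Note: unlike EHCI_ITD_LENGTH() in itd_complete(), which reports
         * bytes actually transferred, the siTD length field counts down,
         * so SITD_LENGTH(t) is the residue subtracted above.
         */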
2090
2091        /* handle completion now? */
2092        if ((urb_index + 1) != urb->number_of_packets)
2093                goto done;
2094
2095        /* ASSERT: it's really the last sitd for this urb
2096        list_for_each_entry (sitd, &stream->td_list, sitd_list)
2097                BUG_ON (sitd->urb == urb);
2098         */
2099
2100        /* give urb back to the driver; completion often (re)submits */
2101        dev = urb->dev;
2102        ehci_urb_done(ehci, urb, 0);
2103        retval = true;
2104        urb = NULL;
2105
2106        --ehci->isoc_count;
2107        disable_periodic(ehci);
2108
2109        ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2110        if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2111                if (ehci->amd_pll_fix == 1)
2112                        usb_amd_quirk_pll_enable();
2113        }
2114
2115        if (list_is_singular(&stream->td_list))
2116                ehci_to_hcd(ehci)->self.bandwidth_allocated
2117                                -= stream->bandwidth;
2118
2119done:
2120        sitd->urb = NULL;
2121
2122        /* Add to the end of the free list for later reuse */
2123        list_move_tail(&sitd->sitd_list, &stream->free_list);
2124
2125        /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
2126        if (list_empty(&stream->td_list)) {
2127                list_splice_tail_init(&stream->free_list,
2128                                &ehci->cached_sitd_list);
2129                start_free_itds(ehci);
2130        }
2131
2132        return retval;
2133}
2134
2135
2136static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
2137        gfp_t mem_flags)
2138{
2139        int                     status = -EINVAL;
2140        unsigned long           flags;
2141        struct ehci_iso_stream  *stream;
2142
2143        /* Get iso_stream head */
2144        stream = iso_stream_find (ehci, urb);
2145        if (stream == NULL) {
2146                ehci_dbg (ehci, "can't get iso stream\n");
2147                return -ENOMEM;
2148        }
2149        if (urb->interval != stream->interval) {
2150                ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
2151                        stream->interval, urb->interval);
2152                goto done;
2153        }
2154
2155#ifdef EHCI_URB_TRACE
2156        ehci_dbg (ehci,
2157                "submit %p dev%s ep%d%s-iso len %d\n",
2158                urb, urb->dev->devpath,
2159                usb_pipeendpoint (urb->pipe),
2160                usb_pipein (urb->pipe) ? "in" : "out",
2161                urb->transfer_buffer_length);
2162#endif
2163
2164        /* allocate SITDs */
2165        status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
2166        if (status < 0) {
2167                ehci_dbg (ehci, "can't init sitds\n");
2168                goto done;
2169        }
2170
2171        /* schedule ... need to lock */
2172        spin_lock_irqsave (&ehci->lock, flags);
2173        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2174                status = -ESHUTDOWN;
2175                goto done_not_linked;
2176        }
2177        status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2178        if (unlikely(status))
2179                goto done_not_linked;
2180        status = iso_stream_schedule(ehci, urb, stream);
2181        if (status == 0)
2182                sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
2183        else
2184                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2185 done_not_linked:
2186        spin_unlock_irqrestore (&ehci->lock, flags);
2187 done:
2188        return status;
2189}
2190
2191/*-------------------------------------------------------------------------*/
2192
2193static void scan_isoc(struct ehci_hcd *ehci)
2194{
2195        unsigned        uf, now_frame, frame;
2196        unsigned        fmask = ehci->periodic_size - 1;
2197        bool            modified, live;
2198
2199        /*
2200         * When running, scan from last scan point up to "now";
2201         * otherwise clean up by scanning everything that's left.
2202         * Touches as few pages as possible:  cache-friendly.
2203         */
2204        if (ehci->rh_state >= EHCI_RH_RUNNING) {
2205                uf = ehci_read_frame_index(ehci);
2206                now_frame = (uf >> 3) & fmask;
2207                live = true;
2208        } else  {
2209                now_frame = (ehci->last_iso_frame - 1) & fmask;
2210                live = false;
2211        }
2212        ehci->now_frame = now_frame;
2213
2214        frame = ehci->last_iso_frame;
2215        for (;;) {
2216                union ehci_shadow       q, *q_p;
2217                __hc32                  type, *hw_p;
2218
2219restart:
2220                /* scan each element in frame's queue for completions */
2221                q_p = &ehci->pshadow [frame];
2222                hw_p = &ehci->periodic [frame];
2223                q.ptr = q_p->ptr;
2224                type = Q_NEXT_TYPE(ehci, *hw_p);
2225                modified = false;
2226
2227                while (q.ptr != NULL) {
2228                        switch (hc32_to_cpu(ehci, type)) {
2229                        case Q_TYPE_ITD:
2230                                /* If this ITD is still active, leave it for
2231                                 * later processing ... check the next entry.
2232                                 * No need to check for activity unless the
2233                                 * frame is current.
2234                                 */
2235                                if (frame == now_frame && live) {
2236                                        rmb();
2237                                        for (uf = 0; uf < 8; uf++) {
2238                                                if (q.itd->hw_transaction[uf] &
2239                                                            ITD_ACTIVE(ehci))
2240                                                        break;
2241                                        }
2242                                        if (uf < 8) {
2243                                                q_p = &q.itd->itd_next;
2244                                                hw_p = &q.itd->hw_next;
2245                                                type = Q_NEXT_TYPE(ehci,
2246                                                        q.itd->hw_next);
2247                                                q = *q_p;
2248                                                break;
2249                                        }
2250                                }
2251
2252                                /* Take finished ITDs out of the schedule
2253                                 * and process them:  recycle, maybe report
2254                                 * URB completion.  HC won't cache the
2255                                 * pointer for much longer, if at all.
2256                                 */
2257                                *q_p = q.itd->itd_next;
2258                                if (!ehci->use_dummy_qh ||
2259                                    q.itd->hw_next != EHCI_LIST_END(ehci))
2260                                        *hw_p = q.itd->hw_next;
2261                                else
2262                                        *hw_p = ehci->dummy->qh_dma;
2263                                type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2264                                wmb();
2265                                modified = itd_complete (ehci, q.itd);
2266                                q = *q_p;
2267                                break;
2268                        case Q_TYPE_SITD:
2269                                /* If this SITD is still active, leave it for
2270                                 * later processing ... check the next entry.
2271                                 * No need to check for activity unless the
2272                                 * frame is current.
2273                                 */
2274                                if (((frame == now_frame) ||
2275                                     (((frame + 1) & fmask) == now_frame))
2276                                    && live
2277                                    && (q.sitd->hw_results &
2278                                        SITD_ACTIVE(ehci))) {
2279
2280                                        q_p = &q.sitd->sitd_next;
2281                                        hw_p = &q.sitd->hw_next;
2282                                        type = Q_NEXT_TYPE(ehci,
2283                                                        q.sitd->hw_next);
2284                                        q = *q_p;
2285                                        break;
2286                                }
2287
2288                                /* Take finished SITDs out of the schedule
2289                                 * and process them:  recycle, maybe report
2290                                 * URB completion.
2291                                 */
2292                                *q_p = q.sitd->sitd_next;
2293                                if (!ehci->use_dummy_qh ||
2294                                    q.sitd->hw_next != EHCI_LIST_END(ehci))
2295                                        *hw_p = q.sitd->hw_next;
2296                                else
2297                                        *hw_p = ehci->dummy->qh_dma;
2298                                type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2299                                wmb();
2300                                modified = sitd_complete (ehci, q.sitd);
2301                                q = *q_p;
2302                                break;
2303                        default:
2304                                ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2305                                        type, frame, q.ptr);
2306                                // BUG ();
2307                                /* FALL THROUGH */
2308                        case Q_TYPE_QH:
2309                        case Q_TYPE_FSTN:
2310                                /* End of the iTDs and siTDs */
2311                                q.ptr = NULL;
2312                                break;
2313                        }
2314
2315                        /* assume completion callbacks modify the queue */
2316                        if (unlikely(modified && ehci->isoc_count > 0))
2317                                goto restart;
2318                }
2319
2320                /* Stop when we have reached the current frame */
2321                if (frame == now_frame)
2322                        break;
2323
2324                /* The last frame may still have active siTDs */
2325                ehci->last_iso_frame = frame;
2326                frame = (frame + 1) & fmask;
2327        }
2328}
2329