1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
/*
 * Request an interrupt at the next frame boundary by setting the IOC
 * (Interrupt On Complete) bit in the schedule's terminating TD.  If the
 * controller is stopped that TD will never be executed, so kick the
 * root-hub timer to run immediately instead.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}
34
/* Cancel a pending frame-boundary interrupt request (clear IOC in the
 * terminating TD). */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
39
40
41
42
43
44
45
/*
 * Turn on Full-Speed Bandwidth Reclamation (FSBR): make the last QH on
 * the async schedule point back to the terminating skeleton QH, closing
 * the reclamation loop in the hardware schedule.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Set the flag before writing the hardware link so other code
	 * sees a consistent state. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}
58
/*
 * Turn off FSBR: terminate the hardware link of the last QH on the
 * async schedule, breaking the reclamation loop.
 */
static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM;
}
70
71static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
72{
73 struct urb_priv *urbp = urb->hcpriv;
74
75 if (!(urb->transfer_flags & URB_NO_FSBR))
76 urbp->fsbr = 1;
77}
78
79static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
80{
81 if (urbp->fsbr) {
82 uhci->fsbr_is_wanted = 1;
83 if (!uhci->fsbr_is_on)
84 uhci_fsbr_on(uhci);
85 else if (uhci->fsbr_expiring) {
86 uhci->fsbr_expiring = 0;
87 del_timer(&uhci->fsbr_timer);
88 }
89 }
90}
91
/*
 * FSBR expiration timer callback: turn FSBR off if the expiration is
 * still pending (it may have been cancelled by uhci_urbp_wants_fsbr()
 * between timer firing and lock acquisition).
 */
static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}
104
105
/*
 * Allocate a Transfer Descriptor from the DMA pool and initialize its
 * software bookkeeping.  Returns NULL on allocation failure.
 */
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;		/* -1 means not in any frame list */

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}
123
/*
 * Return a TD to the DMA pool.  Warns if the TD is still linked on an
 * URB's TD list or a frame list — freeing it then would corrupt those
 * lists.
 */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
133
/* Fill in the hardware fields of a TD (little-endian, as the HC reads
 * them via DMA).  The link pointer is set separately by the caller. */
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
141
/* Append a TD to an URB's private TD list. */
static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}
146
/* Detach a TD from its URB's TD list (leaves the TD's list head empty
 * so uhci_free_td() won't complain). */
static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}
151
152
153
154
/*
 * Insert an isochronous TD into the frame list at the given frame.
 * TDs for the same frame are chained through their fl_list; the new TD
 * goes at the end of the chain.  The wmb() ensures the new TD's own
 * link pointer is visible to the controller before the previous entry
 * is pointed at it.
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(td);
		uhci->frame_cpu[framenum] = td;
	}
}
181
/*
 * Remove an isochronous TD from the frame list, repairing the hardware
 * link chain around it.  Three cases: TD is the only one in the frame,
 * TD is the first of several, or TD is in the middle/end of the chain.
 */
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not in any frame list there's nothing to unlink */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			/* Only TD in this frame: frame points past it */
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			/* First of several: promote the next TD */
			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		/* Mid-chain: previous TD skips over this one */
		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}
212
/*
 * Remove all the TDs queued in a single frame, making the frame entry
 * point past the whole chain (to whatever the last TD linked to).
 */
static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		/* Empty the fl_list chain; the TDs themselves are freed
		 * elsewhere (they stay on their URBs' td_lists). */
		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}
230
231
232
233
/*
 * Remove all of an isochronous URB's TDs from the frame list, one by
 * one (used when the URB is being dequeued).
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}
242
/*
 * Allocate and initialize a Queue Header.  With a non-NULL udev this is
 * a normal endpoint QH (it gets a dummy TD for non-iso endpoints and a
 * bus-time load estimate for periodic ones); with udev == NULL it is a
 * skeleton QH used to structure the schedule.
 * Returns NULL on allocation failure.
 */
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	/* Hardware pointers start out terminated */
	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = usb_endpoint_type(&hep->desc);
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		/* Periodic endpoints get a per-frame load estimate in us */
		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}
290
/*
 * Free a QH.  Normal (endpoint) QHs must be idle; the endpoint's back
 * pointer is cleared and the dummy TD released along with the QH.
 */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
305
306
307
308
309
310
311
312
/*
 * Prepare a QH's queue for an URB being removed from it.  Patches the
 * hardware links so the controller skips the URB's TDs, and saves the
 * data toggle for later fixup when necessary.
 *
 * Returns 0 if it is not yet safe to give the URB back (the controller
 * may still be using its TDs), nonzero otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/*
	 * Isochronous pipes don't use QH elements; it's only safe once a
	 * full frame has elapsed since the URB was unlinked (or at once,
	 * if the controller is stopped).
	 */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/*
	 * If the URB isn't first on the queue, make the previous URB's
	 * last TD link past this URB's TDs.
	 */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* The URB is first: if the QH element pointer is already
	 * terminated there's nothing the controller can be doing. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't need toggle fixups */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value so the rest of the queue can be
	 * fixed up later */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}
366
367
368
369
370
/*
 * Fix up the data toggles for the URBs remaining on a queue, starting
 * from qh->initial_toggle.  If skip_first is set the first URB's
 * toggles are assumed correct and only the following URBs are checked.
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid the toggles cannot safely be changed: toggle > 1
	 * below makes every URB "match" so nothing gets flipped. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD's toggle is already right, this URB is
		 * fine; carry the toggle past its last TD. */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise flip every TD in the URB */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= cpu_to_le32(
					TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
418
419
420
421
/*
 * Put an isochronous QH on the software schedule only.  Iso transfers
 * are driven by TDs in the frame list, so the QH is never linked into
 * the hardware schedule.
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
}
428
429
430
431
432
/*
 * Link an interrupt QH into both the software list (at the end of its
 * skeleton's list) and the hardware schedule (after its predecessor).
 * The wmb() makes the new QH's link visible before the predecessor is
 * pointed at it.
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(qh);
}
444
445
446
447
448
/*
 * Link an async (control/bulk) QH into the schedule, keeping the list
 * sorted by skeleton index.  If this QH becomes the first FSBR entry,
 * the terminating skeleton QH's loop-back pointer must be updated too.
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_new_qh;

	/* Find the predecessor QH: the last one whose skel index is
	 * <= the new QH's (the list is kept sorted by skel). */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link into the hardware schedule after pqh */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, the terminating skeleton QH
	 * must loop back to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}
474
475
476
477
/*
 * Put a QH on the schedule: restart its element pointer at the first
 * queued TD if it was terminated, then (if not already active) move it
 * from wherever it is onto the appropriate schedule list.
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for iso queues, which never use it. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(td);
	}

	/* Reset the advance-detection state */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct schedule list;
	 * fix up the scan cursor first if it points here. */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}
515
516
517
518
/*
 * Unlink an interrupt QH from the hardware schedule: the predecessor
 * simply skips over it.  The mb() forces the write out before the
 * caller records the unlink frame number.
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}
527
528
529
530
/*
 * Unlink an async QH from the hardware schedule.  If this QH was the
 * first FSBR entry, the terminating skeleton QH's loop-back pointer
 * must be redirected past it as well.
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}
545
546
547
548
/*
 * Take a QH off the hardware schedule and move it to the unlinking
 * list.  The unlink frame number is recorded so code elsewhere can
 * tell when the controller has definitely stopped using the QH.
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule (iso QHs are never linked
	 * into the hardware schedule, so nothing to do for them) */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
577
578
579
580
581
582
583
/*
 * Move a fully-unlinked QH to the idle list, free its leftover post_td,
 * and wake anyone waiting for QHs to go idle (endpoint disable).
 * Must not be called while the QH is still on the schedule.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't needed */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}
604
605
606
607
608static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
609{
610 int highest_load = uhci->load[phase];
611
612 for (phase += period; phase < MAX_PHASE; phase += period)
613 highest_load = max_t(int, highest_load, uhci->load[phase]);
614 return highest_load;
615}
616
617
618
619
620
621static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
622{
623 int minimax_load;
624
625
626
627 if (qh->phase >= 0)
628 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
629 else {
630 int phase, load;
631 int max_phase = min_t(int, MAX_PHASE, qh->period);
632
633 qh->phase = 0;
634 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
635 for (phase = 1; phase < max_phase; ++phase) {
636 load = uhci_highest_load(uhci, phase, qh->period);
637 if (load < minimax_load) {
638 minimax_load = load;
639 qh->phase = phase;
640 }
641 }
642 }
643
644
645 if (minimax_load + qh->load > 900) {
646 dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
647 "period %d, phase %d, %d + %d us\n",
648 qh->period, qh->phase, minimax_load, qh->load);
649 return -ENOSPC;
650 }
651 return 0;
652}
653
654
655
656
/*
 * Reserve periodic bandwidth for a QH: add its load to every schedule
 * slot it occupies, update the bus's accounting, and log the
 * reservation.
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}
686
687
688
689
/*
 * Release the periodic bandwidth previously reserved for a QH: the
 * exact inverse of uhci_reserve_bandwidth().
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}
719
/*
 * Allocate the per-URB private bookkeeping structure and attach it to
 * the URB via urb->hcpriv.  Returns NULL on allocation failure.
 */
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}
737
/*
 * Free an URB's private structure, along with all of its TDs.  The
 * URB must already be off its QH's queue.
 */
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}
754
755
756
757
758
759
760
761
762
763static int uhci_map_status(int status, int dir_out)
764{
765 if (!status)
766 return 0;
767 if (status & TD_CTRL_BITSTUFF)
768 return -EPROTO;
769 if (status & TD_CTRL_CRCTIMEO) {
770 if (dir_out)
771 return -EPROTO;
772 else
773 return -EILSEQ;
774 }
775 if (status & TD_CTRL_BABBLE)
776 return -EOVERFLOW;
777 if (status & TD_CTRL_DBUFERR)
778 return -ENOSR;
779 if (status & TD_CTRL_STALLED)
780 return -EPIPE;
781 return 0;
782}
783
784
785
786
/*
 * Build the TD chain for a control transfer: a SETUP TD (reusing the
 * QH's dummy TD), zero or more DATA TDs, a STATUS TD, and a new
 * inactive dummy TD at the end.  Only after the whole chain is built
 * is the old dummy activated, so the controller never sees a partial
 * chain.  Returns 0 on success or -ENOMEM.
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors; dummy TD remains inactive until the end */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet.
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP to
	 * OUT.  Else change it to IN and set Short Packet Detect, so we
	 * find out early if the data stage ends short.
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs.
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for the control status stage: opposite
	 * direction from the data stage, always DATA1, with IOC set so
	 * we get an interrupt when the transfer completes.
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD (inactive) and activate the old one.
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();		/* the whole chain must be visible first */
	qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/*
	 * Low-speed transfers get a different queue, and won't hog the
	 * bus.  Also, some devices enumerate better without FSBR; to
	 * avoid problems, FSBR is not used during enumeration (i.e.
	 * before the device is CONFIGURED).
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed;
	 * the caller frees the rest via uhci_free_urb_priv(). */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
909
910
911
912
/*
 * Common TD-chain builder for bulk and interrupt transfers: one TD per
 * maxsze-sized packet (reusing the QH's dummy TD for the first), an
 * optional zero-length packet, IOC on the last real TD, and a fresh
 * inactive dummy TD at the end.  The old dummy is activated only once
 * the whole chain is in place.  Returns 0 on success or a negative
 * error code.
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors; dummy TD remains inactive until the end */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs.  Note a zero-length transfer still runs
	 * the loop body once (do/while), producing one zero-length TD.
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		/* Subtracting maxsze (not pktsze) drives len negative on
		 * a short final packet, so the len == 0 test below fires
		 * only for exact-multiple transfer lengths. */
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of
	 * maxsze; hence (len = transfer_length - N * maxsze) == 0.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * of database info gets one interrupt at the end. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD (inactive) and activate the old one.
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();		/* the whole chain must be visible first */
	qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed;
	 * the caller frees the rest via uhci_free_urb_priv(). */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
1028
1029static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
1030 struct uhci_qh *qh)
1031{
1032 int ret;
1033
1034
1035 if (urb->dev->speed == USB_SPEED_LOW)
1036 return -EINVAL;
1037
1038 if (qh->state != QH_STATE_ACTIVE)
1039 qh->skel = SKEL_BULK;
1040 ret = uhci_submit_common(uhci, urb, qh);
1041 if (ret == 0)
1042 uhci_add_fsbr(uhci, urb);
1043 return ret;
1044}
1045
/*
 * Submit an interrupt URB.  On first submission, round the requested
 * interval down to a power of two, pick a schedule phase, and reserve
 * bandwidth; on later submissions just verify the interval still fits.
 * Returns 0 on success or a negative error code.
 */
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/*
	 * The bandwidth reservation is tied to the QH, so it's made only
	 * once, when the endpoint's first URB is submitted.
	 */
	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;

		/* If the slot is full, try a smaller period */
		do {
			qh->period = 1 << exponent;
			qh->skel = SKEL_INDEX(exponent);

			/* For now, interrupt phase is fixed by the layout
			 * of the QH lists.
			 */
			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
			ret = uhci_check_bandwidth(uhci, qh);
		} while (ret != 0 && --exponent >= 0);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		/* Tell the caller the actual period used */
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}
1091
1092
1093
1094
/*
 * Fix up the queue after a short (but non-error) packet ended an URB
 * early: for control transfers, jump straight to the status TD; for
 * bulk/interrupt, fix the following URBs' toggles and skip to the next
 * URB.  The now-unneeded trailing TDs are freed.
 *
 * Returns -EINPROGRESS if the URB must keep running (control status
 * stage), 0 otherwise.
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(td);
		tmp = td->list.prev;	/* free everything before the status TD */
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;	/* free all remaining TDs */
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}
1138
1139
1140
1141
/*
 * Scan a control/bulk/interrupt URB's completed TDs, accumulating
 * actual_length and mapping any hardware error to a Linux error code.
 * Returns -EINPROGRESS while TDs are still active, 0 when the URB is
 * done, or a negative error code; short transfers are routed through
 * uhci_fixup_short_transfer().
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;	/* controller not done yet */

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__func__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		/* Keep the most recently completed TD around as post_td
		 * (its toggle is needed for short-transfer fixups). */
		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save the next
		 * toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}
1222
1223
1224
1225
/*
 * Submit an isochronous URB: work out the starting frame (reserving
 * bandwidth and picking a phase if this is the endpoint's first URB),
 * then build one TD per packet and insert them directly into the
 * frame list.  Returns 0 on success or a negative error code.
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (exceed the frame-list span) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		if (urb->transfer_flags & URB_ISO_ASAP) {
			qh->phase = -1;		/* Find the best phase */
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;

			/* Allow a little time to allocate the TDs */
			uhci_get_current_frame_number(uhci);
			frame = uhci->frame_number + 10;

			/* Move forward to the first frame having the
			 * chosen phase */
			urb->start_frame = frame + ((qh->phase - frame) &
					(qh->period - 1));
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
			qh->phase = urb->start_frame & (qh->period - 1);
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;
		}

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {
		/* Find the next unused frame after the end of the queue */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP) {
			/* Skip some frames if necessary to insure that
			 * the start frame is in the future.
			 */
			uhci_get_current_frame_number(uhci);
			if (uhci_frame_before_eq(frame, uhci->frame_number)) {
				frame = uhci->frame_number + 1;
				frame += ((qh->phase - frame) &
					(qh->period - 1));
			}
		}	/* Otherwise pick up where the last URB leaves off */
		urb->start_frame = frame;
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Build one TD per packet.  On -ENOMEM the TDs already on
	 * urbp->td_list are freed by the caller's error path. */
	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
	}

	qh->skel = SKEL_ISO;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}
1337
/*
 * Process the completed TDs of an isochronous URB, filling in each
 * packet's actual_length/status and counting errors.  Returns
 * -EINPROGRESS while the URB's next frame hasn't elapsed yet, 0 when
 * all packets have been handled.
 */
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}
1376
/*
 * HCD entry point: queue an URB.  Allocates the per-URB private data,
 * finds or creates the endpoint's QH, dispatches to the per-type
 * submit routine, and schedules the QH if this URB is now first on its
 * queue.  Errors unwind via the goto chain below.
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	/* Reuse the endpoint's existing QH if there is one */
	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then
	 * either the QH is new, or it was unlinked and is being
	 * reactivated; either way the QH needs to be scheduled.  But
	 * don't do that if the QH is stopped, waiting for its queue
	 * to drain. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
1449
/*
 * HCD entry point: cancel an URB.  For iso URBs, pull the TDs out of
 * the frame list first; then unlink the whole QH so the queue can be
 * cleaned up and the URB given back later.
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct uhci_qh *qh;
	int rc;

	spin_lock_irqsave(&uhci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	qh = ((struct urb_priv *) urb->hcpriv)->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink
		 * time so the QH isn't cleaned up too soon. */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return rc;
}
1481
1482
1483
1484
/*
 * Finish the current URB's processing and give it back to its driver.
 * Called with uhci->lock held; the lock is dropped around the call to
 * usb_hcd_giveback_urb() and re-acquired afterward (hence the sparse
 * __releases/__acquires annotations).
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* Subtract off the length of the SETUP packet from
		 * urb->actual_length; the min_t() clamp keeps the
		 * result from going negative.
		 */
		urb->actual_length -= min_t(u32, 8, urb->actual_length);
	}

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-tracking members for the next URB. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * restore the endpoint's saved toggle value. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	/* The completion handler may resubmit URBs, so drop our lock
	 * while it runs */
	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}
1536
1537
1538
1539
/*
 * An unlinking QH is done once the frame counter has moved past the frame
 * in which the unlink was started (qh->unlink_frame), meaning the HC can
 * no longer be fetching it.  Adding is_stopped biases the comparison so
 * that any unlink counts as finished while the controller is stopped.
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
1543
/*
 * Process events in the QH's queue: give back completed URBs, and once
 * the QH is known to be stopped, give back dequeued URBs and restart
 * (or idle) the queue.
 */
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* A dequeued URB that has completed can't be given back
		 * until the QH is stopped or has finished unlinking. */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal
	 * case), our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Clean up the queue for this URB.  If the cleanup
			 * can't be done yet (the dequeue was too recent),
			 * leave the QH stopped-flag cleared and try again
			 * on a later scan. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			/* giveback dropped the lock and may have changed
			 * the queue, so start the walk over */
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH so we learn as
		 * soon as the queue starts moving again. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty; the QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
/*
 * Check whether a queue has made forward progress since it was last
 * examined.  Returns 0 if the QH is non-Isochronous, ACTIVE, and has
 * not advanced; 1 otherwise (meaning a scan of the queue is warranted).
 *
 * Also works around a hardware bug (seen on early Intel controllers —
 * NOTE(review): rationale from upstream history, confirm) in which
 * qh->element sometimes fails to advance past a successfully completed
 * TD, leaving the queue stuck; we detect that case and evict the TD.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat a non-ACTIVE (e.g. UNLINKING) queue as though it hasn't
	 * advanced: such a queue may have no URBs, or its first URB may
	 * have no TDs, so there is no reliable way to test advancement.
	 * status stays 0 so the FSBR/IOC test below is skipped safely. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = 0;
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the stuck-on-completed-TD case and work around
		 * it by advancing qh->element past post_td by hand */
		if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR and its TD isn't already
		 * set to interrupt on completion, unlink the QH so the
		 * IOC bit can be set safely; we'll then learn as soon as
		 * the queue starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}
1705
1706
1707
1708
/*
 * Process events in the schedule, but only in one thread at a time.
 * Concurrent callers just set need_rescan and return; the in-progress
 * scan loops back via the rescan label until the flag stays clear.
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one.
	 * uhci->next_qh is a shared cursor: it is advanced before the QH
	 * is scanned so that the scan may safely unlink or move qh. */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
	list_entry(qh->queue.next, struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	/* If FSBR is on but nothing wanted it this pass, start the timer
	 * that will eventually switch it off */
	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	/* Request a frame interrupt only while QHs are waiting to unlink */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}
1762