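/*
 * xHCI host controller driver -- ring and TRB handling.
 *
 * This file implements the ring machinery of the xHCI driver: mapping TRB
 * pointers to DMA addresses, advancing enqueue/dequeue pointers across link
 * TRBs, ringing doorbells, cancelling and cleaning up TDs, and dispatching
 * command, port-status and transfer completion events.
 */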
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

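/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB: the segment's base DMA address plus the TRB's offset
 * within the segment (offset in TRBs times sizeof(union xhci_trb)).
 */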
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

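/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs -- that would be dumb and this would
 * loop.  Advances *trb to the next TRB, hopping to the next segment's first
 * TRB when the current TRB is a link TRB.
 */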
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

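/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 *
 * The event ring has no link TRBs visible to software, so the last TRB of a
 * segment is detected positionally; the cycle state flips only when the
 * dequeue pointer wraps from the last segment back to the first.  All other
 * ring types carry real link TRBs, which are simply skipped over.
 */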
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
}

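/*
 * See Cycle bit rules. SW is the producer for all transfer, command and
 * stream rings.
 *
 * If we just enqueued a TRB in the middle of a TD (the chain bit of the
 * previous TRB is set), hand any link TRB we land on over to the hardware:
 * copy the chain bit into it, flip its cycle bit, and hop to the next
 * segment.  If the caller isn't going to enqueue more TRBs before ringing
 * the doorbell (@more_trbs_coming is false) and the chain bit is clear,
 * leave the link TRB alone; prepare_ring() will hand it over later.
 */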
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Advance past any link TRBs at the new enqueue position */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link
		 * TRB to the hardware just yet.  We'll give the link TRB back
		 * in prepare_ring() just before we enqueue the TD at the top
		 * of the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/*
		 * If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

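/*
 * Check to see if there's room to enqueue num_trbs on the ring, and make sure
 * the enqueue pointer will not advance into the dequeue segment.
 */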
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}

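/* Ring the host controller doorbell after placing a command on the ring */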
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;

	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly
	 * cleared but the completion event is never sent.  Keep the command
	 * timeout timer armed (at twice the usual timeout) to handle those
	 * cases.
	 */
	mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/*
	 * Poll for the CMD_RING_RUNNING bit to clear; the xHCI spec expects
	 * the abort to complete within a few seconds, so allow up to 5
	 * seconds here.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		/* we are about to kill xhci, give it one more chance */
		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			      &xhci->op_regs->cmd_ring);
		udelay(1000);
		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
		if (ret == 0)
			return 0;

		xhci_err(xhci,
			 "Stopping the command ring failed, maybe the host is dead\n");
		del_timer(&xhci->cmd_timer);
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_quiesce(xhci);
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/*
	 * Don't ring the doorbell for this endpoint if there are pending
	 * cancellations, because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending, because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/*
	 * No read-back to flush the posted write; the doorbell will get there
	 * soon enough.
	 */
}

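/* Ring the doorbell for any rings with pending URBs */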
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	/* Stream IDs run from 1 to num_streams - 1; stream 0 is reserved */
	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
	}
}

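/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */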
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
			  "WARN: Slot ID %u, ep index %u has streams, but URB has no stream ID.\n",
			  slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
		  "WARN: Slot ID %u, ep index %u has stream IDs 1 to %u allocated, but stream ID %u is requested.\n",
		  slot_id, ep_index,
		  ep->stream_info->num_streams - 1,
		  stream_id);
	return NULL;
}

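/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We walk the ring starting from the software dequeue pointer until we have
 * seen both the TRB the hardware stopped on (hw_dequeue, which carries the
 * current cycle state) and the last TRB of cur_td, toggling the cycle state
 * whenever we pass a link TRB with the toggle bit set.
 */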
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state for invalid stream ID %u.\n",
			  stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb).  We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found &&
		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

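/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */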
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
	     true;
	     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Cancel (unchain) link TRB");
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Address = %p (0x%llx dma); in seg %p (0x%llx dma)",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
			    cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"TRB to noop at offset 0x%llx",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

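/* Must be called with xhci->lock held in interrupt context */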
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(urb_priv);
		spin_lock(&xhci->lock);
	}
}

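/*
 * Unmap a TD's bounce buffer.  For IN transfers, copy the bounced data back
 * into the URB's scatterlist first; for OUT transfers, just unmap.
 */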
void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
				 struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for in transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

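/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past those TDs
 *     using the Set Transfer Ring Dequeue Pointer command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */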
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
				  slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it
			 * was removed from the td_list for that endpoint.  In
			 * the worst case, the hardware will corrupt the buffer.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
				  cur_td->urb,
				  cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion event for this TD
		 * anymore, so remove it from the endpoint ring's TD list.
		 * Keep it in the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB.  It doesn't matter what status
		 * we pass, since the core will just overwrite it (because the
		 * URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (ep_ring && cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);

		if (ep_ring && (xhci->quirks & XHCI_STREAM_QUIRK) &&
		    ep_ring->stream_timeout_handler) {
			/*
			 * The stream timeout handler cancelled this TD;
			 * give the URB back with -EAGAIN so the class
			 * driver can resubmit it.
			 */
			xhci_giveback_urb_in_irq(xhci, cur_td, -EAGAIN);
			ep_ring->stream_timeout_handler = false;
		} else {
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);
		}

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);
}

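/*
 * Give back every URB on the ring with -ESHUTDOWN, unmapping any bounce
 * buffers first.  Used when the host controller is dying.
 */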
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		if (cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

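/*
 * Stream-ring timeout handler (XHCI_STREAM_QUIRK): if TDs are still pending
 * on the ring when its timer fires, mark every TD of the oldest URB as
 * cancelled and queue a Stop Endpoint command so the cancellation path can
 * give the URB back with -EAGAIN.
 */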
void xhci_stream_timeout(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id, ep_index, stream_id;
	struct xhci_td *td = NULL;
	struct urb *urb = NULL;
	struct urb_priv *urb_priv;
	struct xhci_command *command;
	unsigned long flags;
	int i;

	ep_ring = (struct xhci_ring *) arg;
	xhci = ep_ring->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	if (!list_empty(&ep_ring->td_list)) {
		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		urb = td->urb;
		urb_priv = urb->hcpriv;

		slot_id = urb->dev->slot_id;
		ep_index = xhci_get_endpoint_index(&urb->ep->desc);
		stream_id = ep_ring->stream_id;
		ep = &xhci->devs[slot_id]->eps[ep_index];
		ep_ring->stream_timeout_handler = true;

		/* Delete the stream timer */
		del_timer(&ep_ring->stream_timer);

		/* Mark all the TDs of this URB as cancelled */
		for (i = 0; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			list_add_tail(&td->cancelled_td_list,
					&ep->cancelled_td_list);
		}

		/* Queue a stop endpoint command, but only if this is
		 * the first cancellation to be handled.
		 */
		if (!(ep->ep_state & EP_HALT_PENDING)) {
			command = xhci_alloc_command(xhci, false,
					false, GFP_ATOMIC);
			if (!command) {
				xhci_warn(xhci,
					  "%s: Failed to allocate command\n",
					  __func__);
				spin_unlock_irqrestore(&xhci->lock, flags);
				return;
			}

			ep->ep_state |= EP_HALT_PENDING;
			ep->stop_cmds_pending++;
			ep->stop_cmd_timer.expires = jiffies +
				XHCI_STOP_EP_CMD_TIMEOUT * HZ;
			add_timer(&ep->stop_cmd_timer);
			xhci_queue_stop_endpoint(xhci, command,
					urb->dev->slot_id, ep_index, 0);
			xhci_ring_cmd_db(xhci);
		}

		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&xhci->lock, flags);

	/* TD list was empty: nothing to cancel, just stop the timer */
	del_timer(&ep_ring->stream_timer);
}

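/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * we've halted the endpoint: set XHCI_STATE_DYING, quiesce and halt the host,
 * and complete all outstanding URBs so device drivers can disconnect cleanly.
 */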
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and give back
		 * all pending URBs below.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

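/*
 * After a successful Set Transfer Ring Dequeue Pointer command, advance our
 * software dequeue pointer (and num_trbs_free) until it matches the dequeue
 * pointer that was queued to the hardware.
 */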
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

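/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so
 * that the TD queueing code can ring the doorbell again.  We also update our
 * internal dequeue pointer if the command succeeded.
 */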
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state are correct.  Leave the internal
		 * dequeue state alone and let the cleanup below clear the
		 * pending flag.
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

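/*
 * Handle a Reset Endpoint command completion: either queue the follow-up
 * Configure Endpoint command required by XHCI_RESET_EP_QUIRK hardware, or
 * simply clear our internal halted state.
 */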
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		xhci->slot_id = slot_id;
	else
		xhci->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
			  slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}

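/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs.  If there are other commands waiting, restart the ring and kick
 * the timer.  Must be called with the command ring stopped and xhci->lock held.
 */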
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd, *tmp_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in mid-ring into no-ops */
	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
				 cmd_list) {

		if (i_cmd->status != COMP_CMD_ABORT)
			continue;

		i_cmd->status = COMP_CMD_STOP;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

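/*
 * Command ring timer callback: if the command ring is still running, try to
 * abort it; otherwise turn the stopped ring's aborted commands into no-ops.
 */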
void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	bool second_timeout = false;

	xhci = (struct xhci_hcd *) data;

	/* mark this command to be cancelled */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->current_cmd) {
		if (xhci->current_cmd->status == COMP_CMD_ABORT)
			second_timeout = true;
		xhci->current_cmd->status = COMP_CMD_ABORT;
	}

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}

	/* command ring failed to restart, or host removed. Bail out */
	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
		return;
	}

	/* command timeout on stopped ring, ring can't run */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}

	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT)
			goto event_handled;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* Use the SLOT_ID from the command TRB rather than the one
		 * reported in the completion event.
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
					       struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

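/*
 * Convert a one-based hardware port ID (which counts USB 2.0 and USB 3.0
 * ports together) into a zero-based index into the given roothub's port
 * array, by counting how many similar-speed ports precede it.
 */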
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id is one-based; walk every hardware port before it and count
	 * the ones that belong to the same roothub speed.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for unused slot %u\n",
			  slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in Extended Capabilities, ignoring.\n",
			  port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in Extended Capabilities, ignoring.\n",
			  port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include
	 * both USB 3.0 and USB 2.0 ports, so convert the hardware port ID
	 * into an index into this roothub's own port array.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;

	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device transitioned to U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(faked_port_index,
				     &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED_ANY(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(temp) &&
			test_and_clear_bit(faked_port_index,
					   &bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

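/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */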
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma,
		bool debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

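/*
 * Clear a halted endpoint: mark it halted, queue a Reset Endpoint command,
 * clean up the stalled ring (moving the dequeue pointer past the offending
 * TD), and ring the command doorbell.
 */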
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, ep_index, td);

	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

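/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */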
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
				cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
			 trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

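/*
 * Finish the td processing, remove the td from td list;
 * Return 1 if the urb can be given back.
 */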
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP ||
			trb_comp_code == COMP_STOP_SHORT) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		return 0;
	}
	if (trb_comp_code == COMP_STALL ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, event_trb);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

td_cleanup:
	/* Clean up the endpoint's TD list */
	urb = td->urb;
	urb_priv = urb->hcpriv;

	/* if a bounce buffer was used to align this td then unmap it */
	if (td->bounce_seg)
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
			urb->transfer_buffer_length,
			urb->actual_length);
		urb->actual_length = 0;
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	urb_priv->td_cnt++;
	/* Giveback the urb when all the tds are completed */
	if (urb_priv->td_cnt == urb_priv->length) {
		ret = 1;
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
	}

	return ret;
}

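/*
 * Process control tds, update urb status and actual_length.
 */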
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_SHORT:
		if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		else
			td->urb->actual_length =
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	case COMP_STOP:
		/* Did we stop at the data stage? */
		if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		/* fall through */
	case COMP_STOP_INVAL:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, halted endpoint index = %u\n",
			 trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb_length_set) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
			/*
			 * Maybe the event was for the data stage?  If so,
			 * update the actual_length of the URB now and flag it
			 * as set, so that it is not overwritten in the event
			 * for the last TRB.
			 */
			td->urb_length_set = true;
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

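/*
 * Process isochronous tds, update urb packet status and actual_length.
 */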
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
			frame->status = 0;
			break;
		}
		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
			trb_comp_code = COMP_SHORT_TX;
		/* fall through */
	case COMP_STOP_SHORT:
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_TX_ERR:
		frame->status = -EPROTO;
		if (event_trb != td->last_trb)
			return 0;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else if (trb_comp_code == COMP_STOP_SHORT) {
		frame->actual_length =
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		td->urb->actual_length += frame->actual_length;
	} else {
		/* Walk the TD's TRBs to sum up the transferred length */
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

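/*
 * The xHC skipped this TD (e.g. a missed isoc service interval): mark the
 * frame as errored with -EXDEV and move the ring dequeue pointer past the TD.
 */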
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

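/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */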
2254static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2255 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2256 struct xhci_virt_ep *ep, int *status)
2257{
2258 struct xhci_ring *ep_ring;
2259 union xhci_trb *cur_trb;
2260 struct xhci_segment *cur_seg;
2261 u32 trb_comp_code;
2262
2263 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2264 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2265
2266 switch (trb_comp_code) {
2267 case COMP_SUCCESS:
2268
2269 if (event_trb != td->last_trb ||
2270 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2271 xhci_warn(xhci, "WARN Successful completion "
2272 "on short TX\n");
2273 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2274 *status = -EREMOTEIO;
2275 else
2276 *status = 0;
2277 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2278 trb_comp_code = COMP_SHORT_TX;
2279 } else {
2280 *status = 0;
2281 }
2282 break;
2283 case COMP_STOP_SHORT:
2284 case COMP_SHORT_TX:
2285 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2286 *status = -EREMOTEIO;
2287 else
2288 *status = 0;
2289 break;
2290 default:
2291
2292 break;
2293 }
2294 if (trb_comp_code == COMP_SHORT_TX)
2295 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2296 "%d bytes untransferred\n",
2297 td->urb->ep->desc.bEndpointAddress,
2298 td->urb->transfer_buffer_length,
2299 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2300
2301 if (trb_comp_code == COMP_STOP_SHORT) {
2302 td->urb->actual_length =
2303 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2304
2305 if (td->urb->transfer_buffer_length <
2306 td->urb->actual_length) {
2307 xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
2308 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2309 td->urb->actual_length = 0;
2310
2311 }
2312
2313 } else if (event_trb == td->last_trb) {
2314 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2315 td->urb->actual_length =
2316 td->urb->transfer_buffer_length -
2317 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2318 if (td->urb->transfer_buffer_length <
2319 td->urb->actual_length) {
2320 xhci_warn(xhci, "HC gave bad length "
2321 "of %d bytes left\n",
2322 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2323 td->urb->actual_length = 0;
2324 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2325 *status = -EREMOTEIO;
2326 else
2327 *status = 0;
2328 }
2329
2330 if (*status == -EINPROGRESS) {
2331 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2332 *status = -EREMOTEIO;
2333 else
2334 *status = 0;
2335 }
2336 } else {
2337 td->urb->actual_length =
2338 td->urb->transfer_buffer_length;
2339			/* Ignore a short packet completion if the
2340			 * untransferred length was zero.
2341			 */
2342 if (*status == -EREMOTEIO)
2343 *status = 0;
2344 }
2345 } else {
2346		/* Slow path - walk the list, starting from the dequeue
2347		 * pointer, to get the actual length transferred.
2348		 */
2349 td->urb->actual_length = 0;
2350 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2351 cur_trb != event_trb;
2352 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2353 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2354 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2355 td->urb->actual_length +=
2356 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2357 }
2358		/* If the ring didn't stop on a Link or No-op TRB, add
2359		 * in the actual bytes transferred from the Transfer Event TRB.
2360		 */
2361 if (trb_comp_code != COMP_STOP_INVAL)
2362 td->urb->actual_length +=
2363 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2364 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2365 }
2366
2367 return finish_td(xhci, td, event_trb, event, ep, status, false);
2368}
2369
2370/*
2371 * Handle a transfer event for an endpoint: find the TD the event refers
2372 * to, process it, and give the URB back once it completes.  xhci->lock
2373 * may be dropped and re-acquired for the giveback, hence the annotations.
2374 */
2375static int handle_tx_event(struct xhci_hcd *xhci,
2376 struct xhci_transfer_event *event)
2377 __releases(&xhci->lock)
2378 __acquires(&xhci->lock)
2379{
2380 struct xhci_virt_device *xdev;
2381 struct xhci_virt_ep *ep;
2382 struct xhci_ring *ep_ring;
2383 unsigned int slot_id;
2384 int ep_index;
2385 struct xhci_td *td = NULL;
2386 dma_addr_t event_dma;
2387 struct xhci_segment *event_seg;
2388 union xhci_trb *event_trb;
2389 struct urb *urb = NULL;
2390 int status = -EINPROGRESS;
2391 struct urb_priv *urb_priv;
2392 struct xhci_ep_ctx *ep_ctx;
2393 struct list_head *tmp;
2394 u32 trb_comp_code;
2395 int ret = 0;
2396 int td_num = 0;
2397 bool handling_skipped_tds = false;
2398
2399 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2400 xdev = xhci->devs[slot_id];
2401 if (!xdev) {
2402 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2403 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2404 (unsigned long long) xhci_trb_virt_to_dma(
2405 xhci->event_ring->deq_seg,
2406 xhci->event_ring->dequeue),
2407 lower_32_bits(le64_to_cpu(event->buffer)),
2408 upper_32_bits(le64_to_cpu(event->buffer)),
2409 le32_to_cpu(event->transfer_len),
2410 le32_to_cpu(event->flags));
2411 xhci_dbg(xhci, "Event ring:\n");
2412 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2413 return -ENODEV;
2414 }
2415
2416	/* Endpoint ID is 1 based, our index is zero based */
2417 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2418 ep = &xdev->eps[ep_index];
2419 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2420 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2421 if (!ep_ring ||
2422 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2423 EP_STATE_DISABLED) {
2424 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2425 "or incorrect stream ring\n");
2426 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2427 (unsigned long long) xhci_trb_virt_to_dma(
2428 xhci->event_ring->deq_seg,
2429 xhci->event_ring->dequeue),
2430 lower_32_bits(le64_to_cpu(event->buffer)),
2431 upper_32_bits(le64_to_cpu(event->buffer)),
2432 le32_to_cpu(event->transfer_len),
2433 le32_to_cpu(event->flags));
2434 xhci_dbg(xhci, "Event ring:\n");
2435 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2436 return -ENODEV;
2437 }
2438
2439	/* Count the current number of TDs if ep->skip is set */
2440 if (ep->skip) {
2441 list_for_each(tmp, &ep_ring->td_list)
2442 td_num++;
2443 }
2444
2445 if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
2446 (ep->ep_state & EP_HAS_STREAMS))
2447 del_timer(&ep_ring->stream_timer);
2448
2449 event_dma = le64_to_cpu(event->buffer);
2450 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2451	/* Look for common error cases */
2452 switch (trb_comp_code) {
2453	/* Skip codes that require special handling depending on
2454	 * the transfer type.
2455	 */
2456 case COMP_SUCCESS:
2457 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2458 break;
2459 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2460 trb_comp_code = COMP_SHORT_TX;
2461 else
2462 xhci_warn_ratelimited(xhci,
2463 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
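		/* fall through */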
2464 case COMP_SHORT_TX:
2465 break;
2466 case COMP_STOP:
2467 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2468 break;
2469 case COMP_STOP_INVAL:
2470 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2471 break;
2472 case COMP_STOP_SHORT:
2473 xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
2474 break;
2475 case COMP_STALL:
2476 xhci_dbg(xhci, "Stalled endpoint\n");
2477 ep->ep_state |= EP_HALTED;
2478 status = -EPIPE;
2479 break;
2480 case COMP_TRB_ERR:
2481 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2482 status = -EILSEQ;
2483 break;
2484 case COMP_SPLIT_ERR:
2485 case COMP_TX_ERR:
2486 xhci_dbg(xhci, "Transfer error on endpoint\n");
2487 status = -EPROTO;
2488 break;
2489 case COMP_BABBLE:
2490 xhci_dbg(xhci, "Babble error on endpoint\n");
2491 status = -EOVERFLOW;
2492 break;
2493 case COMP_DB_ERR:
2494 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2495 status = -ENOSR;
2496 break;
2497 case COMP_BW_OVER:
2498 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2499 break;
2500 case COMP_BUFF_OVER:
2501 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2502 break;
2503 case COMP_UNDERRUN:
2504		/*
2505		 * When the isoch ring is empty, the xHC generates a Ring
2506		 * Overrun Event for IN isoch endpoints and a Ring Underrun
2507		 * Event for OUT isoch endpoints.
2508		 */
2509 xhci_dbg(xhci, "underrun event on endpoint\n");
2510 if (!list_empty(&ep_ring->td_list))
2511 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2512 "still with TDs queued?\n",
2513 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2514 ep_index);
2515 goto cleanup;
2516 case COMP_OVERRUN:
2517 xhci_dbg(xhci, "overrun event on endpoint\n");
2518 if (!list_empty(&ep_ring->td_list))
2519 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2520 "still with TDs queued?\n",
2521 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2522 ep_index);
2523 goto cleanup;
2524 case COMP_DEV_ERR:
2525		xhci_warn(xhci, "WARN: detected an incompatible device\n");
2526 status = -EPROTO;
2527 break;
2528 case COMP_MISSED_INT:
2529		/*
2530		 * When a Missed Service Error occurs, one or more isoc TDs
2531		 * may have been skipped by the xHC.  Set the skip flag of
2532		 * the ep_ring; the missed TDs are completed as short
2533		 * transfers the next time the ring is processed.
2534		 */
2535 ep->skip = true;
2536 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2537 goto cleanup;
2538 case COMP_PING_ERR:
2539 ep->skip = true;
2540		xhci_dbg(xhci, "No Ping response error, skipping one isoc TD\n");
2541 goto cleanup;
2542 default:
2543 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2544 status = 0;
2545 break;
2546 }
2547 xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
2548 trb_comp_code);
2549 goto cleanup;
2550 }
2551
2552 do {
2553		/* This TRB should be in the TD at the head of this ring's
2554		 * TD list.
2555		 */
2556 if (list_empty(&ep_ring->td_list)) {
2557			/*
2558			 * A stopped endpoint may generate an extra completion
2559			 * event if the device was suspended.  Don't print
2560			 * warnings in that case.
2561			 */
2562 if (!(trb_comp_code == COMP_STOP ||
2563 trb_comp_code == COMP_STOP_INVAL)) {
2564 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2565 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2566 ep_index);
2567 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2568 (le32_to_cpu(event->flags) &
2569 TRB_TYPE_BITMASK)>>10);
2570 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2571 }
2572 if (ep->skip) {
2573 ep->skip = false;
2574 xhci_dbg(xhci, "td_list is empty while skip "
2575 "flag set. Clear skip flag.\n");
2576 }
2577 ret = 0;
2578 goto cleanup;
2579 }
2580
2581		/* We've skipped all the TDs on the ep ring when ep->skip was set */
2582 if (ep->skip && td_num == 0) {
2583 ep->skip = false;
2584 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2585 "Clear skip flag.\n");
2586 ret = 0;
2587 goto cleanup;
2588 }
2589
2590 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2591 if (ep->skip)
2592 td_num--;
2593
2594		/* Is this a TRB in the currently executing TD? */
2595 event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2596 td->last_trb, event_dma, false);
2597
2598		/*
2599		 * Skip the Force Stopped Event.  The event_trb (event_dma) of
2600		 * an FSE is not in the current TD pointed to by ep_ring->dequeue
2601		 * because the hardware dequeue pointer is still at the previous
2602		 * TRB of the current TD, which may be a Link TRB or the last
2603		 * TRB of the current TD.  The command completion handler will
2604		 * take care of the rest.
2605		 */
2606 if (!event_seg && (trb_comp_code == COMP_STOP ||
2607 trb_comp_code == COMP_STOP_INVAL)) {
2608 ret = 0;
2609 goto cleanup;
2610 }
2611
2612 if (!event_seg) {
2613 if (!ep->skip ||
2614 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2615				/* Some host controllers give a spurious
2616				 * successful event after a short transfer.
2617				 * Ignore it.
2618				 */
2619 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2620 ep_ring->last_td_was_short) {
2621 ep_ring->last_td_was_short = false;
2622 ret = 0;
2623 goto cleanup;
2624 }
2625
2626 xhci_err(xhci,
2627 "ERROR Transfer event TRB DMA ptr not "
2628 "part of current TD ep_index %d "
2629 "comp_code %u\n", ep_index,
2630 trb_comp_code);
2631 trb_in_td(xhci, ep_ring->deq_seg,
2632 ep_ring->dequeue, td->last_trb,
2633 event_dma, true);
2634 return -ESHUTDOWN;
2635 }
2636
2637 ret = skip_isoc_td(xhci, td, event, ep, &status);
2638 goto cleanup;
2639 }
2640 if (trb_comp_code == COMP_SHORT_TX)
2641 ep_ring->last_td_was_short = true;
2642 else
2643 ep_ring->last_td_was_short = false;
2644
2645 if (ep->skip) {
2646 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2647 ep->skip = false;
2648 }
2649
2650 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2651 sizeof(*event_trb)];
2652
2653		/*
2654		 * If event_trb is a no-op TRB, it means the corresponding
2655		 * TD has been cancelled.  No-op TRBs should not trigger
2656		 * interrupts; just skip the TD and move on.
2657		 */
2658 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2659 xhci_dbg(xhci,
2660 "event_trb is a no-op TRB. Skip it\n");
2661 goto cleanup;
2662 }
2663
2664		/* Now update the URB's actual_length and give it back to
2665		 * the core.
2666		 */
2667 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2668 ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2669 &status);
2670 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2671 ret = process_isoc_td(xhci, td, event_trb, event, ep,
2672 &status);
2673 else
2674 ret = process_bulk_intr_td(xhci, td, event_trb, event,
2675 ep, &status);
2676
2677cleanup:
2678
2679
2680 handling_skipped_tds = ep->skip &&
2681 trb_comp_code != COMP_MISSED_INT &&
2682 trb_comp_code != COMP_PING_ERR;
2683
2684		/*
2685		 * Do not update the event ring dequeue pointer if we're in
2686		 * the middle of processing missed TDs.
2687		 */
2688 if (!handling_skipped_tds)
2689 inc_deq(xhci, xhci->event_ring);
2690
2691 if (ret) {
2692 urb = td->urb;
2693 urb_priv = urb->hcpriv;
2694
2695 xhci_urb_free_priv(urb_priv);
2696
2697 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2698 if ((urb->actual_length != urb->transfer_buffer_length &&
2699 (urb->transfer_flags &
2700 URB_SHORT_NOT_OK)) ||
2701 (status != 0 &&
2702 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2703 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2704 "expected = %d, status = %d\n",
2705 urb, urb->actual_length,
2706 urb->transfer_buffer_length,
2707 status);
2708 spin_unlock(&xhci->lock);
2709		/* EHCI, UHCI, and OHCI always unconditionally set the
2710		 * urb->status of an isochronous endpoint to 0.
2711		 */
2712 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2713 status = 0;
2714 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2715 spin_lock(&xhci->lock);
2716 }
2717
2718	/*
2719	 * If ep->skip is set, it means there are missed TDs on the
2720	 * endpoint ring that still need to be taken care of.  Process
2721	 * them as short transfers until we reach the TD pointed to by
2722	 * the event, then drop out of the loop.
2723	 */
2724 } while (handling_skipped_tds);
2725
2726 return 0;
2727}
2728
2729/*
2730 * This function handles all OS-owned events on the event ring.  It may drop
2731 * xhci->lock between event processing (e.g. to pass up port status changes).
2732 * Returns >0 for "possibly more events to process" (caller should call again),
2733 * otherwise 0 if done.  In future, <0 returns should indicate error code.
2734 */
2735static int xhci_handle_event(struct xhci_hcd *xhci)
2736{
2737 union xhci_trb *event;
2738 int update_ptrs = 1;
2739 int ret;
2740
2741 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2742 xhci->error_bitmask |= 1 << 1;
2743 return 0;
2744 }
2745
2746 event = xhci->event_ring->dequeue;
2747	/* Does the HC or OS own the TRB? */
2748 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2749 xhci->event_ring->cycle_state) {
2750 xhci->error_bitmask |= 1 << 2;
2751 return 0;
2752 }
2753
2754	/*
2755	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2756	 * speculative reads of the event's flags/data below.
2757	 */
2758 rmb();
2759
2760 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2761 case TRB_TYPE(TRB_COMPLETION):
2762 handle_cmd_completion(xhci, &event->event_cmd);
2763 break;
2764 case TRB_TYPE(TRB_PORT_STATUS):
2765 handle_port_status(xhci, event);
2766 update_ptrs = 0;
2767 break;
2768 case TRB_TYPE(TRB_TRANSFER):
2769 ret = handle_tx_event(xhci, &event->trans_event);
2770 if (ret < 0)
2771 xhci->error_bitmask |= 1 << 9;
2772 else
2773 update_ptrs = 0;
2774 break;
2775 case TRB_TYPE(TRB_DEV_NOTE):
2776 handle_device_notification(xhci, event);
2777 break;
2778 default:
2779 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2780 TRB_TYPE(48))
2781 handle_vendor_event(xhci, event);
2782 else
2783 xhci->error_bitmask |= 1 << 3;
2784 }
2785
2786	/* Any of the handlers above may drop and re-acquire the lock; check
2787	 * that a watchdog timer didn't mark the host as non-responsive. */
2788 if (xhci->xhc_state & XHCI_STATE_DYING) {
2789 xhci_dbg(xhci, "xHCI host dying, returning from "
2790 "event handler.\n");
2791 return 0;
2792 }
2793
2794 if (update_ptrs)
2795		/* Update SW event ring dequeue pointer */
2796 inc_deq(xhci, xhci->event_ring);
2797
2798	/* Are there more items on the event ring?  Caller will call us
2799	 * again to check.
2800	 */
2801 return 1;
2802}
2803
2804/*
2805 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2806 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2807 * indicators of an event TRB error, but we check the status *first* to be safe.
2808 */
2809irqreturn_t xhci_irq(struct usb_hcd *hcd)
2810{
2811 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2812 u32 status;
2813 u64 temp_64;
2814 union xhci_trb *event_ring_deq;
2815 dma_addr_t deq;
2816
2817 spin_lock(&xhci->lock);
2818	/* Check if the xHC generated the interrupt, or the irq is shared */
2819 status = readl(&xhci->op_regs->status);
2820 if (status == 0xffffffff)
2821 goto hw_died;
2822
2823 if (!(status & STS_EINT)) {
2824 spin_unlock(&xhci->lock);
2825 return IRQ_NONE;
2826 }
2827 if (status & STS_FATAL) {
2828 xhci_warn(xhci, "WARNING: Host System Error\n");
2829 xhci_halt(xhci);
2830hw_died:
2831 spin_unlock(&xhci->lock);
2832 return IRQ_HANDLED;
2833 }
2834
2835	/*
2836	 * Clear the op reg interrupt status first,
2837	 * so we can receive interrupts from other MSI-X interrupters.
2838	 * Write 1 to clear the interrupt status.
2839	 */
2840 status |= STS_EINT;
2841 writel(status, &xhci->op_regs->status);
2842
2843	/* FIXME when MSI-X is supported and there are multiple vectors */
2844	/* Clear the MSI-X event interrupt status */
2845 if (hcd->irq) {
2846 u32 irq_pending;
2847		/* Acknowledge the PCI interrupt */
2848 irq_pending = readl(&xhci->ir_set->irq_pending);
2849 irq_pending |= IMAN_IP;
2850 writel(irq_pending, &xhci->ir_set->irq_pending);
2851 }
2852
2853 if (xhci->xhc_state & XHCI_STATE_DYING ||
2854 xhci->xhc_state & XHCI_STATE_HALTED) {
2855 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2856 "Shouldn't IRQs be disabled?\n");
2857		/* Clear the event handler busy flag (RW1C);
2858		 * the event ring should be empty.
2859		 */
2860 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2861 xhci_write_64(xhci, temp_64 | ERST_EHB,
2862 &xhci->ir_set->erst_dequeue);
2863 spin_unlock(&xhci->lock);
2864
2865 return IRQ_HANDLED;
2866 }
2867
2868 event_ring_deq = xhci->event_ring->dequeue;
2869	/* FIXME: this should be a delayed service routine
2870	 * that clears the EHB.
2871	 */
2872 while (xhci_handle_event(xhci) > 0) {}
2873
2874 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2875	/* If necessary, update the HW's version of the event ring deq ptr. */
2876 if (event_ring_deq != xhci->event_ring->dequeue) {
2877 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2878 xhci->event_ring->dequeue);
2879 if (deq == 0)
2880 xhci_warn(xhci, "WARN something wrong with SW event "
2881 "ring dequeue ptr.\n");
2882		/* Update HC event ring dequeue pointer */
2883 temp_64 &= ERST_PTR_MASK;
2884 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2885 }
2886
2887	/* Clear the event handler busy flag (RW1C); event ring is empty. */
2888 temp_64 |= ERST_EHB;
2889 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2890
2891 spin_unlock(&xhci->lock);
2892
2893 return IRQ_HANDLED;
2894}
2895
2896irqreturn_t xhci_msi_irq(int irq, void *hcd)
2897{
2898 return xhci_irq(hcd);
2899}
2900
2901/****		Endpoint Ring Operations	****/
2902
2903/*
2904 * Generic function for queueing a TRB on a ring.
2905 * The caller must have checked to make sure there's room on the ring.
2906 *
2907 * @more_trbs_coming:	Will you enqueue more TRBs before calling
2908 *			prepare_transfer()?
2909 */
2910static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2911 bool more_trbs_coming,
2912 u32 field1, u32 field2, u32 field3, u32 field4)
2913{
2914 struct xhci_generic_trb *trb;
2915
2916 trb = &ring->enqueue->generic;
2917 trb->field[0] = cpu_to_le32(field1);
2918 trb->field[1] = cpu_to_le32(field2);
2919 trb->field[2] = cpu_to_le32(field3);
2920 trb->field[3] = cpu_to_le32(field4);
2921 inc_enq(xhci, ring, more_trbs_coming);
2922}
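
/*
 * Note on usage (a summary of this file's enqueue path, for orientation):
 * callers first make room with prepare_ring()/prepare_transfer(), then call
 * queue_trb() once per TRB, and finally call giveback_first_trb(), which
 * flips the first TRB's cycle bit and rings the doorbell so the xHC sees
 * the whole TD at once.
 */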
2923
2924/*
2925 * Does various checks on the endpoint ring, and makes it ready to queue
2926 * num_trbs.  FIXME: allocate segments if the ring is full.
2927 */
2928static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2929 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2930{
2931 unsigned int num_trbs_needed;
2932
2933	/* Make sure the endpoint has been added to the xHC schedule */
2934 switch (ep_state) {
2935 case EP_STATE_DISABLED:
2936		/*
2937		 * USB core changed config/interfaces without notifying us,
2938		 * or the hardware is reporting the wrong state.
2939		 */
2940 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2941 return -ENOENT;
2942 case EP_STATE_ERROR:
2943 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2944		/* FIXME: the event handling code for errors needs to clear this */
2945		/* XXX: not sure if this should be -ENOENT or not */
2946 return -EINVAL;
2947 case EP_STATE_HALTED:
2948 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
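		/* fall through */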
2949 case EP_STATE_STOPPED:
2950 case EP_STATE_RUNNING:
2951 break;
2952 default:
2953 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2954		/*
2955		 * FIXME: issue a Configure Endpoint command to try to get
2956		 * the HC back into a known state.
2957		 */
2958 return -EINVAL;
2959 }
2960
2961 while (1) {
2962 if (room_on_ring(xhci, ep_ring, num_trbs))
2963 break;
2964
2965 if (ep_ring == xhci->cmd_ring) {
2966			xhci_err(xhci, "Command ring expansion is not supported\n");
2967 return -ENOMEM;
2968 }
2969
2970 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2971 "ERROR no room on ep ring, try ring expansion");
2972 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2973 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2974 mem_flags)) {
2975 xhci_err(xhci, "Ring expansion failed\n");
2976 return -ENOMEM;
2977 }
2978 }
2979
2980 while (trb_is_link(ep_ring->enqueue)) {
2981		/* If we're not dealing with 0.95 hardware or isoc rings on
2982		 * an AMD 0.96 host, clear the chain bit.
2983		 */
2984 if (!xhci_link_trb_quirk(xhci) &&
2985 !(ep_ring->type == TYPE_ISOC &&
2986 (xhci->quirks & XHCI_AMD_0x96_HOST)))
2987 ep_ring->enqueue->link.control &=
2988 cpu_to_le32(~TRB_CHAIN);
2989 else
2990 ep_ring->enqueue->link.control |=
2991 cpu_to_le32(TRB_CHAIN);
2992
2993 wmb();
2994 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
2995
2996		/* Toggle the cycle bit after the last ring segment. */
2997 if (link_trb_toggles_cycle(ep_ring->enqueue))
2998 ep_ring->cycle_state ^= 1;
2999
3000 ep_ring->enq_seg = ep_ring->enq_seg->next;
3001 ep_ring->enqueue = ep_ring->enq_seg->trbs;
3002 }
3003 return 0;
3004}
3005
3006static int prepare_transfer(struct xhci_hcd *xhci,
3007 struct xhci_virt_device *xdev,
3008 unsigned int ep_index,
3009 unsigned int stream_id,
3010 unsigned int num_trbs,
3011 struct urb *urb,
3012 unsigned int td_index,
3013 gfp_t mem_flags)
3014{
3015 int ret;
3016 struct urb_priv *urb_priv;
3017 struct xhci_td *td;
3018 struct xhci_ring *ep_ring;
3019 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3020
3021 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
3022 if (!ep_ring) {
3023 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3024 stream_id);
3025 return -EINVAL;
3026 }
3027
3028 ret = prepare_ring(xhci, ep_ring,
3029 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3030 num_trbs, mem_flags);
3031 if (ret)
3032 return ret;
3033
3034 urb_priv = urb->hcpriv;
3035 td = urb_priv->td[td_index];
3036
3037 INIT_LIST_HEAD(&td->td_list);
3038 INIT_LIST_HEAD(&td->cancelled_td_list);
3039
3040 if (td_index == 0) {
3041 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3042 if (unlikely(ret))
3043 return ret;
3044 }
3045
3046 td->urb = urb;
3047	/* Add this TD to the tail of the endpoint ring's TD list */
3048 list_add_tail(&td->td_list, &ep_ring->td_list);
3049 td->start_seg = ep_ring->enq_seg;
3050 td->first_trb = ep_ring->enqueue;
3051
3052 urb_priv->td[td_index] = td;
3053
3054 return 0;
3055}
3056
3057static unsigned int count_trbs(u64 addr, u64 len)
3058{
3059 unsigned int num_trbs;
3060
3061 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3062 TRB_MAX_BUFF_SIZE);
3063 if (num_trbs == 0)
3064 num_trbs++;
3065
3066 return num_trbs;
3067}
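
/*
 * Worked example (illustrative): with TRB_MAX_BUFF_SIZE = 64 KB, a 1024-byte
 * buffer whose DMA address sits 512 bytes below a 64 KB boundary
 * (addr & (TRB_MAX_BUFF_SIZE - 1) == 0xFE00) crosses that boundary, so
 * DIV_ROUND_UP(1024 + 0xFE00, 0x10000) = 2 TRBs are needed.  A zero-length
 * buffer still consumes one TRB.
 */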
3068
3069static inline unsigned int count_trbs_needed(struct urb *urb)
3070{
3071 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3072}
3073
3074static unsigned int count_sg_trbs_needed(struct urb *urb)
3075{
3076 struct scatterlist *sg;
3077 unsigned int i, len, full_len, num_trbs = 0;
3078
3079 full_len = urb->transfer_buffer_length;
3080
3081 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3082 len = sg_dma_len(sg);
3083 num_trbs += count_trbs(sg_dma_address(sg), len);
3084 len = min_t(unsigned int, len, full_len);
3085 full_len -= len;
3086 if (full_len == 0)
3087 break;
3088 }
3089
3090 return num_trbs;
3091}
3092
3093static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3094{
3095 u64 addr, len;
3096
3097 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3098 len = urb->iso_frame_desc[i].length;
3099
3100 return count_trbs(addr, len);
3101}
3102
3103static void check_trb_math(struct urb *urb, int running_total)
3104{
3105 if (unlikely(running_total != urb->transfer_buffer_length))
3106 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3107 "queued %#x (%d), asked for %#x (%d)\n",
3108 __func__,
3109 urb->ep->desc.bEndpointAddress,
3110 running_total, running_total,
3111 urb->transfer_buffer_length,
3112 urb->transfer_buffer_length);
3113}
3114
3115static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3116 unsigned int ep_index, unsigned int stream_id, int start_cycle,
3117 struct xhci_generic_trb *start_trb)
3118{
3119	/* Make sure the writes filling in the TRBs are not reordered past
3120	 * the cycle-bit flip of the first TRB below, which hands the whole
3121	 * TD to the hardware at once.
3122	 */
3123 wmb();
3124 if (start_cycle)
3125 start_trb->field[3] |= cpu_to_le32(start_cycle);
3126 else
3127 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3128 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3129}
3130
3131static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3132 struct xhci_ep_ctx *ep_ctx)
3133{
3134 int xhci_interval;
3135 int ep_interval;
3136
3137 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3138 ep_interval = urb->interval;
3139
3140	/* Convert to microframes */
3141 if (urb->dev->speed == USB_SPEED_LOW ||
3142 urb->dev->speed == USB_SPEED_FULL)
3143 ep_interval *= 8;
3144
3145	/* FIXME: change this to a warning and a suggestion to use the new
3146	 * API to set the polling interval (once the API is added).
3147	 */
3148 if (xhci_interval != ep_interval) {
3149 dev_dbg_ratelimited(&urb->dev->dev,
3150 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3151 ep_interval, ep_interval == 1 ? "" : "s",
3152 xhci_interval, xhci_interval == 1 ? "" : "s");
3153 urb->interval = xhci_interval;
3154		/* Convert back to frames for LS/FS devices */
3155 if (urb->dev->speed == USB_SPEED_LOW ||
3156 urb->dev->speed == USB_SPEED_FULL)
3157 urb->interval /= 8;
3158 }
3159}
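
/*
 * Worked example (illustrative): a full-speed interrupt URB with
 * urb->interval = 4 frames is 32 microframes; if the endpoint context
 * encodes an interval of 64 microframes, the mismatch is logged and
 * urb->interval becomes 64 / 8 = 8 frames.
 */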
3160
3161/*
3162 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
3163 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
3164 * (comprised of sg list entries) can take several service intervals to
3165 * transmit.
3166 */
3167int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3168 struct urb *urb, int slot_id, unsigned int ep_index)
3169{
3170 struct xhci_ep_ctx *ep_ctx;
3171
3172 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3173 check_interval(xhci, urb, ep_ctx);
3174
3175 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3176}
3177
3178/*
3179 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3180 * packets remaining in the TD (*not* including this TRB).
3181 *
3182 * Total TD packet count = total_packet_count =
3183 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
3184 *
3185 * Packets transferred up to and including this TRB = packets_transferred =
3186 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3187 *
3188 * TD size = total_packet_count - packets_transferred
3189 *
3190 * For xHCI 0.96 and older, the TD size field should be the remaining bytes
3191 * including this TRB, right shifted by 10.
3192 *
3193 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3194 * This is taken care of in the TRB_TD_SIZE() macro.
3195 *
3196 * The last TRB in a TD must have the TD size set to zero.
3197 */
3198static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3199 int trb_buff_len, unsigned int td_total_len,
3200 struct urb *urb, bool more_trbs_coming)
3201{
3202 u32 maxp, total_packet_count;
3203
3204	/* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
3205 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3206 return ((td_total_len - transferred) >> 10);
3207
3208	/* One TRB with a zero-length data packet. */
3209 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3210 trb_buff_len == td_total_len)
3211 return 0;
3212
3213	/* For MTK xHCI, TD size doesn't include this TRB */
3214 if (xhci->quirks & XHCI_MTK_HOST)
3215 trb_buff_len = 0;
3216
3217 maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3218 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3219
3220	/* Queueing functions don't count the current TRB into transferred */
3221 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3222}
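
/*
 * Worked example (illustrative, xHCI 1.0 host, 512-byte wMaxPacketSize):
 * a 3072-byte TD queued as three 1024-byte TRBs has total_packet_count =
 * DIV_ROUND_UP(3072, 512) = 6.  The first TRB reports a TD size of
 * 6 - (0 + 1024) / 512 = 4, the second 6 - (1024 + 1024) / 512 = 2, and
 * the last TRB always reports 0.  On a pre-1.0 host the field would
 * instead be the remaining byte count shifted right by 10.
 */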
3223
3224/* Align the last TRB of a TD to a packet boundary, bouncing the tail into seg->bounce_buf if the TRB can't simply be shortened. */
3225static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3226 u32 *trb_buff_len, struct xhci_segment *seg)
3227{
3228 struct device *dev = xhci_to_hcd(xhci)->self.controller;
3229 unsigned int unalign;
3230 unsigned int max_pkt;
3231 u32 new_buff_len;
3232
3233 max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3234 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3235
3236
3237	/* We got lucky, last normal TRB data on segment is packet aligned */
3238 return 0;
3239
3240 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3241 unalign, *trb_buff_len);
3242
3243	/* Is the last normal TRB alignable by splitting it? */
3244 if (*trb_buff_len > unalign) {
3245 *trb_buff_len -= unalign;
3246 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3247 return 0;
3248 }
3249
3250	/*
3251	 * We want enqd_len + trb_buff_len to sum up to a number which is
3252	 * divisible by the endpoint's wMaxPacketSize.  IOW:
3253	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3254	 */
3255 new_buff_len = max_pkt - (enqd_len % max_pkt);
3256
3257 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3258 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3259
3260	/* Create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */
3261 if (usb_urb_dir_out(urb)) {
3262 sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
3263 seg->bounce_buf, new_buff_len, enqd_len);
3264 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3265 max_pkt, DMA_TO_DEVICE);
3266 } else {
3267 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3268 max_pkt, DMA_FROM_DEVICE);
3269 }
3270
3271 if (dma_mapping_error(dev, seg->bounce_dma)) {
3272		/* Try without aligning; some host controllers survive */
3273 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3274 return 0;
3275 }
3276 *trb_buff_len = new_buff_len;
3277 seg->bounce_len = new_buff_len;
3278 seg->bounce_offs = enqd_len;
3279
3280 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3281
3282 return 1;
3283}
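
/*
 * Worked example (illustrative): with max_pkt = 1024, enqd_len = 3000 and a
 * 50-byte TRB left on the segment, unalign = 3050 % 1024 = 1002, which is
 * more than the 50 bytes available, so splitting can't help.  The bounce
 * path instead sizes the TRB to new_buff_len = 1024 - (3000 % 1024) = 72
 * bytes (capped at the bytes remaining in the URB), making the enqueued
 * total 3072, a packet boundary.
 */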
3284
3285/* This is very similar to what ehci-q.c qtd_fill() does */
3286int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3287 struct urb *urb, int slot_id, unsigned int ep_index)
3288{
3289 struct xhci_ring *ring;
3290 struct urb_priv *urb_priv;
3291 struct xhci_td *td;
3292 struct xhci_generic_trb *start_trb;
3293 struct scatterlist *sg = NULL;
3294 bool more_trbs_coming = true;
3295 bool need_zero_pkt = false;
3296 bool first_trb = true;
3297 unsigned int num_trbs;
3298 unsigned int start_cycle, num_sgs = 0;
3299 unsigned int enqd_len, block_len, trb_buff_len, full_len;
3300 int sent_len, ret;
3301 u32 field, length_field, remainder;
3302 u64 addr, send_addr;
3303
3304 ring = xhci_urb_to_transfer_ring(xhci, urb);
3305 if (!ring)
3306 return -EINVAL;
3307
3308 full_len = urb->transfer_buffer_length;
3309
3310 if (urb->num_sgs) {
3311 num_sgs = urb->num_mapped_sgs;
3312 sg = urb->sg;
3313 addr = (u64) sg_dma_address(sg);
3314 block_len = sg_dma_len(sg);
3315 num_trbs = count_sg_trbs_needed(urb);
3316 } else {
3317 num_trbs = count_trbs_needed(urb);
3318 addr = (u64) urb->transfer_dma;
3319 block_len = full_len;
3320 }
3321 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3322 ep_index, urb->stream_id,
3323 num_trbs, urb, 0, mem_flags);
3324 if (unlikely(ret < 0))
3325 return ret;
3326
3327 urb_priv = urb->hcpriv;
3328
3329	/* Deal with URB_ZERO_PACKET - need one more TD/TRB */
3330 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
3331 need_zero_pkt = true;
3332
3333 td = urb_priv->td[0];
3334
3335	/*
3336	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3337	 * until we've finished creating all the other TRBs.  The ring's cycle
3338	 * state may change as we enqueue the other TRBs, so save it too.
3339	 */
3340 start_trb = &ring->enqueue->generic;
3341 start_cycle = ring->cycle_state;
3342 send_addr = addr;
3343
3344
3345 for (enqd_len = 0; first_trb || enqd_len < full_len;
3346 enqd_len += trb_buff_len) {
3347 field = TRB_TYPE(TRB_NORMAL);
3348
3349		/* TRB buffer should not cross 64KB boundaries */
3350 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3351 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
3352
3353 if (enqd_len + trb_buff_len > full_len)
3354 trb_buff_len = full_len - enqd_len;
3355
3356		/* Don't change the cycle bit of the first TRB until later */
3357 if (first_trb) {
3358 first_trb = false;
3359 if (start_cycle == 0)
3360 field |= TRB_CYCLE;
3361 } else
3362 field |= ring->cycle_state;
3363
3364		/* Chain all the TRBs together; clear the chain bit in the
3365		 * last TRB to indicate it's the last TRB in the chain.
3366		 */
3367 if (enqd_len + trb_buff_len < full_len) {
3368 field |= TRB_CHAIN;
3369 if (trb_is_link(ring->enqueue + 1)) {
3370 if (xhci_align_td(xhci, urb, enqd_len,
3371 &trb_buff_len,
3372 ring->enq_seg)) {
3373 send_addr = ring->enq_seg->bounce_dma;
3374					/* Assume the TD won't span two segments */
3375 td->bounce_seg = ring->enq_seg;
3376 }
3377 }
3378 }
3379 if (enqd_len + trb_buff_len >= full_len) {
3380 field &= ~TRB_CHAIN;
3381 field |= TRB_IOC;
3382 more_trbs_coming = false;
3383 td->last_trb = ring->enqueue;
3384 }
3385
3386		/* Only set interrupt on short packet for IN endpoints */
3387 if (usb_urb_dir_in(urb))
3388 field |= TRB_ISP;
3389
3390		/* Set the TRB length, TD size, and interrupter fields */
3391 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3392 full_len, urb, more_trbs_coming);
3393
3394 length_field = TRB_LEN(trb_buff_len) |
3395 TRB_TD_SIZE(remainder) |
3396 TRB_INTR_TARGET(0);
3397
3398 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
3399 lower_32_bits(send_addr),
3400 upper_32_bits(send_addr),
3401 length_field,
3402 field);
3403
3404 addr += trb_buff_len;
3405 sent_len = trb_buff_len;
3406
3407 while (sg && sent_len >= block_len) {
3408
3409 --num_sgs;
3410 sent_len -= block_len;
3411 if (num_sgs != 0) {
3412 sg = sg_next(sg);
3413 block_len = sg_dma_len(sg);
3414 addr = (u64) sg_dma_address(sg);
3415 addr += sent_len;
3416 }
3417 }
3418 block_len -= sent_len;
3419 send_addr = addr;
3420 }
3421
3422 if (need_zero_pkt) {
3423 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3424 ep_index, urb->stream_id,
3425 1, urb, 1, mem_flags);
3426 urb_priv->td[1]->last_trb = ring->enqueue;
3427 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3428 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3429 }
3430
3431 check_trb_math(urb, enqd_len);
3432
3433 if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0) &&
3434 (usb_endpoint_dir_in(&urb->ep->desc) == 1)) {
3435		/* Vendor stream quirk: (re)arm the watchdog timer so that a
3436		 * stream ring that produces no completion within 5 seconds
3437		 * can be detected and recovered by the timer handler. */
3438 ring->stream_timeout_handler = false;
3439 mod_timer(&ring->stream_timer, jiffies + 5 * HZ);
3440 }
3441 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3442 start_cycle, start_trb);
3443 return 0;
3444}
3445
3446/* Caller must have locked xhci->lock */
3447int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3448 struct urb *urb, int slot_id, unsigned int ep_index)
3449{
3450 struct xhci_ring *ep_ring;
3451 int num_trbs;
3452 int ret;
3453 struct usb_ctrlrequest *setup;
3454 struct xhci_generic_trb *start_trb;
3455 int start_cycle;
3456 u32 field, length_field, remainder;
3457 struct urb_priv *urb_priv;
3458 struct xhci_td *td;
3459
3460 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3461 if (!ep_ring)
3462 return -EINVAL;
3463
3464	/*
3465	 * Need to copy the setup packet into the setup TRB (immediate data),
3466	 * so we can't use the setup packet's DMA address directly.
3467	 */
3468 if (!urb->setup_packet)
3469 return -EINVAL;
3470
3471	/* 1 TRB for setup, 1 for status */
3472 num_trbs = 2;
3473
3474	/* No need to check for additional event data and normal TRBs,
3475	 * since data in control transfers will never get bigger than 16MB.
3476	 * XXX: can we get a buffer that crosses 64KB boundaries?
3477	 */
3478 if (urb->transfer_buffer_length > 0)
3479 num_trbs++;
3480 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3481 ep_index, urb->stream_id,
3482 num_trbs, urb, 0, mem_flags);
3483 if (ret < 0)
3484 return ret;
3485
3486 urb_priv = urb->hcpriv;
3487 td = urb_priv->td[0];
3488
3489	/*
3490	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3491	 * until we've finished creating all the other TRBs.  The ring's cycle
3492	 * state may change as we enqueue the other TRBs, so save it too.
3493	 */
3494 start_trb = &ep_ring->enqueue->generic;
3495 start_cycle = ep_ring->cycle_state;
3496
3497	/* Queue the setup TRB - see xHCI section 6.4.1.2.1 */
3498	/* FIXME: better way to translate setup_packet into two u32 fields? */
3499 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3500 field = 0;
3501 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3502 if (start_cycle == 0)
3503 field |= 0x1;
3504
3505	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
3506 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3507 if (urb->transfer_buffer_length > 0) {
3508 if (setup->bRequestType & USB_DIR_IN)
3509 field |= TRB_TX_TYPE(TRB_DATA_IN);
3510 else
3511 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3512 }
3513 }
3514
3515 queue_trb(xhci, ep_ring, true,
3516 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3517 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3518 TRB_LEN(8) | TRB_INTR_TARGET(0),
3519			/* Immediate data in pointer */
3520 field);
3521
3522	/* If there's data, queue data TRBs */
3523	/* Only set interrupt on short packet for IN endpoints */
3524 if (usb_urb_dir_in(urb))
3525 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3526 else
3527 field = TRB_TYPE(TRB_DATA);
3528
3529 remainder = xhci_td_remainder(xhci, 0,
3530 urb->transfer_buffer_length,
3531 urb->transfer_buffer_length,
3532 urb, 1);
3533
3534 length_field = TRB_LEN(urb->transfer_buffer_length) |
3535 TRB_TD_SIZE(remainder) |
3536 TRB_INTR_TARGET(0);
3537
3538 if (urb->transfer_buffer_length > 0) {
3539 if (setup->bRequestType & USB_DIR_IN)
3540 field |= TRB_DIR_IN;
3541 queue_trb(xhci, ep_ring, true,
3542 lower_32_bits(urb->transfer_dma),
3543 upper_32_bits(urb->transfer_dma),
3544 length_field,
3545 field | ep_ring->cycle_state);
3546 }
3547
3548	/* Save the DMA address of the last TRB in the TD */
3549 td->last_trb = ep_ring->enqueue;
3550
3551	/* Queue the status TRB - see Section 4.11.2.2 */
3552	/* If the device sent data, the status stage is an OUT transfer */
3553 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3554 field = 0;
3555 else
3556 field = TRB_DIR_IN;
3557 queue_trb(xhci, ep_ring, false,
3558 0,
3559 0,
3560 TRB_INTR_TARGET(0),
3561			/* Event on completion */
3562 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3563
3564 giveback_first_trb(xhci, slot_id, ep_index, 0,
3565 start_cycle, start_trb);
3566 return 0;
3567}
3568
3569
3570/*
3571 * The transfer burst count field of the isochronous TRB defines the number
3572 * of bursts that are required to move all packets in this TD.  Only
3573 * SuperSpeed devices can burst up to bMaxBurst number of packets per
3574 * service interval.  This field is zero based, meaning a transfer burst
3575 * count of zero means one burst.
3576 */
3577static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3578 struct urb *urb, unsigned int total_packet_count)
3579{
3580 unsigned int max_burst;
3581
3582 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3583 return 0;
3584
3585 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3586 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3587}
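
/*
 * Worked example (illustrative): a SuperSpeed TD of 7 packets on an endpoint
 * with bMaxBurst = 3 (bursts of up to 4 packets) needs
 * DIV_ROUND_UP(7, 4) = 2 bursts, returned zero based as 1.
 */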
3588
3589/*
3590 * Returns the number of packets in the last "burst" of packets.  This field
3591 * is valid for all speeds of devices.  USB 2.0 devices can only do one
3592 * "burst", so the last burst packet count is equal to the total number of
3593 * packets in the TD.  SuperSpeed endpoints can have up to 3 bursts.  All but
3594 * the last burst must contain (bMaxBurst + 1) number of packets, but the
3595 * last burst can contain 1 to (bMaxBurst + 1) packets.
3596 */
3597static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3598 struct urb *urb, unsigned int total_packet_count)
3599{
3600 unsigned int max_burst;
3601 unsigned int residue;
3602
3603 if (xhci->hci_version < 0x100)
3604 return 0;
3605
3606 if (urb->dev->speed >= USB_SPEED_SUPER) {
3607		/* bMaxBurst is zero based: 0 means 1 packet per burst */
3608 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3609 residue = total_packet_count % (max_burst + 1);
3610		/* If residue is zero, the last burst contains (max_burst + 1)
3611		 * packets, but the TLBPC field is zero based.
3612		 */
3613 if (residue == 0)
3614 return max_burst;
3615 return residue - 1;
3616 }
3617 if (total_packet_count == 0)
3618 return 0;
3619 return total_packet_count - 1;
3620}
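
/*
 * Worked example (illustrative): continuing the 7-packet, bMaxBurst = 3 case
 * above, residue = 7 % 4 = 3, so the last burst holds 3 packets and the
 * function returns 3 - 1 = 2 (zero based).
 */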
3621
3622/*
3623 * The Frame ID field of an isochronous TRB identifies the target frame
3624 * that the interval associated with this Isochronous Transfer Descriptor
3625 * will start on.  Refer to section 4.11.2.5 in the xHCI 1.1 spec.
3626 *
3627 * Returns the actual frame ID on success, a negative value on error.
3628 */
3629static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3630 struct urb *urb, int index)
3631{
3632 int start_frame, ist, ret = 0;
3633 int start_frame_id, end_frame_id, current_frame_id;
3634
3635 if (urb->dev->speed == USB_SPEED_LOW ||
3636 urb->dev->speed == USB_SPEED_FULL)
3637 start_frame = urb->start_frame + index * urb->interval;
3638 else
3639 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3640
3641	/*
3642	 * Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3643	 * the xHC may start fetching a TD up to IST (micro)frames ahead of
3644	 * where it is executing, so the Frame ID we schedule must be at
3645	 * least IST + 1 (micro)frames in the future.  If bit 3 of the IST
3646	 * field is set, the value is expressed in frames rather than
3647	 * microframes, hence the shift by 3 below.
3648	 */
3649 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3650 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3651 ist <<= 3;
3652
3653	/* Software shall not schedule an Isoch TD with a Frame ID value that
3654	 * is less than the Start Frame ID or greater than the End Frame ID,
3655	 * where:
3656	 *
3657	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3658	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3659	 *
3660	 * Both values are calculated in microframes.  When software picks a
3661	 * valid Frame ID, the End Frame ID is rounded down to the nearest
3662	 * multiple of 8, and the Start Frame ID is rounded up to the nearest
3663	 * multiple of 8.
3664	 */
3665
3666 current_frame_id = readl(&xhci->run_regs->microframe_index);
3667 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3668 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3669
3670 start_frame &= 0x7ff;
3671 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3672 end_frame_id = (end_frame_id >> 3) & 0x7ff;
3673
3674 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3675 __func__, index, readl(&xhci->run_regs->microframe_index),
3676 start_frame_id, end_frame_id, start_frame);
3677
3678 if (start_frame_id < end_frame_id) {
3679 if (start_frame > end_frame_id ||
3680 start_frame < start_frame_id)
3681 ret = -EINVAL;
3682 } else if (start_frame_id > end_frame_id) {
3683 if ((start_frame > end_frame_id &&
3684 start_frame < start_frame_id))
3685 ret = -EINVAL;
3686 } else {
3687 ret = -EINVAL;
3688 }
3689
3690 if (index == 0) {
3691 if (ret == -EINVAL || start_frame == start_frame_id) {
3692 start_frame = start_frame_id + 1;
3693 if (urb->dev->speed == USB_SPEED_LOW ||
3694 urb->dev->speed == USB_SPEED_FULL)
3695 urb->start_frame = start_frame;
3696 else
3697 urb->start_frame = start_frame << 3;
3698 ret = 0;
3699 }
3700 }
3701
3702 if (ret) {
3703 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3704 start_frame, current_frame_id, index,
3705 start_frame_id, end_frame_id);
3706		xhci_warn(xhci, "Ignoring Frame ID field, using SIA bit instead\n");
3707 return ret;
3708 }
3709
3710 return start_frame;
3711}
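
/*
 * Worked example (illustrative): with MFINDEX = 256 microframes and an IST
 * of 8 microframes, start_frame_id = roundup(256 + 8 + 1, 8) = 272 and
 * end_frame_id = rounddown(256 + 895 * 8, 8) = 7416; after the shift these
 * become frames 34 and 927, so any Frame ID in [34, 927] (mod 2048) is valid.
 */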
3712
3713/* Queue the isochronous TDs of an URB; caller must hold xhci->lock */
3714static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3715 struct urb *urb, int slot_id, unsigned int ep_index)
3716{
3717 struct xhci_ring *ep_ring;
3718 struct urb_priv *urb_priv;
3719 struct xhci_td *td;
3720 int num_tds, trbs_per_td;
3721 struct xhci_generic_trb *start_trb;
3722 bool first_trb;
3723 int start_cycle;
3724 u32 field, length_field;
3725 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3726 u64 start_addr, addr;
3727 int i, j;
3728 bool more_trbs_coming;
3729 struct xhci_virt_ep *xep;
3730 int frame_id;
3731
3732 xep = &xhci->devs[slot_id]->eps[ep_index];
3733 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3734
3735 num_tds = urb->number_of_packets;
3736 if (num_tds < 1) {
3737 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3738 return -EINVAL;
3739 }
3740 start_addr = (u64) urb->transfer_dma;
3741 start_trb = &ep_ring->enqueue->generic;
3742 start_cycle = ep_ring->cycle_state;
3743
3744 urb_priv = urb->hcpriv;
3745
3746 for (i = 0; i < num_tds; i++) {
3747 unsigned int total_pkt_count, max_pkt;
3748 unsigned int burst_count, last_burst_pkt_count;
3749 u32 sia_frame_id;
3750
3751 first_trb = true;
3752 running_total = 0;
3753 addr = start_addr + urb->iso_frame_desc[i].offset;
3754 td_len = urb->iso_frame_desc[i].length;
3755 td_remain_len = td_len;
3756 max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3757 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
3758
3759		/* A zero-length transfer still involves at least one packet */
3760 if (total_pkt_count == 0)
3761 total_pkt_count++;
3762 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
3763 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
3764 urb, total_pkt_count);
3765
3766 trbs_per_td = count_isoc_trbs_needed(urb, i);
3767
3768 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3769 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3770 if (ret < 0) {
3771 if (i == 0)
3772 return ret;
3773 goto cleanup;
3774 }
3775 td = urb_priv->td[i];
3776
3777		/* Use SIA as the default; overwrite it if a valid frame ID is computed */
3778 sia_frame_id = TRB_SIA;
3779 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3780 HCC_CFC(xhci->hcc_params)) {
3781 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
3782 if (frame_id >= 0)
3783 sia_frame_id = TRB_FRAME_ID(frame_id);
3784 }
3785
3786		/* Set isoc specific data for the first TRB in a TD.
3787		 * Keep the cycle state of the first TD's first TRB inverted
3788		 * so the HW can't start it early; giveback_first_trb()
3789		 * flips it once the whole URB is queued. */
3790 field = TRB_TYPE(TRB_ISOC) |
3791 TRB_TLBPC(last_burst_pkt_count) |
3792 sia_frame_id |
3793 (i ? ep_ring->cycle_state : !start_cycle);
3794
3795		/* xHCI 1.1 with ETE uses the TD_Size field for TBC; older hosts reserve it */
3796 if (!xep->use_extended_tbc)
3797 field |= TRB_TBC(burst_count);
3798
3799		/* Fill the rest of the TRB fields, and remaining normal TRBs */
3800 for (j = 0; j < trbs_per_td; j++) {
3801 u32 remainder = 0;
3802
3803			/* Only the first TRB is isoc; overwrite the type otherwise */
3804 if (!first_trb)
3805 field = TRB_TYPE(TRB_NORMAL) |
3806 ep_ring->cycle_state;
3807
3808			/* Only set interrupt on short packet for IN EPs */
3809 if (usb_urb_dir_in(urb))
3810 field |= TRB_ISP;
3811
3812			/* Set the chain bit for all except the last TRB */
3813 if (j < trbs_per_td - 1) {
3814 more_trbs_coming = true;
3815 field |= TRB_CHAIN;
3816 } else {
3817 more_trbs_coming = false;
3818 td->last_trb = ep_ring->enqueue;
3819 field |= TRB_IOC;
3820				/* Set BEI, except for the last TD */
3821 if (xhci->hci_version >= 0x100 &&
3822 !(xhci->quirks & XHCI_AVOID_BEI) &&
3823 i < num_tds - 1)
3824 field |= TRB_BEI;
3825 }
3826
3827 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3828 if (trb_buff_len > td_remain_len)
3829 trb_buff_len = td_remain_len;
3830
3831			/* Set the TRB length, TD size, and interrupter fields */
3832 remainder = xhci_td_remainder(xhci, running_total,
3833 trb_buff_len, td_len,
3834 urb, more_trbs_coming);
3835
3836 length_field = TRB_LEN(trb_buff_len) |
3837 TRB_INTR_TARGET(0);
3838
3839			/* xHCI 1.1 with ETE uses the TD Size field for TBC */
3840 if (first_trb && xep->use_extended_tbc)
3841 length_field |= TRB_TD_SIZE_TBC(burst_count);
3842 else
3843 length_field |= TRB_TD_SIZE(remainder);
3844 first_trb = false;
3845
3846 queue_trb(xhci, ep_ring, more_trbs_coming,
3847 lower_32_bits(addr),
3848 upper_32_bits(addr),
3849 length_field,
3850 field);
3851 running_total += trb_buff_len;
3852
3853 addr += trb_buff_len;
3854 td_remain_len -= trb_buff_len;
3855 }
3856
3857		/* Check TD length */
3858		if (running_total != td_len) {
3859			xhci_err(xhci, "ISOC TD length mismatch\n");
3860 ret = -EINVAL;
3861 goto cleanup;
3862 }
3863 }
3864
3865	/* Store the next frame ID */
3866 if (HCC_CFC(xhci->hcc_params))
3867 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
3868
3869 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3870 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3871 usb_amd_quirk_pll_disable();
3872 }
3873 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3874
3875 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3876 start_cycle, start_trb);
3877 return 0;
3878cleanup:
3879	/* Clean up a partially enqueued isoc transfer. */
3880
3881 for (i--; i >= 0; i--)
3882 list_del_init(&urb_priv->td[i]->td_list);
3883
3884	/* Use the first TD as a temporary variable to turn the TDs we've
3885	 * queued into No-ops with a software-owned cycle bit.  That way the
3886	 * hardware won't accidentally start executing bogus TDs when we
3887	 * partially overwrite them.  td->first_trb and td->start_seg are
3888	 * already set. */
3889 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3890
3891 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3892
3893	/* Reset the ring enqueue back to the first TRB and its cycle bit */
3894 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3895 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3896 ep_ring->cycle_state = start_cycle;
3897 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3898 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3899 return ret;
3900}
3901
3902/*
3903 * Check the transfer ring to guarantee there is enough room for the whole
3904 * URB, and update the ISO URB's start_frame and interval.  The interval is
3905 * updated as xhci_queue_intr_tx does.  The xHC frame index is used to set
3906 * urb->start_frame if URB_ISO_ASAP is set in transfer_flags or if the
3907 * Contiguous Frame ID capability is not supported by the HC.
3908 */
3909int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3910 struct urb *urb, int slot_id, unsigned int ep_index)
3911{
3912 struct xhci_virt_device *xdev;
3913 struct xhci_ring *ep_ring;
3914 struct xhci_ep_ctx *ep_ctx;
3915 int start_frame;
3916 int num_tds, num_trbs, i;
3917 int ret;
3918 struct xhci_virt_ep *xep;
3919 int ist;
3920
3921 xdev = xhci->devs[slot_id];
3922 xep = &xhci->devs[slot_id]->eps[ep_index];
3923 ep_ring = xdev->eps[ep_index].ring;
3924 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3925
3926 num_trbs = 0;
3927 num_tds = urb->number_of_packets;
3928 for (i = 0; i < num_tds; i++)
3929 num_trbs += count_isoc_trbs_needed(urb, i);
3930
3931	/* Check the ring to guarantee there is enough room for the whole URB.
3932	 * Do not insert any TD of the URB into the ring if the check fails.
3933	 */
3934 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3935 num_trbs, mem_flags);
3936 if (ret)
3937 return ret;
3938
3939	/*
3940	 * Check the interval value.  This should be done before we start to
3941	 * calculate the start frame value.
3942	 */
3943 check_interval(xhci, urb, ep_ctx);
3944
3945	/* Calculate the start frame and put it in urb->start_frame. */
3946 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
3947 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
3948 EP_STATE_RUNNING) {
3949 urb->start_frame = xep->next_frame_id;
3950 goto skip_start_over;
3951 }
3952 }
3953
3954 start_frame = readl(&xhci->run_regs->microframe_index);
3955 start_frame &= 0x3fff;
3956
3957	/* Round up to the next frame and allow for the time before the
3958	 * TRB really gets scheduled by the hardware.
3959	 */
3960 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3961 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3962 ist <<= 3;
3963 start_frame += ist + XHCI_CFC_DELAY;
3964 start_frame = roundup(start_frame, 8);
3965
3966	/*
3967	 * Round up to the next ESIT (Endpoint Service Interval Time) if the
3968	 * ESIT is greater than 8 microframes.
3969	 */
3970 if (urb->dev->speed == USB_SPEED_LOW ||
3971 urb->dev->speed == USB_SPEED_FULL) {
3972 start_frame = roundup(start_frame, urb->interval << 3);
3973 urb->start_frame = start_frame >> 3;
3974 } else {
3975 start_frame = roundup(start_frame, urb->interval);
3976 urb->start_frame = start_frame;
3977 }
3978
3979skip_start_over:
3980 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3981
3982 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
3983}
3984
3985/****		Command Ring Operations		****/
3986
3987/* Generic function for queueing a command TRB on the command ring.
3988 * Check to make sure there's room on the command ring for one command TRB.
3989 * Also check that there's room reserved for commands that must not fail.
3990 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3991 * then only check for the number of reserved spots.
3992 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3993 * because the command event handler may want to resubmit a failed command.
3994 */
3995static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3996 u32 field1, u32 field2,
3997 u32 field3, u32 field4, bool command_must_succeed)
3998{
3999 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4000 int ret;
4001
4002 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4003 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4004 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4005 return -ESHUTDOWN;
4006 }
4007
4008 if (!command_must_succeed)
4009 reserved_trbs++;
4010
4011 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4012 reserved_trbs, GFP_ATOMIC);
4013 if (ret < 0) {
4014 xhci_err(xhci, "ERR: No room for command on command ring\n");
4015 if (command_must_succeed)
4016 xhci_err(xhci, "ERR: Reserved TRB counting for "
4017 "unfailable commands failed.\n");
4018 return ret;
4019 }
4020
4021 cmd->command_trb = xhci->cmd_ring->enqueue;
4022 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4023
4024	/* If there are no other commands queued, start the timeout timer */
4025 if (xhci->cmd_list.next == &cmd->cmd_list &&
4026 !timer_pending(&xhci->cmd_timer)) {
4027 xhci->current_cmd = cmd;
4028 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
4029 }
4030
4031 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4032 field4 | xhci->cmd_ring->cycle_state);
4033 return 0;
4034}
4035
4036/* Queue a slot enable or disable request on the command ring */
4037int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4038 u32 trb_type, u32 slot_id)
4039{
4040 return queue_command(xhci, cmd, 0, 0, 0,
4041 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4042}
4043
4044/* Queue an address device command TRB */
4045int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4046 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4047{
4048 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4049 upper_32_bits(in_ctx_ptr), 0,
4050 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4051 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4052}
4053
4054int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4055 u32 field1, u32 field2, u32 field3, u32 field4)
4056{
4057 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4058}
4059
4060/* Queue a reset device command TRB */
4061int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4062 u32 slot_id)
4063{
4064 return queue_command(xhci, cmd, 0, 0, 0,
4065 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4066 false);
4067}
4068
4069/* Queue a configure endpoint command TRB */
4070int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4071 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4072 u32 slot_id, bool command_must_succeed)
4073{
4074 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4075 upper_32_bits(in_ctx_ptr), 0,
4076 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4077 command_must_succeed);
4078}
4079
4080/* Queue an evaluate context command TRB */
4081int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4082 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4083{
4084 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4085 upper_32_bits(in_ctx_ptr), 0,
4086 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4087 command_must_succeed);
4088}
4089
4090/*
4091 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
4092 * activity on an endpoint that is about to be suspended.
4093 */
4094int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4095 int slot_id, unsigned int ep_index, int suspend)
4096{
4097 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4098 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4099 u32 type = TRB_TYPE(TRB_STOP_RING);
4100 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4101
4102 return queue_command(xhci, cmd, 0, 0, 0,
4103 trb_slot_id | trb_ep_index | type | trb_suspend, false);
4104}
4105
4106/* Queue a Set Transfer Ring Dequeue Pointer command */
4107void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
4108 unsigned int slot_id, unsigned int ep_index,
4109 unsigned int stream_id,
4110 struct xhci_dequeue_state *deq_state)
4111{
4112 dma_addr_t addr;
4113 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4114 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4115 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
4116 u32 trb_sct = 0;
4117 u32 type = TRB_TYPE(TRB_SET_DEQ);
4118 struct xhci_virt_ep *ep;
4119 struct xhci_command *cmd;
4120 int ret;
4121
4122 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4123 "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
4124 deq_state->new_deq_seg,
4125 (unsigned long long)deq_state->new_deq_seg->dma,
4126 deq_state->new_deq_ptr,
4127 (unsigned long long)xhci_trb_virt_to_dma(
4128 deq_state->new_deq_seg, deq_state->new_deq_ptr),
4129 deq_state->new_cycle_state);
4130
4131 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
4132 deq_state->new_deq_ptr);
4133 if (addr == 0) {
4134 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4135 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
4136 deq_state->new_deq_seg, deq_state->new_deq_ptr);
4137 return;
4138 }
4139 ep = &xhci->devs[slot_id]->eps[ep_index];
4140 if ((ep->ep_state & SET_DEQ_PENDING)) {
4141 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4142 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
4143 return;
4144 }
4145
4146	/* This function gets called from contexts where it cannot sleep */
4147 cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
4148 if (!cmd) {
4149 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
4150 return;
4151 }
4152
4153 ep->queued_deq_seg = deq_state->new_deq_seg;
4154 ep->queued_deq_ptr = deq_state->new_deq_ptr;
4155 if (stream_id)
4156 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
4157 ret = queue_command(xhci, cmd,
4158 lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
4159 upper_32_bits(addr), trb_stream_id,
4160 trb_slot_id | trb_ep_index | type, false);
4161 if (ret < 0) {
4162 xhci_free_command(xhci, cmd);
4163 return;
4164 }
4165
4166	/* Stop the TD queueing code from ringing the doorbell until
4167	 * this command completes.  The HC won't set the dequeue pointer
4168	 * if the ring is running, and ringing the doorbell starts the
4169	 * ring running.
4170	 */
4171 ep->ep_state |= SET_DEQ_PENDING;
4172}
4173
4174int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4175 int slot_id, unsigned int ep_index)
4176{
4177 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4178 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4179 u32 type = TRB_TYPE(TRB_RESET_EP);
4180
4181 return queue_command(xhci, cmd, 0, 0, 0,
4182 trb_slot_id | trb_ep_index | type, false);
4183}
4184