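/*
 * xHCI host controller driver - ring/TRB manipulation and event handling
 */
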
67#include <linux/scatterlist.h>
68#include <linux/slab.h>
69#include "xhci.h"
70#include "xhci-trace.h"
71
72static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
73 struct xhci_virt_device *virt_dev,
74 struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
80dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
81 union xhci_trb *trb)
82{
83 unsigned long segment_offset;
84
	if (!seg || !trb || trb < seg->trbs)
		return 0;

	segment_offset = trb - seg->trbs;
	/* The TRB must lie within the segment, which holds TRBS_PER_SEGMENT TRBs */
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
92}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
97static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
98 struct xhci_segment *seg, union xhci_trb *trb)
99{
100 if (ring == xhci->event_ring)
101 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
102 (seg->next == xhci->event_ring->first_seg);
103 else
104 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
105}

/* Is this TRB a link TRB or was the last TRB the last TRB in this segment?
 * I.e. would the updated event TRB pointer step off the end of the event seg?
 */
111static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
112 struct xhci_segment *seg, union xhci_trb *trb)
113{
114 if (ring == xhci->event_ring)
115 return trb == &seg->trbs[TRBS_PER_SEGMENT];
116 else
117 return TRB_TYPE_LINK_LE32(trb->link.control);
118}
119
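/* Returns true if the ring's enqueue pointer currently rests on a link TRB. */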
120static int enqueue_is_link_trb(struct xhci_ring *ring)
121{
122 struct xhci_link_trb *link = &ring->enqueue->link;
123 return TRB_TYPE_LINK_LE32(link->control);
124}
125
126union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
127{
	/* The enqueue pointer may be left pointing to a link TRB; in that case
	 * the next usable TRB is the first TRB of the next segment.
	 */
131 if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
132 return ring->enq_seg->next->trbs;
133 return ring->enqueue;
134}

/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs.
 */
140static void next_trb(struct xhci_hcd *xhci,
141 struct xhci_ring *ring,
142 struct xhci_segment **seg,
143 union xhci_trb **trb)
144{
145 if (last_trb(xhci, ring, *seg, *trb)) {
146 *seg = (*seg)->next;
147 *trb = ((*seg)->trbs);
148 } else {
149 (*trb)++;
150 }
151}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
157static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
158{
161 ring->deq_updates++;

	/*
	 * If this is not an event ring, and the dequeue pointer
	 * is not on a link TRB, there is one more usable TRB
	 */
167 if (ring->type != TYPE_EVENT &&
168 !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
169 ring->num_trbs_free++;
170
171 do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs)
		 */
177 if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
178 if (ring->type == TYPE_EVENT &&
179 last_trb_on_last_seg(xhci, ring,
180 ring->deq_seg, ring->dequeue)) {
181 ring->cycle_state ^= 1;
182 }
183 ring->deq_seg = ring->deq_seg->next;
184 ring->dequeue = ring->deq_seg->trbs;
185 } else {
186 ring->dequeue++;
187 }
188 } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
191}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
210static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
211 bool more_trbs_coming)
212{
213 u32 chain;
214 union xhci_trb *next;
216
217 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
218
219 if (ring->type != TYPE_EVENT &&
220 !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
221 ring->num_trbs_free--;
222 next = ++(ring->enqueue);
223
224 ring->enq_updates++;
	/* Advance the enqueue pointer past any link TRB, or past the last TRB
	 * of an event ring segment (event segments have no link TRBs).
	 */
228 while (last_trb(xhci, ring, ring->enq_seg, next)) {
229 if (ring->type != TYPE_EVENT) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet.  We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
238 if (!chain && !more_trbs_coming)
239 break;

			/* If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
246 if (!(ring->type == TYPE_ISOC &&
247 (xhci->quirks & XHCI_AMD_0x96_HOST))
248 && !xhci_link_trb_quirk(xhci)) {
249 next->link.control &=
250 cpu_to_le32(~TRB_CHAIN);
251 next->link.control |=
252 cpu_to_le32(chain);
253 }
			/* Give this link TRB to the hardware */
255 wmb();
256 next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
259 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
260 ring->cycle_state = (ring->cycle_state ? 0 : 1);
261 }
262 }
263 ring->enq_seg = ring->enq_seg->next;
264 ring->enqueue = ring->enq_seg->trbs;
265 next = ring->enqueue;
266 }
268}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.
 */
274static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
275 unsigned int num_trbs)
276{
277 int num_trbs_in_deq_seg;
278
279 if (ring->num_trbs_free < num_trbs)
280 return 0;
281
282 if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
283 num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
284 if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
285 return 0;
286 }
287
288 return 1;
289}

/* Ring the host controller doorbell after placing a command on the ring */
292void xhci_ring_cmd_db(struct xhci_hcd *xhci)
293{
294 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
295 return;
296
297 xhci_dbg(xhci, "// Ding dong!\n");
298 xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
300 xhci_readl(xhci, &xhci->dba->doorbell[0]);
301}
302
303static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
304{
305 u64 temp_64;
306 int ret;
307
308 xhci_dbg(xhci, "Abort command ring\n");
309
310 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
		xhci_dbg(xhci, "The command ring isn't running; "
				"has it already been stopped?\n");
313 return 0;
314 }
315
316 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
317 if (!(temp_64 & CMD_RING_RUNNING)) {
318 xhci_dbg(xhci, "Command ring had been stopped\n");
319 return 0;
320 }
321 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
322 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
323 &xhci->op_regs->cmd_ring);

	/*
	 * Writing the CMD_RING_ABORT bit should stop the command ring and
	 * generate a Command Completion event.  Poll the CRR bit for up to
	 * five seconds; if it never clears, assume the host controller is
	 * dead and halt it.
	 */
332 ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
333 CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
334 if (ret < 0) {
		xhci_err(xhci, "Stopping the command ring failed; "
				"the host may be dead\n");
337 xhci->xhc_state |= XHCI_STATE_DYING;
338 xhci_quiesce(xhci);
339 xhci_halt(xhci);
340 return -ESHUTDOWN;
341 }
342
343 return 0;
344}
345
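/* Allocate a command descriptor for a command that should be cancelled and
 * add it to xhci->cancel_cmd_list.  Called with xhci->lock held.
 */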
346static int xhci_queue_cd(struct xhci_hcd *xhci,
347 struct xhci_command *command,
348 union xhci_trb *cmd_trb)
349{
350 struct xhci_cd *cd;
351 cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
352 if (!cd)
353 return -ENOMEM;
354 INIT_LIST_HEAD(&cd->cancel_cmd_list);
355
356 cd->command = command;
357 cd->cmd_trb = cmd_trb;
358 list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
359
360 return 0;
361}

/*
 * Cancel a command that may already have been issued to the hardware.
 *
 * Some commands can hang waiting for an acknowledgement from the USB device
 * (for example when a virtual device disconnects).  Queue a command
 * descriptor so the command is turned into a no-op once the ring stops, then
 * abort the command ring.
 */
372int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
373 union xhci_trb *cmd_trb)
374{
375 int retval = 0;
376 unsigned long flags;
377
378 spin_lock_irqsave(&xhci->lock, flags);
379
380 if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_warn(xhci, "Tried to abort the command ring, "
				"but the xHC is dead.\n");
383 retval = -ESHUTDOWN;
384 goto fail;
385 }
386
387
388 retval = xhci_queue_cd(xhci, command, cmd_trb);
389 if (retval) {
390 xhci_warn(xhci, "Queuing command descriptor failed.\n");
391 goto fail;
392 }
393
394
395 retval = xhci_abort_cmd_ring(xhci);
396 if (retval) {
397 xhci_err(xhci, "Abort command ring failed\n");
398 if (unlikely(retval == -ESHUTDOWN)) {
399 spin_unlock_irqrestore(&xhci->lock, flags);
400 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
401 xhci_dbg(xhci, "xHCI host controller is dead.\n");
402 return retval;
403 }
404 }
405
406fail:
407 spin_unlock_irqrestore(&xhci->lock, flags);
408 return retval;
409}
410
411void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
412 unsigned int slot_id,
413 unsigned int ep_index,
414 unsigned int stream_id)
415{
416 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
417 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
418 unsigned int ep_state = ep->ep_state;
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
427 if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
428 (ep_state & EP_HALTED))
429 return;
430 xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posted register read, so the doorbell write is not flushed.
	 */
434}

/* Ring the doorbell for any rings with pending URBs */
437static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
438 unsigned int slot_id,
439 unsigned int ep_index)
440{
441 unsigned int stream_id;
442 struct xhci_virt_ep *ep;
443
444 ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
447 if (!(ep->ep_state & EP_HAS_STREAMS)) {
448 if (ep->ring && !(list_empty(&ep->ring->td_list)))
449 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
450 return;
451 }
452
453 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
454 stream_id++) {
455 struct xhci_stream_info *stream_info = ep->stream_info;
456 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
457 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
458 stream_id);
459 }
460}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
467static struct xhci_segment *find_trb_seg(
468 struct xhci_segment *start_seg,
469 union xhci_trb *trb, int *cycle_state)
470{
471 struct xhci_segment *cur_seg = start_seg;
472 struct xhci_generic_trb *generic_trb;
473
474 while (cur_seg->trbs > trb ||
475 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
476 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
477 if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
478 *cycle_state ^= 0x1;
479 cur_seg = cur_seg->next;
480 if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
482 return NULL;
483 }
484 return cur_seg;
485}
486
487
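/* Map a (slot ID, endpoint index, stream ID) triple to its transfer ring.
 * Returns NULL (with a warning) if the stream ID is invalid for this endpoint.
 */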
488static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
489 unsigned int slot_id, unsigned int ep_index,
490 unsigned int stream_id)
491{
492 struct xhci_virt_ep *ep;
493
494 ep = &xhci->devs[slot_id]->eps[ep_index];
495
496 if (!(ep->ep_state & EP_HAS_STREAMS))
497 return ep->ring;
498
499 if (stream_id == 0) {
500 xhci_warn(xhci,
501 "WARN: Slot ID %u, ep index %u has streams, "
502 "but URB has no stream ID.\n",
503 slot_id, ep_index);
504 return NULL;
505 }
506
507 if (stream_id < ep->stream_info->num_streams)
508 return ep->stream_info->stream_rings[stream_id];
509
510 xhci_warn(xhci,
511 "WARN: Slot ID %u, ep index %u has "
512 "stream IDs 1 to %u allocated, "
513 "but stream ID %u is requested.\n",
514 slot_id, ep_index,
515 ep->stream_info->num_streams - 1,
516 stream_id);
517 return NULL;
518}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
524static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
525 struct urb *urb)
526{
527 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
528 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
529}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
549void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
550 unsigned int slot_id, unsigned int ep_index,
551 unsigned int stream_id, struct xhci_td *cur_td,
552 struct xhci_dequeue_state *state)
553{
554 struct xhci_virt_device *dev = xhci->devs[slot_id];
555 struct xhci_ring *ep_ring;
556 struct xhci_generic_trb *trb;
557 struct xhci_ep_ctx *ep_ctx;
558 dma_addr_t addr;
559
560 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
561 ep_index, stream_id);
562 if (!ep_ring) {
563 xhci_warn(xhci, "WARN can't find new dequeue state "
564 "for invalid stream ID %u.\n",
565 stream_id);
566 return;
567 }
568 state->new_cycle_state = 0;
569 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
570 "Finding segment containing stopped TRB.");
571 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
572 dev->eps[ep_index].stopped_trb,
573 &state->new_cycle_state);
574 if (!state->new_deq_seg) {
575 WARN_ON(1);
576 return;
577 }

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
580 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
581 "Finding endpoint context");
582 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
583 state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
584
585 state->new_deq_ptr = cur_td->last_trb;
586 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
587 "Finding segment containing last TRB in TD.");
588 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
589 state->new_deq_ptr,
590 &state->new_cycle_state);
591 if (!state->new_deq_seg) {
592 WARN_ON(1);
593 return;
594 }
595
596 trb = &state->new_deq_ptr->generic;
597 if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
598 (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
599 state->new_cycle_state ^= 0x1;
600 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  In a single-segment ring, toggle the
	 * cycle bit here if the new dequeue pointer wrapped around behind the
	 * stopped TRB.
	 */
611 if (ep_ring->first_seg == ep_ring->first_seg->next &&
612 state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
613 state->new_cycle_state ^= 0x1;
614 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
615 "Cycle state = 0x%x", state->new_cycle_state);

	/* Don't update the ring cycle state for the producer (us). */
618 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
619 "New dequeue segment = %p (virtual)",
620 state->new_deq_seg);
621 addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
622 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
623 "New dequeue pointer = 0x%llx (DMA)",
624 (unsigned long long) addr);
625}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
631static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
632 struct xhci_td *cur_td, bool flip_cycle)
633{
634 struct xhci_segment *cur_seg;
635 union xhci_trb *cur_trb;
636
637 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
638 true;
639 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
640 if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
644 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
648 if (flip_cycle)
649 cur_trb->generic.field[3] ^=
650 cpu_to_le32(TRB_CYCLE);
651 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
652 "Cancel (unchain) link TRB");
653 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
654 "Address = %p (0x%llx dma); "
655 "in seg %p (0x%llx dma)",
656 cur_trb,
657 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
658 cur_seg,
659 (unsigned long long)cur_seg->dma);
660 } else {
661 cur_trb->generic.field[0] = 0;
662 cur_trb->generic.field[1] = 0;
663 cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
665 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
667 if (flip_cycle && cur_trb != cur_td->first_trb &&
668 cur_trb != cur_td->last_trb)
669 cur_trb->generic.field[3] ^=
670 cpu_to_le32(TRB_CYCLE);
671 cur_trb->generic.field[3] |= cpu_to_le32(
672 TRB_TYPE(TRB_TR_NOOP));
673 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
674 "TRB to noop at offset 0x%llx",
675 (unsigned long long)
676 xhci_trb_virt_to_dma(cur_seg, cur_trb));
677 }
678 if (cur_trb == cur_td->last_trb)
679 break;
680 }
681}
682
683static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
684 unsigned int ep_index, unsigned int stream_id,
685 struct xhci_segment *deq_seg,
686 union xhci_trb *deq_ptr, u32 cycle_state);
687
688void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
689 unsigned int slot_id, unsigned int ep_index,
690 unsigned int stream_id,
691 struct xhci_dequeue_state *deq_state)
692{
693 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
694
695 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
696 "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
697 "new deq ptr = %p (0x%llx dma), new cycle = %u",
698 deq_state->new_deq_seg,
699 (unsigned long long)deq_state->new_deq_seg->dma,
700 deq_state->new_deq_ptr,
701 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
702 deq_state->new_cycle_state);
703 queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
704 deq_state->new_deq_seg,
705 deq_state->new_deq_ptr,
706 (u32) deq_state->new_cycle_state);

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
712 ep->ep_state |= SET_DEQ_PENDING;
713}
714
715static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
716 struct xhci_virt_ep *ep)
717{
718 ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
723 if (del_timer(&ep->stop_cmd_timer))
724 ep->stop_cmds_pending--;
725}

/* Must be called with xhci->lock held in interrupt context */
728static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
729 struct xhci_td *cur_td, int status)
730{
731 struct usb_hcd *hcd;
732 struct urb *urb;
733 struct urb_priv *urb_priv;
734
735 urb = cur_td->urb;
736 urb_priv = urb->hcpriv;
737 urb_priv->td_cnt++;
738 hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
741 if (urb_priv->td_cnt == urb_priv->length) {
742 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
743 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
744 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
745 if (xhci->quirks & XHCI_AMD_PLL_FIX)
746 usb_amd_quirk_pll_enable();
747 }
748 }
749 usb_hcd_unlink_urb_from_ep(hcd, urb);
750
751 spin_unlock(&xhci->lock);
752 usb_hcd_giveback_urb(hcd, urb, status);
753 xhci_urb_free_priv(xhci, urb_priv);
754 spin_lock(&xhci->lock);
755 }
756}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
768static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
769 union xhci_trb *trb, struct xhci_event_cmd *event)
770{
771 unsigned int ep_index;
772 struct xhci_virt_device *virt_dev;
773 struct xhci_ring *ep_ring;
774 struct xhci_virt_ep *ep;
775 struct list_head *entry;
776 struct xhci_td *cur_td = NULL;
777 struct xhci_td *last_unlinked_td;
778
779 struct xhci_dequeue_state deq_state;
780
781 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
782 virt_dev = xhci->devs[slot_id];
783 if (virt_dev)
784 handle_cmd_in_cmd_wait_list(xhci, virt_dev,
785 event);
786 else
787 xhci_warn(xhci, "Stop endpoint command "
788 "completion for disabled slot %u\n",
789 slot_id);
790 return;
791 }
792
793 memset(&deq_state, 0, sizeof(deq_state));
794 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
795 ep = &xhci->devs[slot_id]->eps[ep_index];
796
797 if (list_empty(&ep->cancelled_td_list)) {
798 xhci_stop_watchdog_timer_in_irq(xhci, ep);
799 ep->stopped_td = NULL;
800 ep->stopped_trb = NULL;
801 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
802 return;
803 }

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
810 list_for_each(entry, &ep->cancelled_td_list) {
811 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
812 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
813 "Removing canceled TD starting at 0x%llx (dma).",
814 (unsigned long long)xhci_trb_virt_to_dma(
815 cur_td->start_seg, cur_td->first_trb));
816 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
817 if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it
			 * was removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
829 xhci_warn(xhci, "WARN Cancelled URB %p "
830 "has invalid stream ID %u.\n",
831 cur_td->urb,
832 cur_td->urb->stream_id);
833 goto remove_finished_td;
834 }

		/* If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
839 if (cur_td == ep->stopped_td)
840 xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
841 cur_td->urb->stream_id,
842 cur_td, &deq_state);
843 else
844 td_to_noop(xhci, ep_ring, cur_td, false);
845remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
851 list_del_init(&cur_td->td_list);
852 }
853 last_unlinked_td = cur_td;
854 xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
857 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
858 xhci_queue_new_dequeue_state(xhci,
859 slot_id, ep_index,
860 ep->stopped_td->urb->stream_id,
861 &deq_state);
862 xhci_ring_cmd_db(xhci);
863 } else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
865 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
866 }

	/* Clear stopped_td and stopped_trb if the endpoint is not halted */
869 if (!(ep->ep_state & EP_HALTED)) {
870 ep->stopped_td = NULL;
871 ep->stopped_trb = NULL;
872 }

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
880 do {
881 cur_td = list_entry(ep->cancelled_td_list.next,
882 struct xhci_td, cancelled_td_list);
883 list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
889 xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
894 if (xhci->xhc_state & XHCI_STATE_DYING)
895 return;
896 } while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
899}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let it complete any outstanding stop endpoint commands.
 *
 * If a stop endpoint command completes before this timer fires, the completion
 * handler cancels the timer and decrements stop_cmds_pending, so the timer
 * simply exits when it finds no command pending.  Otherwise it marks the host
 * as dying, halts it, and gives back all outstanding URBs so the USB core can
 * recover.
 */
920void xhci_stop_endpoint_command_watchdog(unsigned long arg)
921{
922 struct xhci_hcd *xhci;
923 struct xhci_virt_ep *ep;
924 struct xhci_virt_ep *temp_ep;
925 struct xhci_ring *ring;
926 struct xhci_td *cur_td;
927 int ret, i, j;
928 unsigned long flags;
929
930 ep = (struct xhci_virt_ep *) arg;
931 xhci = ep->xhci;
932
933 spin_lock_irqsave(&xhci->lock, flags);
934
935 ep->stop_cmds_pending--;
936 if (xhci->xhc_state & XHCI_STATE_DYING) {
937 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
938 "Stop EP timer ran, but another timer marked "
939 "xHCI as DYING, exiting.");
940 spin_unlock_irqrestore(&xhci->lock, flags);
941 return;
942 }
943 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
944 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
945 "Stop EP timer ran, but no command pending, "
946 "exiting.");
947 spin_unlock_irqrestore(&xhci->lock, flags);
948 return;
949 }
950
951 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
952 xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
956 xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
958 xhci_quiesce(xhci);
959 spin_unlock_irqrestore(&xhci->lock, flags);
960
961 ret = xhci_halt(xhci);
962
963 spin_lock_irqsave(&xhci->lock, flags);
964 if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
972 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
973 xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
979 }
980 for (i = 0; i < MAX_HC_SLOTS; i++) {
981 if (!xhci->devs[i])
982 continue;
983 for (j = 0; j < 31; j++) {
984 temp_ep = &xhci->devs[i]->eps[j];
985 ring = temp_ep->ring;
986 if (!ring)
987 continue;
988 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
989 "Killing URBs for slot ID %u, "
990 "ep index %u", i, j);
991 while (!list_empty(&ring->td_list)) {
992 cur_td = list_first_entry(&ring->td_list,
993 struct xhci_td,
994 td_list);
995 list_del_init(&cur_td->td_list);
996 if (!list_empty(&cur_td->cancelled_td_list))
997 list_del_init(&cur_td->cancelled_td_list);
998 xhci_giveback_urb_in_irq(xhci, cur_td,
999 -ESHUTDOWN);
1000 }
1001 while (!list_empty(&temp_ep->cancelled_td_list)) {
1002 cur_td = list_first_entry(
1003 &temp_ep->cancelled_td_list,
1004 struct xhci_td,
1005 cancelled_td_list);
1006 list_del_init(&cur_td->cancelled_td_list);
1007 xhci_giveback_urb_in_irq(xhci, cur_td,
1008 -ESHUTDOWN);
1009 }
1010 }
1011 }
1012 spin_unlock_irqrestore(&xhci->lock, flags);
1013 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1014 "Calling usb_hc_died()");
1015 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1016 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1017 "xHCI host controller is dead.");
1018}
1019
/* Walk the ring's dequeue pointer up to the position the xHC reported in the
 * Set TR Dequeue Pointer command completion.
 */
1021static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1022 struct xhci_virt_device *dev,
1023 struct xhci_ring *ep_ring,
1024 unsigned int ep_index)
1025{
1026 union xhci_trb *dequeue_temp;
1027 int num_trbs_free_temp;
1028 bool revert = false;
1029
1030 num_trbs_free_temp = ep_ring->num_trbs_free;
1031 dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
1039 if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
1040 ep_ring->deq_seg = ep_ring->deq_seg->next;
1041 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1042 }
1043
1044 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
1046 ep_ring->num_trbs_free++;
1047 ep_ring->dequeue++;
1048 if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
1049 ep_ring->dequeue)) {
1050 if (ep_ring->dequeue ==
1051 dev->eps[ep_index].queued_deq_ptr)
1052 break;
1053 ep_ring->deq_seg = ep_ring->deq_seg->next;
1054 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1055 }
1056 if (ep_ring->dequeue == dequeue_temp) {
1057 revert = true;
1058 break;
1059 }
1060 }
1061
1062 if (revert) {
1063 xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1064 ep_ring->num_trbs_free = num_trbs_free_temp;
1065 }
1066}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
1075static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1076 union xhci_trb *trb, u32 cmd_comp_code)
1077{
1078 unsigned int ep_index;
1079 unsigned int stream_id;
1080 struct xhci_ring *ep_ring;
1081 struct xhci_virt_device *dev;
1082 struct xhci_ep_ctx *ep_ctx;
1083 struct xhci_slot_ctx *slot_ctx;
1084
1085 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1086 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1087 dev = xhci->devs[slot_id];
1088
1089 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
1090 if (!ep_ring) {
1091 xhci_warn(xhci, "WARN Set TR deq ptr command for "
1092 "freed stream ID %u\n",
1093 stream_id);
		/* XXX: Harmless??? */
1095 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1096 return;
1097 }
1098
1099 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
1100 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
1101
1102 if (cmd_comp_code != COMP_SUCCESS) {
1103 unsigned int ep_state;
1104 unsigned int slot_state;
1105
1106 switch (cmd_comp_code) {
1107 case COMP_TRB_ERR:
1108 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
1109 "of stream ID configuration\n");
1110 break;
1111 case COMP_CTX_STATE:
1112 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
1113 "to incorrect slot or ep state.\n");
1114 ep_state = le32_to_cpu(ep_ctx->ep_info);
1115 ep_state &= EP_STATE_MASK;
1116 slot_state = le32_to_cpu(slot_ctx->dev_state);
1117 slot_state = GET_SLOT_STATE(slot_state);
1118 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1119 "Slot state = %u, EP state = %u",
1120 slot_state, ep_state);
1121 break;
1122 case COMP_EBADSLT:
1123 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
1124 "slot %u was not enabled.\n", slot_id);
1125 break;
1126 default:
1127 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
1128 "completion code of %u.\n",
1129 cmd_comp_code);
1130 break;
1131 }
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
1138 } else {
1139 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1140 "Successful Set TR Deq Ptr cmd, deq = @%08llx",
1141 le64_to_cpu(ep_ctx->deq));
1142 if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
1143 dev->eps[ep_index].queued_deq_ptr) ==
1144 (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
1148 update_ring_for_set_deq_completion(xhci, dev,
1149 ep_ring, ep_index);
1150 } else {
1151 xhci_warn(xhci, "Mismatch between completed Set TR Deq "
1152 "Ptr command & xHCI internal state.\n");
1153 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1154 dev->eps[ep_index].queued_deq_seg,
1155 dev->eps[ep_index].queued_deq_ptr);
1156 }
1157 }
1158
1159 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1160 dev->eps[ep_index].queued_deq_seg = NULL;
1161 dev->eps[ep_index].queued_deq_ptr = NULL;
1162
1163 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1164}
1165
1166static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1167 union xhci_trb *trb, u32 cmd_comp_code)
1168{
1169 unsigned int ep_index;
1170
1171 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
1175 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1176 "Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
1182 if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1183 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1184 "Queueing configure endpoint command");
1185 xhci_queue_configure_endpoint(xhci,
1186 xhci->devs[slot_id]->in_ctx->dma, slot_id,
1187 false);
1188 xhci_ring_cmd_db(xhci);
1189 } else {
		/* Clear our internal halted state and restart the ring(s) */
1191 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1192 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1193 }
1194}

/* Complete the command and delete it from the device's command queue. */
1198static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1199 struct xhci_command *command, u32 status)
1200{
1201 command->status = status;
1202 list_del(&command->cmd_list);
1203 if (command->completion)
1204 complete(command->completion);
1205 else
1206 xhci_free_command(xhci, command);
1207}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
1214static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1215 struct xhci_virt_device *virt_dev,
1216 struct xhci_event_cmd *event)
1217{
1218 struct xhci_command *command;
1219
1220 if (list_empty(&virt_dev->cmd_list))
1221 return 0;
1222
1223 command = list_entry(virt_dev->cmd_list.next,
1224 struct xhci_command, cmd_list);
1225 if (xhci->cmd_ring->dequeue != command->command_trb)
1226 return 0;
1227
1228 xhci_complete_cmd_in_cmd_wait_list(xhci, command,
1229 GET_COMP_CODE(le32_to_cpu(event->status)));
1230 return 1;
1231}

/*
 * Find the command TRB that needs to be cancelled and modify it to a No Op
 * command.  If the command is in a device's command wait list, finish it and
 * free the command structure.  If the command ring dequeue pointer cannot be
 * located in any segment, dump the ring state instead.
 */
1241static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
1242{
1243 struct xhci_segment *cur_seg;
1244 union xhci_trb *cmd_trb;
1245 u32 cycle_state;
1246
1247 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1248 return;

	/* find the current segment of command ring */
1251 cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
1252 xhci->cmd_ring->dequeue, &cycle_state);
1253
1254 if (!cur_seg) {
1255 xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
1256 xhci->cmd_ring->dequeue,
1257 (unsigned long long)
1258 xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1259 xhci->cmd_ring->dequeue));
1260 xhci_debug_ring(xhci, xhci->cmd_ring);
1261 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
1262 return;
1263 }

	/* find the command trb matched by CD from command ring */
1266 for (cmd_trb = xhci->cmd_ring->dequeue;
1267 cmd_trb != xhci->cmd_ring->enqueue;
1268 next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
		/* If the trb is a link trb, continue */
1270 if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
1271 continue;
1272
1273 if (cur_cd->cmd_trb == cmd_trb) {
			/* If the command is in a device's command wait
			 * list, we should finish it and free the command
			 * structure.
			 */
1278 if (cur_cd->command)
1279 xhci_complete_cmd_in_cmd_wait_list(xhci,
1280 cur_cd->command, COMP_CMD_STOP);

			/* get cycle state from the original command trb */
1283 cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
1284 & TRB_CYCLE;
1285
			/* modify the command trb to a No Op command */
1287 cmd_trb->generic.field[0] = 0;
1288 cmd_trb->generic.field[1] = 0;
1289 cmd_trb->generic.field[2] = 0;
1290 cmd_trb->generic.field[3] = cpu_to_le32(
1291 TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1292 break;
1293 }
1294 }
1295}
1296
1297static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
1298{
1299 struct xhci_cd *cur_cd, *next_cd;
1300
1301 if (list_empty(&xhci->cancel_cmd_list))
1302 return;
1303
1304 list_for_each_entry_safe(cur_cd, next_cd,
1305 &xhci->cancel_cmd_list, cancel_cmd_list) {
1306 xhci_cmd_to_noop(xhci, cur_cd);
1307 list_del(&cur_cd->cancel_cmd_list);
1308 kfree(cur_cd);
1309 }
1310}

/*
 * Traverse the cancel_cmd_list.  If a command descriptor matching cmd_trb is
 * found, free it and return 1; otherwise return 0.
 */
1317static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
1318 union xhci_trb *cmd_trb)
1319{
1320 struct xhci_cd *cur_cd, *next_cd;
1321
1322 if (list_empty(&xhci->cancel_cmd_list))
1323 return 0;
1324
1325 list_for_each_entry_safe(cur_cd, next_cd,
1326 &xhci->cancel_cmd_list, cancel_cmd_list) {
1327 if (cur_cd->cmd_trb == cmd_trb) {
1328 if (cur_cd->command)
1329 xhci_complete_cmd_in_cmd_wait_list(xhci,
1330 cur_cd->command, COMP_CMD_STOP);
1331 list_del(&cur_cd->cancel_cmd_list);
1332 kfree(cur_cd);
1333 return 1;
1334 }
1335 }
1336
1337 return 0;
1338}

/*
 * If cmd_trb_comp_code is COMP_CMD_ABORT, check whether the TRB pointed to by
 * the command ring dequeue pointer is one we wanted to cancel.  If it is
 * COMP_CMD_STOP, traverse the cancel_cmd_list, turn all of the commands it
 * describes into No-op TRBs, and restart the command ring.
 */
1347static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1348 int cmd_trb_comp_code)
1349{
1350 int cur_trb_is_good = 0;

	/* Search for the cmd trb pointed to by the command ring dequeue
	 * pointer in the command descriptor list.  If it is found, free it.
	 */
1355 cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
1356 xhci->cmd_ring->dequeue);
1357
1358 if (cmd_trb_comp_code == COMP_CMD_ABORT)
1359 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1360 else if (cmd_trb_comp_code == COMP_CMD_STOP) {
		/* traverse the cancel_cmd_list and cancel each command
		 * according to its command descriptor
		 */
1364 xhci_cancel_cmd_in_cd_list(xhci);
1365
1366 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

		/* ring the command ring doorbell again to restart the
		 * command ring
		 */
1371 if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
1372 xhci_ring_cmd_db(xhci);
1373 }
1374 return cur_trb_is_good;
1375}
1376
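/* Completion handler for an Enable Slot command: record the newly allocated
 * slot ID (or zero on failure) and wake up the waiter on xhci->addr_dev.
 */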
1377static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1378 u32 cmd_comp_code)
1379{
1380 if (cmd_comp_code == COMP_SUCCESS)
1381 xhci->slot_id = slot_id;
1382 else
1383 xhci->slot_id = 0;
1384 complete(&xhci->addr_dev);
1385}
1386
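/* Completion handler for a Disable Slot command: free the virtual device and
 * any host controller resources it was using.
 */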
1387static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1388{
1389 struct xhci_virt_device *virt_dev;
1390
1391 virt_dev = xhci->devs[slot_id];
1392 if (!virt_dev)
1393 return;
1394 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
1396 xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1397 xhci_free_virt_device(xhci, slot_id);
1398}
1399
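/* Completion handler for a Configure Endpoint command. */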
1400static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1401 struct xhci_event_cmd *event, u32 cmd_comp_code)
1402{
1403 struct xhci_virt_device *virt_dev;
1404 struct xhci_input_control_ctx *ctrl_ctx;
1405 unsigned int ep_index;
1406 unsigned int ep_state;
1407 u32 add_flags, drop_flags;
1408
1409 virt_dev = xhci->devs[slot_id];
1410 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1411 return;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
1420 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1421 if (!ctrl_ctx) {
1422 xhci_warn(xhci, "Could not get input context, bad type.\n");
1423 return;
1424 }
1425
1426 add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1427 drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1428
1429 ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
1437 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1438 ep_index != (unsigned int) -1 &&
1439 add_flags - SLOT_FLAG == drop_flags) {
1440 ep_state = virt_dev->eps[ep_index].ep_state;
1441 if (!(ep_state & EP_HALTED))
1442 goto bandwidth_change;
1443 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1444 "Completed config ep cmd - "
1445 "last ep index = %d, state = %d",
1446 ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
1448 virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1449 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1450 return;
1451 }
1452bandwidth_change:
1453 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1454 "Completed config ep cmd");
1455 virt_dev->cmd_status = cmd_comp_code;
1456 complete(&virt_dev->cmd_completion);
1457 return;
1458}
1459
1460static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
1461 struct xhci_event_cmd *event, u32 cmd_comp_code)
1462{
1463 struct xhci_virt_device *virt_dev;
1464
1465 virt_dev = xhci->devs[slot_id];
1466 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1467 return;
1468 virt_dev->cmd_status = cmd_comp_code;
1469 complete(&virt_dev->cmd_completion);
1470}
1471
1472static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
1473 u32 cmd_comp_code)
1474{
1475 xhci->devs[slot_id]->cmd_status = cmd_comp_code;
1476 complete(&xhci->addr_dev);
1477}
1478
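/* Completion handler for a Reset Device command: wake up any waiter on the
 * device's command wait list.
 */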
1479static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
1480 struct xhci_event_cmd *event)
1481{
1482 struct xhci_virt_device *virt_dev;
1483
1484 xhci_dbg(xhci, "Completed reset device command.\n");
1485 virt_dev = xhci->devs[slot_id];
1486 if (virt_dev)
1487 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1488 else
1489 xhci_warn(xhci, "Reset device command completion "
1490 "for disabled slot %u\n", slot_id);
1491}
1492
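/* Completion handler for the NEC vendor-specific command: log the NEC
 * firmware version carried in the event's status field.
 */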
1493static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1494 struct xhci_event_cmd *event)
1495{
1496 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1497 xhci->error_bitmask |= 1 << 6;
1498 return;
1499 }
1500 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1501 "NEC firmware version %2x.%02x",
1502 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1503 NEC_FW_MINOR(le32_to_cpu(event->status)));
1504}
1505
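/* Handle a command completion event, dispatching on the type of the command
 * TRB that the event points at.
 */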
1506static void handle_cmd_completion(struct xhci_hcd *xhci,
1507 struct xhci_event_cmd *event)
1508{
1509 int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1510 u64 cmd_dma;
1511 dma_addr_t cmd_dequeue_dma;
1512 u32 cmd_comp_code;
1513 union xhci_trb *cmd_trb;
1514 u32 cmd_type;
1515
1516 cmd_dma = le64_to_cpu(event->cmd_trb);
1517 cmd_trb = xhci->cmd_ring->dequeue;
1518 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1519 cmd_trb);
1520
1521 if (cmd_dequeue_dma == 0) {
1522 xhci->error_bitmask |= 1 << 4;
1523 return;
1524 }
1525
1526 if (cmd_dma != (u64) cmd_dequeue_dma) {
1527 xhci->error_bitmask |= 1 << 5;
1528 return;
1529 }
1530
1531 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
1532
1533 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1534 if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
		/* If the return value is 0, the TRB pointed to by the command
		 * ring dequeue pointer is a good TRB: it was not cancelled by
		 * the driver but merely stopped by the host, so handle its
		 * completion normally.  Otherwise the TRB was cancelled, so
		 * advance the dequeue pointer and return.
		 */
1541 if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
1542 inc_deq(xhci, xhci->cmd_ring);
1543 return;
1544 }

		/* If the command ring is now empty there is nothing left to
		 * handle; the last command merely stopped the ring.
		 */
1549 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1550 return;
1551 }
1552
1553 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1554 switch (cmd_type) {
1555 case TRB_ENABLE_SLOT:
1556 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
1557 break;
1558 case TRB_DISABLE_SLOT:
1559 xhci_handle_cmd_disable_slot(xhci, slot_id);
1560 break;
1561 case TRB_CONFIG_EP:
1562 xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
1563 break;
1564 case TRB_EVAL_CONTEXT:
1565 xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
1566 break;
1567 case TRB_ADDR_DEV:
1568 xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
1569 break;
1570 case TRB_STOP_RING:
1571 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1572 le32_to_cpu(cmd_trb->generic.field[3])));
1573 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
1574 break;
1575 case TRB_SET_DEQ:
1576 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1577 le32_to_cpu(cmd_trb->generic.field[3])));
1578 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1579 break;
1580 case TRB_CMD_NOOP:
1581 break;
1582 case TRB_RESET_EP:
1583 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1584 le32_to_cpu(cmd_trb->generic.field[3])));
1585 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1586 break;
1587 case TRB_RESET_DEV:
1588 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1589 le32_to_cpu(cmd_trb->generic.field[3])));
1590 xhci_handle_cmd_reset_dev(xhci, slot_id, event);
1591 break;
1592 case TRB_NEC_GET_FW:
1593 xhci_handle_cmd_nec_get_fw(xhci, event);
1594 break;
1595 default:
		/* Skip over unknown commands on the event ring */
1597 xhci->error_bitmask |= 1 << 6;
1598 break;
1599 }
1600 inc_deq(xhci, xhci->cmd_ring);
1601}
1602
1603static void handle_vendor_event(struct xhci_hcd *xhci,
1604 union xhci_trb *event)
1605{
1606 u32 trb_type;
1607
1608 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1609 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1610 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1611 handle_cmd_completion(xhci, &event->event_cmd);
1612}

/*
 * @port_id: the one-based port ID from the hardware.
 *
 * Returns the zero-based port number for the roothub that actually owns the
 * port, counting only the ports with the same speed as the given roothub.
 */
1621static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1622 struct xhci_hcd *xhci, u32 port_id)
1623{
1624 unsigned int i;
1625 unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
1631 for (i = 0; i < (port_id - 1); i++) {
1632 u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
1638 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1639 continue;

		/* USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
1646 if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1647 num_similar_speed_ports++;
1648 }
1649 return num_similar_speed_ports;
1650}
1651
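/* Handle a Device Notification event, e.g. a remote wake notification from a
 * USB 3.0 device.
 */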
1652static void handle_device_notification(struct xhci_hcd *xhci,
1653 union xhci_trb *event)
1654{
1655 u32 slot_id;
1656 struct usb_device *udev;
1657
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1659 if (!xhci->devs[slot_id]) {
1660 xhci_warn(xhci, "Device Notification event for "
1661 "unused slot %u\n", slot_id);
1662 return;
1663 }
1664
1665 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1666 slot_id);
1667 udev = xhci->devs[slot_id]->udev;
1668 if (udev && udev->parent)
1669 usb_wakeup_notification(udev->parent, udev->portnum);
1670}
1671
1672static void handle_port_status(struct xhci_hcd *xhci,
1673 union xhci_trb *event)
1674{
1675 struct usb_hcd *hcd;
1676 u32 port_id;
1677 u32 temp, temp1;
1678 int max_ports;
1679 int slot_id;
1680 unsigned int faked_port_index;
1681 u8 major_revision;
1682 struct xhci_bus_state *bus_state;
1683 __le32 __iomem **port_array;
1684 bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
1687 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1688 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1689 xhci->error_bitmask |= 1 << 8;
1690 }
1691 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1692 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1693
1694 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1695 if ((port_id <= 0) || (port_id > max_ports)) {
1696 xhci_warn(xhci, "Invalid port id %d\n", port_id);
1697 inc_deq(xhci, xhci->event_ring);
1698 return;
1699 }

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
1704 major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
1707 hcd = xhci_to_hcd(xhci);
1708 if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1709 hcd = xhci->shared_hcd;
1710
1711 if (major_revision == 0) {
1712 xhci_warn(xhci, "Event for port %u not in "
1713 "Extended Capabilities, ignoring.\n",
1714 port_id);
1715 bogus_port_status = true;
1716 goto cleanup;
1717 }
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
1722 bogus_port_status = true;
1723 goto cleanup;
1724 }

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want the index into this roothub's own
	 * port array (the "faked" port index), not the raw hardware port ID,
	 * so we can use the roothub's bus state and port register arrays.
	 */
1733 bus_state = &xhci->bus_state[hcd_index(hcd)];
1734 if (hcd->speed == HCD_USB3)
1735 port_array = xhci->usb3_ports;
1736 else
1737 port_array = xhci->usb2_ports;
1738
1739 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1740 port_id);
1741
1742 temp = xhci_readl(xhci, port_array[faked_port_index]);
1743 if (hcd->state == HC_STATE_SUSPENDED) {
1744 xhci_dbg(xhci, "resume root hub\n");
1745 usb_hcd_resume_root_hub(hcd);
1746 }
1747
1748 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1749 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1750
1751 temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1752 if (!(temp1 & CMD_RUN)) {
1753 xhci_warn(xhci, "xHC is not running.\n");
1754 goto cleanup;
1755 }
1756
1757 if (DEV_SUPERSPEED(temp)) {
1758 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
1763 bus_state->port_remote_wakeup |= 1 << faked_port_index;
1764 xhci_test_and_clear_bit(xhci, port_array,
1765 faked_port_index, PORT_PLC);
1766 xhci_set_link_state(xhci, port_array, faked_port_index,
1767 XDEV_U0);

			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
1771 bogus_port_status = true;
1772 goto cleanup;
1773 } else {
1774 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1775 bus_state->resume_done[faked_port_index] = jiffies +
1776 msecs_to_jiffies(20);
1777 set_bit(faked_port_index, &bus_state->resuming_ports);
1778 mod_timer(&hcd->rh_timer,
1779 bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
1781 }
1782 }
1783
1784 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1785 DEV_SUPERSPEED(temp)) {
1786 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
1794 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1795 faked_port_index + 1);
1796 if (slot_id && xhci->devs[slot_id])
1797 xhci_ring_device(xhci, slot_id);
1798 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1799 bus_state->port_remote_wakeup &=
1800 ~(1 << faked_port_index);
1801 xhci_test_and_clear_bit(xhci, port_array,
1802 faked_port_index, PORT_PLC);
1803 usb_wakeup_notification(hcd->self.root_hub,
1804 faked_port_index + 1);
1805 bogus_port_status = true;
1806 goto cleanup;
1807 }
1808 }

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's out
	 * of the RExit state.
	 */
1815 if (!DEV_SUPERSPEED(temp) &&
1816 test_and_clear_bit(faked_port_index,
1817 &bus_state->rexit_ports)) {
1818 complete(&bus_state->rexit_done[faked_port_index]);
1819 bogus_port_status = true;
1820 goto cleanup;
1821 }
1822
1823 if (hcd->speed != HCD_USB3)
1824 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1825 PORT_PLC);
1826
1827cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
1829 inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
1835 if (bogus_port_status)
1836 return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
1845 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1846 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1847 spin_unlock(&xhci->lock);
1848
1849 usb_hcd_poll_rh_status(hcd);
1850 spin_lock(&xhci->lock);
1851}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
1859struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1860 union xhci_trb *start_trb,
1861 union xhci_trb *end_trb,
1862 dma_addr_t suspect_dma)
1863{
1864 dma_addr_t start_dma;
1865 dma_addr_t end_seg_dma;
1866 dma_addr_t end_trb_dma;
1867 struct xhci_segment *cur_seg;
1868
1869 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1870 cur_seg = start_seg;
1871
1872 do {
1873 if (start_dma == 0)
1874 return NULL;
1875
1876 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1877 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1878
1879 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1880
1881 if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
1883 if (start_dma <= end_trb_dma) {
1884 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1885 return cur_seg;
1886 } else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
1890 if ((suspect_dma >= start_dma &&
1891 suspect_dma <= end_seg_dma) ||
1892 (suspect_dma >= cur_seg->dma &&
1893 suspect_dma <= end_trb_dma))
1894 return cur_seg;
1895 }
1896 return NULL;
1897 } else {
			/* Might still be somewhere in this segment */
1899 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1900 return cur_seg;
1901 }
1902 cur_seg = cur_seg->next;
1903 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1904 } while (cur_seg != start_seg);
1905
1906 return NULL;
1907}
1908
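/* Clean up a halted endpoint: queue a Reset Endpoint command, move the ring
 * past the stalled TD, and ring the command doorbell.  Called with xhci->lock
 * held.
 */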
1909static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1910 unsigned int slot_id, unsigned int ep_index,
1911 unsigned int stream_id,
1912 struct xhci_td *td, union xhci_trb *event_trb)
1913{
1914 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1915 ep->ep_state |= EP_HALTED;
1916 ep->stopped_td = td;
1917 ep->stopped_trb = event_trb;
1918 ep->stopped_stream = stream_id;
1919
1920 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1921 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1922
1923 ep->stopped_td = NULL;
1924 ep->stopped_trb = NULL;
1925 ep->stopped_stream = 0;
1926
1927 xhci_ring_cmd_db(xhci);
1928}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
1936static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1937 struct xhci_ep_ctx *ep_ctx,
1938 unsigned int trb_comp_code)
1939{
	/* TRB completion codes that may require a manual halt cleanup */
1941 if (trb_comp_code == COMP_TX_ERR ||
1942 trb_comp_code == COMP_BABBLE ||
1943 trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
1950 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1951 cpu_to_le32(EP_STATE_HALTED))
1952 return 1;
1953
1954 return 0;
1955}
1956
1957int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1958{
1959 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
1963 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1964 trb_comp_code);
1965 xhci_dbg(xhci, "Treating code as success.\n");
1966 return 1;
1967 }
1968 return 0;
1969}

/*
 * Finish the TD processing and remove the TD from its td list;
 * returns 1 if the URB can be given back.
 */
1975static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1976 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1977 struct xhci_virt_ep *ep, int *status, bool skip)
1978{
1979 struct xhci_virt_device *xdev;
1980 struct xhci_ring *ep_ring;
1981 unsigned int slot_id;
1982 int ep_index;
1983 struct urb *urb = NULL;
1984 struct xhci_ep_ctx *ep_ctx;
1985 int ret = 0;
1986 struct urb_priv *urb_priv;
1987 u32 trb_comp_code;
1988
1989 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1990 xdev = xhci->devs[slot_id];
1991 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1992 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1993 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1994 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1995
1996 if (skip)
1997 goto td_cleanup;
1998
1999 if (trb_comp_code == COMP_STOP_INVAL ||
2000 trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
2005 ep->stopped_td = td;
2006 ep->stopped_trb = event_trb;
2007 return 0;
2008 } else {
2009 if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a Set Dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Save the
			 * stopped state so the stall handling code can use it.
			 */
2017 ep->stopped_td = td;
2018 ep->stopped_trb = event_trb;
2019 ep->stopped_stream = ep_ring->stream_id;
2020 } else if (xhci_requires_manual_halt_cleanup(xhci,
2021 ep_ctx, trb_comp_code)) {
2022
2023
2024
2025
2026
2027 xhci_cleanup_halted_endpoint(xhci,
2028 slot_id, ep_index, ep_ring->stream_id,
2029 td, event_trb);
2030 } else {
			/* Update ring dequeue pointer */
2032 while (ep_ring->dequeue != td->last_trb)
2033 inc_deq(xhci, ep_ring);
2034 inc_deq(xhci, ep_ring);
2035 }
2036
2037td_cleanup:
		/* Clean up the endpoint's TD list */
2039 urb = td->urb;
2040 urb_priv = urb->hcpriv;
2041
		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
2048 if (urb->actual_length > urb->transfer_buffer_length) {
2049 xhci_warn(xhci, "URB transfer length is wrong, "
2050 "xHC issue? req. len = %u, "
2051 "act. len = %u\n",
2052 urb->transfer_buffer_length,
2053 urb->actual_length);
2054 urb->actual_length = 0;
2055 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2056 *status = -EREMOTEIO;
2057 else
2058 *status = 0;
2059 }
2060 list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
2062 if (!list_empty(&td->cancelled_td_list))
2063 list_del_init(&td->cancelled_td_list);
2064
2065 urb_priv->td_cnt++;
2066
2067 if (urb_priv->td_cnt == urb_priv->length) {
2068 ret = 1;
2069 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2070 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
2071 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
2072 == 0) {
2073 if (xhci->quirks & XHCI_AMD_PLL_FIX)
2074 usb_amd_quirk_pll_enable();
2075 }
2076 }
2077 }
2078 }
2079
2080 return ret;
2081}

/*
 * Process control TDs, update URB status and actual_length.
 */
2086static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
2087 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2088 struct xhci_virt_ep *ep, int *status)
2089{
2090 struct xhci_virt_device *xdev;
2091 struct xhci_ring *ep_ring;
2092 unsigned int slot_id;
2093 int ep_index;
2094 struct xhci_ep_ctx *ep_ctx;
2095 u32 trb_comp_code;
2096
2097 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2098 xdev = xhci->devs[slot_id];
2099 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2100 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2101 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2102 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2103
2104 switch (trb_comp_code) {
2105 case COMP_SUCCESS:
2106 if (event_trb == ep_ring->dequeue) {
2107 xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
2108 "without IOC set??\n");
2109 *status = -ESHUTDOWN;
2110 } else if (event_trb != td->last_trb) {
2111 xhci_warn(xhci, "WARN: Success on ctrl data TRB "
2112 "without IOC set??\n");
2113 *status = -ESHUTDOWN;
2114 } else {
2115 *status = 0;
2116 }
2117 break;
2118 case COMP_SHORT_TX:
2119 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2120 *status = -EREMOTEIO;
2121 else
2122 *status = 0;
2123 break;
2124 case COMP_STOP_INVAL:
2125 case COMP_STOP:
2126 return finish_td(xhci, td, event_trb, event, ep, status, false);
2127 default:
2128 if (!xhci_requires_manual_halt_cleanup(xhci,
2129 ep_ctx, trb_comp_code))
2130 break;
2131 xhci_dbg(xhci, "TRB error code %u, "
2132 "halted endpoint index = %u\n",
2133 trb_comp_code, ep_index);
		/* else fall through */
2135 case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
2137 if (event_trb != ep_ring->dequeue &&
2138 event_trb != td->last_trb)
2139 td->urb->actual_length =
2140 td->urb->transfer_buffer_length -
2141 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2142 else
2143 td->urb->actual_length = 0;
2144
2145 xhci_cleanup_halted_endpoint(xhci,
2146 slot_id, ep_index, 0, td, event_trb);
2147 return finish_td(xhci, td, event_trb, event, ep, status, true);
2148 }

	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
2153 if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
2155 if (event_trb == td->last_trb) {
2156 if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code
				 */
2159 if ((*status == -EINPROGRESS || *status == 0) &&
2160 (td->urb->transfer_flags
2161 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
2164 *status = -EREMOTEIO;
2165 } else {
2166 td->urb->actual_length =
2167 td->urb->transfer_buffer_length;
2168 }
2169 } else {
			/* Maybe the event was for the data stage? */
2171 td->urb->actual_length =
2172 td->urb->transfer_buffer_length -
2173 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2174 xhci_dbg(xhci, "Waiting for status "
2175 "stage event\n");
2176 return 0;
2177 }
2178 }
2179
2180 return finish_td(xhci, td, event_trb, event, ep, status, false);
2181}

/*
 * Process isochronous TDs, update URB packet status and actual_length.
 */
2186static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2187 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2188 struct xhci_virt_ep *ep, int *status)
2189{
2190 struct xhci_ring *ep_ring;
2191 struct urb_priv *urb_priv;
2192 int idx;
2193 int len = 0;
2194 union xhci_trb *cur_trb;
2195 struct xhci_segment *cur_seg;
2196 struct usb_iso_packet_descriptor *frame;
2197 u32 trb_comp_code;
2198 bool skip_td = false;
2199
2200 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2201 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2202 urb_priv = td->urb->hcpriv;
2203 idx = urb_priv->td_cnt;
2204 frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
2207 switch (trb_comp_code) {
2208 case COMP_SUCCESS:
2209 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2210 frame->status = 0;
2211 break;
2212 }
2213 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2214 trb_comp_code = COMP_SHORT_TX;
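		/* fall through: treat the transfer as short */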
2215 case COMP_SHORT_TX:
2216 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2217 -EREMOTEIO : 0;
2218 break;
2219 case COMP_BW_OVER:
2220 frame->status = -ECOMM;
2221 skip_td = true;
2222 break;
2223 case COMP_BUFF_OVER:
2224 case COMP_BABBLE:
2225 frame->status = -EOVERFLOW;
2226 skip_td = true;
2227 break;
2228 case COMP_DEV_ERR:
2229 case COMP_STALL:
2230 case COMP_TX_ERR:
2231 frame->status = -EPROTO;
2232 skip_td = true;
2233 break;
2234 case COMP_STOP:
2235 case COMP_STOP_INVAL:
2236 break;
2237 default:
2238 frame->status = -1;
2239 break;
2240 }
2241
2242 if (trb_comp_code == COMP_SUCCESS || skip_td) {
2243 frame->actual_length = frame->length;
2244 td->urb->actual_length += frame->length;
2245 } else {
2246 for (cur_trb = ep_ring->dequeue,
2247 cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2248 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2249 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2250 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2251 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2252 }
2253 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2254 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2255
2256 if (trb_comp_code != COMP_STOP_INVAL) {
2257 frame->actual_length = len;
2258 td->urb->actual_length += len;
2259 }
2260 }
2261
2262 return finish_td(xhci, td, event_trb, event, ep, status, false);
2263}
2264
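/* Skip over an isoc TD that the xHC was unable to service: mark its frame
 * descriptor with -EXDEV and move the ring dequeue pointer past the TD.
 */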
2265static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2266 struct xhci_transfer_event *event,
2267 struct xhci_virt_ep *ep, int *status)
2268{
2269 struct xhci_ring *ep_ring;
2270 struct urb_priv *urb_priv;
2271 struct usb_iso_packet_descriptor *frame;
2272 int idx;
2273
2274 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2275 urb_priv = td->urb->hcpriv;
2276 idx = urb_priv->td_cnt;
2277 frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
2280 frame->status = -EXDEV;

	/* calc actual length */
2283 frame->actual_length = 0;

	/* Update ring dequeue pointer */
2286 while (ep_ring->dequeue != td->last_trb)
2287 inc_deq(xhci, ep_ring);
2288 inc_deq(xhci, ep_ring);
2289
2290 return finish_td(xhci, td, NULL, event, ep, status, true);
2291}

/*
 * Process bulk and interrupt TDs, update URB status and actual_length.
 */
2296static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2297 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2298 struct xhci_virt_ep *ep, int *status)
2299{
2300 struct xhci_ring *ep_ring;
2301 union xhci_trb *cur_trb;
2302 struct xhci_segment *cur_seg;
2303 u32 trb_comp_code;
2304
2305 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2306 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2307
2308 switch (trb_comp_code) {
2309 case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
2311 if (event_trb != td->last_trb ||
2312 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2313 xhci_warn(xhci, "WARN Successful completion "
2314 "on short TX\n");
2315 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2316 *status = -EREMOTEIO;
2317 else
2318 *status = 0;
2319 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2320 trb_comp_code = COMP_SHORT_TX;
2321 } else {
2322 *status = 0;
2323 }
2324 break;
2325 case COMP_SHORT_TX:
2326 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2327 *status = -EREMOTEIO;
2328 else
2329 *status = 0;
2330 break;
2331 default:
		/* Others already handled above */
2333 break;
2334 }
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Fast path - was this the last TRB in the TD for this URB? */
	if (event_trb == td->last_trb) {
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length of %d bytes left\n",
						EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add in the
		 * actual bytes transferred from the Normal TRB.
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
	__releases(&xhci->lock)
	__acquires(&xhci->lock)
{
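	/*
	 * If this function returns an error condition, it means it got a
	 * Transfer event with a corrupted Slot ID, Endpoint ID, or TRB DMA
	 * address; in that case the host controller is probably hosed and
	 * should be reset.
	 */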
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
				(unsigned long long) xhci_trb_virt_to_dma(
					xhci->event_ring->deq_seg,
					xhci->event_ring->dequeue),
				lower_32_bits(le64_to_cpu(event->buffer)),
				upper_32_bits(le64_to_cpu(event->buffer)),
				le32_to_cpu(event->transfer_len),
				le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
			EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
				(unsigned long long) xhci_trb_virt_to_dma(
					xhci->event_ring->deq_seg,
					xhci->event_ring->dequeue),
				lower_32_bits(le64_to_cpu(event->buffer)),
				upper_32_bits(le64_to_cpu(event->buffer)),
				le32_to_cpu(event->transfer_len),
				le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn_ratelimited(xhci,
					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
		/* fallthrough */
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d still with TDs queued?\n",
					TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
					ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d still with TDs queued?\n",
					TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
					ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detected an incompatible device\n");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When a Missed Service Error is encountered, the xHC may
		 * have skipped one or more isoc TDs.  Set the skip flag on
		 * the endpoint; the missed TDs will be completed as short
		 * transfers the next time the ring is processed.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOP ||
					trb_comp_code == COMP_STOP_INVAL)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK) >> 10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma);

		/*
		 * Skip the Force Stopped Event.  The event_trb(event_dma) of
		 * FSE is not in the current TD pointed to by ep_ring->dequeue
		 * because the hardware dequeue pointer is still at the
		 * previous TRB of the current TD.  The previous TRB may be a
		 * Link TRB or the last TRB of the previous TD.  The command
		 * completion handler will take care of the rest.
		 */
		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not part of current TD\n");
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
				sizeof(*event_trb)];
		/*
		 * No-op TRBs should not trigger interrupts.  If the event
		 * TRB is a no-op TRB, it means the corresponding TD has
		 * been cancelled; just ignore the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
					"event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
					&status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
					&status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
					ep, &status);

cleanup:
		/*
		 * Do not update the event ring dequeue pointer if ep->skip is
		 * set; we will roll back to continue processing missed TDs.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring);
		}

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;
			/* Leave the TD around for the reset endpoint function
			 * to use (but only if it's not a control endpoint,
			 * since we already queued the Set TR dequeue pointer
			 * command for stalled control endpoints).
			 */
			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
			    (trb_comp_code != COMP_STALL &&
			     trb_comp_code != COMP_BABBLE))
				xhci_urb_free_priv(xhci, urb_priv);
			else
				kfree(urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
			     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
			    (status != 0 &&
			     !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, it means there are missed TDs on the endpoint
	 * ring that still need to be dealt with.  Process them as short
	 * transfers until we reach the TD pointed to by the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}

static int xhci_handle_event(struct xhci_hcd *xhci)
{
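	/*
	 * Handle one OS-owned event on the event ring.  May drop and
	 * re-acquire xhci->lock between event processing (e.g. to pass up
	 * port status changes).  Returns >0 when there may be more events
	 * left for the caller to process, otherwise 0.
	 */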
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();

	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
				TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}

	/* Any of the above functions may drop and re-acquire the lock, so
	 * check to make sure a watchdog timer didn't mark the host as
	 * non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us
	 * again to check.
	 */
	return 1;
}

irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
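	/*
	 * The xHCI spec says we can get an interrupt and, if the host has an
	 * error condition, we might get bad data out of the event ring.
	 * Check the status register first to be safe before touching the
	 * event ring.
	 */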
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = xhci_readl(xhci, &xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	/*
	 * Clear the op reg interrupt status first, so we can receive
	 * interrupts from other MSI-X interrupters.  Write 1 to clear the
	 * interrupt status.
	 */
	status |= STS_EINT;
	xhci_writel(xhci, status, &xhci->op_regs->status);

	/* Acknowledge the PCI interrupt */
	if (hcd->irq) {
		u32 irq_pending;

		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); the event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
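	/*
	 * Generic function for queueing a TRB on a ring.  The caller must
	 * have checked that there's room on the ring; more_trbs_coming says
	 * whether more TRBs will be queued before the doorbell is rung.
	 */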
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, more_trbs_coming);
}

static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
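	/*
	 * Does various checks on the endpoint ring and makes it ready to
	 * queue num_trbs TRBs, expanding the ring if more room is needed.
	 */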
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to the xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENODEV or -EINVAL */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
		/* fallthrough */
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs)) {
			union xhci_trb *trb = ep_ring->enqueue;
			unsigned int usable = ep_ring->enq_seg->trbs +
					TRBS_PER_SEGMENT - 1 - trb;
			u32 nop_cmd;

			/*
			 * Section 4.11.7.1 TD Fragments states that a link
			 * TRB must only occur at the boundary between data
			 * bursts (eg 512 bytes for 512 byte maxpacket
			 * devices).  Accept the enqueue position as-is if
			 * the TD is a single TRB, fits in the usable space
			 * before the link TRB, or starts right at a segment
			 * boundary.
			 */
			if (num_trbs == 1 || num_trbs <= usable || usable == 0)
				break;

			if (ep_ring->type != TYPE_BULK)
				/*
				 * While isoc transfers might have a buffer
				 * that crosses a 64k boundary it is unlikely.
				 * Practically speaking, only bulk rings need
				 * this TD-fragment workaround, so accept the
				 * enqueue position for all other ring types.
				 */
				break;

			if (num_trbs >= TRBS_PER_SEGMENT) {
				xhci_err(xhci, "Too many fragments %d, max %d\n",
						num_trbs, TRBS_PER_SEGMENT - 1);
				return -ENOMEM;
			}

			/* Fill the remainder of the segment with no-op TRBs
			 * so that the TD starts at the top of the next one.
			 */
			nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
					ep_ring->cycle_state);
			ep_ring->num_trbs_free -= usable;
			do {
				trb->generic.field[0] = 0;
				trb->generic.field[1] = 0;
				trb->generic.field[2] = 0;
				trb->generic.field[3] = nop_cmd;
				trb++;
			} while (--usable);
			ep_ring->enqueue = trb;
			if (room_on_ring(xhci, ep_ring, num_trbs))
				break;
		}

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Command ring expansion is not supported\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc
			 * rings on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					  (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
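	/*
	 * Count how many TRBs a scatter-gather URB needs: one per 64KB
	 * chunk of each sg entry, plus one for any leading piece up to the
	 * first 64KB boundary.  For example, a 10KB sg entry that starts
	 * 1KB below a 64KB boundary takes two TRBs: 1KB up to the boundary,
	 * then the remaining 9KB.
	 */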
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	temp = urb->transfer_buffer_length;

	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	return num_trbs;
}

static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of TRBs, %d left\n",
				__func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
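	/*
	 * xHCI uses normal TRBs for both bulk and interrupt.  When the
	 * interrupt endpoint is to be serviced, the xHC will consume (at
	 * most) one TD.  A TD (comprised of sg list entries) can take
	 * several service intervals to transmit.
	 */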
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

static u32 xhci_td_remainder(unsigned int remainder)
{
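	/*
	 * The TD size field of a TRB is the number of bytes remaining in the
	 * TD (including this TRB), right shifted by 10, i.e. in 1KB units.
	 * It lives in bits 21:17, so it is capped at 31.  For example, with
	 * 20480 bytes left this returns 20 << 17; with 70000 bytes left,
	 * 70000 >> 10 = 68 is more than 31, so it returns 31 << 17.
	 */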
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}

static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb,
		unsigned int num_trbs_left)
{
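	/*
	 * For xHCI 1.0 hosts, the TD size is the number of max-packet-sized
	 * packets remaining in the TD, *not* including this TRB:
	 *
	 *   total_packet_count = DIV_ROUND_UP(TD length / wMaxPacketSize)
	 *   packets_transferred = (bytes queued through this TRB) /
	 *			   wMaxPacketSize
	 *   TD size = total_packet_count - packets_transferred
	 *
	 * The field is capped at 31 (bits 21:17).  For example, a 10-packet
	 * TD with 5 packets' worth of bytes queued through this TRB gives a
	 * TD size of 5.
	 */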
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
		return 0;

	/* All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) /
		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));

	if ((total_packet_count - packets_transferred) > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}

static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	unsigned int total_packet_count;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_mapped_sgs;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle
	 * bit) until we've finished creating all the other TRBs.  The ring's
	 * cycle state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller
	 *    than the amount of memory allocated for this scatter-gather
	 *    list.
	 * 3. TRB buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the TRBs, even the zero-length ones. */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the
		 * last TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		if (TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
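	/*
	 * Queue a bulk (or interrupt) transfer that uses a single contiguous
	 * DMA buffer, splitting it into TRBs at 64KB boundaries.
	 */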
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;

	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle
	 * bit) until we've finished creating all the other TRBs.  The ring's
	 * cycle state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the
		 * last TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy the setup packet into the setup TRB, so we can't use
	 * the setup DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal
	 * TRBs, since data in control transfers will never get bigger than
	 * 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle
	 * bit) until we've finished creating all the other TRBs.  The ring's
	 * cycle state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0: Transfer Type field of the setup TRB (6.4.1.2.1) */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
			setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
			le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
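	/*
	 * Count the TRBs needed for frame i of an isoc URB: one per 64KB
	 * boundary the frame's buffer crosses, with a minimum of one.  For
	 * example, a 6000-byte frame whose buffer starts 4096 bytes below a
	 * 64KB boundary needs DIV_ROUND_UP(6000 + 61440, 65536) = 2 TRBs.
	 */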
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}

static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
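	/*
	 * The transfer burst count field of the isochronous TRB defines the
	 * number of bursts that are required to move all packets in this TD.
	 * Only SuperSpeed devices can burst up to bMaxBurst number of packets
	 * per service interval, and only xHCI 1.0 hosts support the field.
	 * The field is zero based: e.g. 10 packets with bMaxBurst = 3 (four
	 * packets per burst) need DIV_ROUND_UP(10, 4) = 3 bursts, encoded
	 * as 2.
	 */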
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
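	/*
	 * Returns the number of packets in the last "burst" of packets.
	 * USB 2.0 devices can always do one "burst", so the last burst
	 * packet count equals the total number of packets in the TD.  All
	 * but the last burst must contain bMaxBurst + 1 packets.  The field
	 * is zero based: e.g. 10 packets at four packets per burst leave
	 * 10 % 4 = 2 packets in the last burst, encoded as 1.
	 */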
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * packets, but the TLBPC field is zero-based, so we have to
		 * subtract one.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;

	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = DIV_ROUND_UP(td_len,
				GET_MAX_PACKET(
					usb_endpoint_maxp(&urb->ep->desc)));
		/* A zero-length transfer still involves at least one packet */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = 0;

			if (first_trb) {
				field = TRB_TBC(burst_count) |
					TRB_TLBPC(residue);
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100 &&
						!(xhci->quirks &
							XHCI_AVOID_BEI)) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, and interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb,
						(trbs_per_td - j - 1));
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, more_trbs_coming,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've
	 * queued into No-ops with a SW-owned cycle bit.  That way the
	 * hardware won't accidentally start executing them.  The stopped
	 * endpoint command handler won't handle a TD we've turned into a
	 * no-op.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
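	/*
	 * Check the transfer ring to guarantee there is enough room for the
	 * whole URB, then update the ISO URB's start_frame and interval.
	 * The current microframe index is used for start_frame: URB_ISO_ASAP
	 * is always assumed, and urb->start_frame is never used as input.
	 */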
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole
	 * URB.  Do not insert any TDs of the URB into the ring if the check
	 * fails.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
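	/*
	 * Generic function for queueing a command TRB on the command ring.
	 * Check to make sure there's room on the ring for one command TRB,
	 * plus the TRBs reserved for commands that must not fail; if this
	 * command must succeed (command_must_succeed = true), only check
	 * the reserved spots.  Don't decrement cmd_ring_reserved_trbs after
	 * queueing, because the command event handler may want to resubmit
	 * a failed command.
	 */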
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}