// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver -- ring and event handling
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Ring operation in brief: the driver is the producer on the command and
 * transfer rings (it owns the enqueue pointer) and the consumer on the event
 * ring (it owns the dequeue pointer).  Each ring is a list of TRB segments
 * chained together with link TRBs, and ownership of a TRB is handed over by
 * writing its cycle bit to match the ring's current cycle state.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"

static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB within the segment.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
				union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain any chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}
137
/*
 * Advance *trb to the next TRB in the ring, following a link TRB into the
 * next segment when one is reached; *seg is updated to match.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
154

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	unsigned int link_trb_count = 0;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			xhci_warn(xhci, "Missing link TRB at end of segment\n");
		} else {
			ring->dequeue++;
			ring->num_trbs_free++;
		}
	}

	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			break;
		}
	}
out:
	trace_xhci_inc_deq(ring);

	return;
}
199

/*
 * See Cycle bit rules. SW is the producer for all rings except the event ring.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each link TRB is a separate TD).
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before ringing the doorbell?
 */
216static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
217 bool more_trbs_coming)
218{
219 u32 chain;
220 union xhci_trb *next;
221 unsigned int link_trb_count = 0;
222
223 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
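	/* remember the chain bit of the TRB just enqueued; link TRBs inherit it */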
224
225 if (!trb_is_link(ring->enqueue))
226 ring->num_trbs_free--;
227
228 if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
229 xhci_err(xhci, "Tried to move enqueue past ring segment\n");
230 return;
231 }
232
233 next = ++(ring->enqueue);
234
235
236 while (trb_is_link(next)) {
		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link
		 * TRB to the hardware just yet.  We'll give the link TRB back
		 * in prepare_ring() just before we enqueue the TD at the top
		 * of the ring.
		 */
245 if (!chain && !more_trbs_coming)
246 break;
247
		/*
		 * If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
252 if (!(ring->type == TYPE_ISOC &&
253 (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
254 !xhci_link_trb_quirk(xhci)) {
255 next->link.control &= cpu_to_le32(~TRB_CHAIN);
256 next->link.control |= cpu_to_le32(chain);
257 }
258
259 wmb();
260 next->link.control ^= cpu_to_le32(TRB_CYCLE);
261
262
263 if (link_trb_toggles_cycle(next))
264 ring->cycle_state ^= 1;
265
266 ring->enq_seg = ring->enq_seg->next;
267 ring->enqueue = ring->enq_seg->trbs;
268 next = ring->enqueue;
269
270 if (link_trb_count++ > ring->num_segs) {
271 xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
272 break;
273 }
274 }
275
276 trace_xhci_inc_enq(ring);
277}
278

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.
 */
283static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
284 unsigned int num_trbs)
285{
286 int num_trbs_in_deq_seg;
287
288 if (ring->num_trbs_free < num_trbs)
289 return 0;
290
291 if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
292 num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
293 if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
294 return 0;
295 }
296
297 return 1;
298}
299
/* Ring the host controller doorbell after placing a command on the ring */
301void xhci_ring_cmd_db(struct xhci_hcd *xhci)
302{
303 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
304 return;
305
306 xhci_dbg(xhci, "// Ding dong!\n");
307
308 trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
309
310 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
311
312 readl(&xhci->dba->doorbell[0]);
313}
314
315static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
316{
317 return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
318}
319
320static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
321{
322 return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
323 cmd_list);
324}
325

/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs, then restart the command ring if it isn't empty.
 */
331static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
332 struct xhci_command *cur_cmd)
333{
334 struct xhci_command *i_cmd;
335
336
337 list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
338
339 if (i_cmd->status != COMP_COMMAND_ABORTED)
340 continue;
341
342 i_cmd->status = COMP_COMMAND_RING_STOPPED;
343
344 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
345 i_cmd->command_trb);
346
347 trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
348
349
350
351
352
353 }
354
355 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
356
357
358 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
359 !(xhci->xhc_state & XHCI_STATE_DYING)) {
360 xhci->current_cmd = cur_cmd;
361 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
362 xhci_ring_cmd_db(xhci);
363 }
364}
365
/* Must be called with xhci->lock held; releases and re-acquires the lock */
367static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
368{
369 u32 temp_32;
370 int ret;
371
372 xhci_dbg(xhci, "Abort command ring\n");
373
374 reinit_completion(&xhci->cmd_ring_stop_completion);
375
	/*
	 * The control bits like command stop and abort are located in the
	 * lower dword of the command ring control register.  Limit the write
	 * to the lower dword to avoid corrupting the command ring pointer in
	 * case the command ring is stopped by the time the upper dword would
	 * be written.
	 */
383 temp_32 = readl(&xhci->op_regs->cmd_ring);
384 writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
385
	/*
	 * xHCI 1.0 section 4.6.1.2 says software should also time the
	 * completion of the Command Abort operation.  If CRR is not negated
	 * within 5 seconds the driver handles it as if the host died.
	 */
392 ret = xhci_handshake(&xhci->op_regs->cmd_ring,
393 CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
394 if (ret < 0) {
395 xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
396 xhci_halt(xhci);
397 xhci_hc_died(xhci);
398 return ret;
399 }

	/*
	 * Writing the CMD_RING_ABORT bit should cause a command completion
	 * event; however, on some hosts CMD_RING_RUNNING is cleared but the
	 * completion event is never sent.  Wait up to 2 seconds for the stop
	 * event before giving up and cleaning up the command queue.
	 */
406 spin_unlock_irqrestore(&xhci->lock, flags);
407 ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
408 msecs_to_jiffies(2000));
409 spin_lock_irqsave(&xhci->lock, flags);
410 if (!ret) {
411 xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
412 xhci_cleanup_command_queue(xhci);
413 } else {
414 xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
415 }
416 return 0;
417}
418
419void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
420 unsigned int slot_id,
421 unsigned int ep_index,
422 unsigned int stream_id)
423{
424 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
425 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
426 unsigned int ep_state = ep->ep_state;
427
	/*
	 * Don't ring the doorbell while the endpoint has a stop endpoint,
	 * set dequeue or clear-TT operation pending, or while it is halted;
	 * the ring will be restarted when that operation completes.
	 */
434 if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
435 (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
436 return;
437
438 trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
439
440 writel(DB_VALUE(ep_index, stream_id), db_addr);
441
442 readl(db_addr);
443}
444
/* Ring the doorbell for any of this endpoint's rings that have queued TDs */
446static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
447 unsigned int slot_id,
448 unsigned int ep_index)
449{
450 unsigned int stream_id;
451 struct xhci_virt_ep *ep;
452
453 ep = &xhci->devs[slot_id]->eps[ep_index];
454
455
456 if (!(ep->ep_state & EP_HAS_STREAMS)) {
457 if (ep->ring && !(list_empty(&ep->ring->td_list)))
458 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
459 return;
460 }
461
462 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
463 stream_id++) {
464 struct xhci_stream_info *stream_info = ep->stream_info;
465 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
466 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
467 stream_id);
468 }
469}
470
471void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
472 unsigned int slot_id,
473 unsigned int ep_index)
474{
475 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
476}
477
478static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
479 unsigned int slot_id,
480 unsigned int ep_index)
481{
482 if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
483 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
484 return NULL;
485 }
486 if (ep_index >= EP_CTX_PER_DEV) {
487 xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
488 return NULL;
489 }
490 if (!xhci->devs[slot_id]) {
491 xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
492 return NULL;
493 }
494
495 return &xhci->devs[slot_id]->eps[ep_index];
496}
497
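/*
 * Return the ring to use for this endpoint: the stream ring for stream_id if
 * the endpoint uses streams, otherwise the endpoint's single transfer ring.
 * Returns NULL if stream_id is out of range.
 */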
498static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
499 struct xhci_virt_ep *ep,
500 unsigned int stream_id)
501{
502
503 if (!(ep->ep_state & EP_HAS_STREAMS))
504 return ep->ring;
505
506 if (!ep->stream_info)
507 return NULL;
508
509 if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
510 xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
511 stream_id, ep->vdev->slot_id, ep->ep_index);
512 return NULL;
513 }
514
515 return ep->stream_info->stream_rings[stream_id];
516}
517

/*
 * Get the right ring for the given slot_id, ep_index and stream_id.
 * Returns NULL if the endpoint or stream ID is invalid.
 */
522struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
523 unsigned int slot_id, unsigned int ep_index,
524 unsigned int stream_id)
525{
526 struct xhci_virt_ep *ep;
527
528 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
529 if (!ep)
530 return NULL;
531
532 return xhci_virt_ep_to_ring(xhci, ep, stream_id);
533}
534

/*
 * Return the hardware's current dequeue pointer for the endpoint, or for the
 * given stream ring if the endpoint uses streams, read from the output device
 * context.  The low bits carry the cycle state (DCS) and stream context type.
 */
542static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
543 unsigned int ep_index, unsigned int stream_id)
544{
545 struct xhci_ep_ctx *ep_ctx;
546 struct xhci_stream_ctx *st_ctx;
547 struct xhci_virt_ep *ep;
548
549 ep = &vdev->eps[ep_index];
550
551 if (ep->ep_state & EP_HAS_STREAMS) {
552 st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
553 return le64_to_cpu(st_ctx->stream_ring);
554 }
555 ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
556 return le64_to_cpu(ep_ctx->deq);
557}
558
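/*
 * Move the xHC endpoint ring dequeue pointer past the given cancelled TD:
 * walk the ring to find the new dequeue position and cycle state, then queue
 * a Set TR Dequeue Pointer command (the doorbell stays quiet until that
 * command completes).
 */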
559static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
560 unsigned int slot_id, unsigned int ep_index,
561 unsigned int stream_id, struct xhci_td *td)
562{
563 struct xhci_virt_device *dev = xhci->devs[slot_id];
564 struct xhci_virt_ep *ep = &dev->eps[ep_index];
565 struct xhci_ring *ep_ring;
566 struct xhci_command *cmd;
567 struct xhci_segment *new_seg;
568 struct xhci_segment *halted_seg = NULL;
569 union xhci_trb *new_deq;
570 int new_cycle;
571 union xhci_trb *halted_trb;
572 int index = 0;
573 dma_addr_t addr;
574 u64 hw_dequeue;
575 bool cycle_found = false;
576 bool td_last_trb_found = false;
577 u32 trb_sct = 0;
578 int ret;
579
580 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
581 ep_index, stream_id);
582 if (!ep_ring) {
583 xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
584 stream_id);
585 return -ENODEV;
586 }
587
	/*
	 * A cancelled TD can complete with a stall if HW cached the last TRB.
	 * In that case the driver can't find the td, but if the ring is empty
	 * we can simply set the new dequeue to the current enqueue.
	 */
594 if (!td) {
595 if (list_empty(&ep_ring->td_list)) {
596 new_seg = ep_ring->enq_seg;
597 new_deq = ep_ring->enqueue;
598 new_cycle = ep_ring->cycle_state;
599 xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
600 goto deq_found;
601 } else {
602 xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
603 return -EINVAL;
604 }
605 }
606
607 hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
608 new_seg = ep_ring->deq_seg;
609 new_deq = ep_ring->dequeue;
610
	/*
	 * Quirk: some hosts write back a stale cycle state (DCS) in the
	 * endpoint context.  Read the cycle bit from the TRB the hardware
	 * dequeue pointer points at instead.
	 */
616 if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
617 !(ep->ep_state & EP_HAS_STREAMS))
618 halted_seg = trb_in_td(xhci, td->start_seg,
619 td->first_trb, td->last_trb,
620 hw_dequeue & ~0xf, false);
621 if (halted_seg) {
622 index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
623 sizeof(*halted_trb);
624 halted_trb = &halted_seg->trbs[index];
625 new_cycle = halted_trb->generic.field[3] & 0x1;
626 xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
627 (u8)(hw_dequeue & 0x1), index, new_cycle);
628 } else {
629 new_cycle = hw_dequeue & 0x1;
630 }
631
	/*
	 * Walk the ring from the current software dequeue position until we
	 * have found both the hardware dequeue position (to pick up the
	 * cycle state) and the last TRB of the TD we want to move past.
	 */
638 do {
639 if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
640 == (dma_addr_t)(hw_dequeue & ~0xf)) {
641 cycle_found = true;
642 if (td_last_trb_found)
643 break;
644 }
645 if (new_deq == td->last_trb)
646 td_last_trb_found = true;
647
648 if (cycle_found && trb_is_link(new_deq) &&
649 link_trb_toggles_cycle(new_deq))
650 new_cycle ^= 0x1;
651
652 next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around; give up before looping forever */
655 if (new_deq == ep->ring->dequeue) {
656 xhci_err(xhci, "Error: Failed finding new dequeue state\n");
657 return -EINVAL;
658 }
659
660 } while (!cycle_found || !td_last_trb_found);
661
662deq_found:
663
664
665 addr = xhci_trb_virt_to_dma(new_seg, new_deq);
666 if (addr == 0) {
667 xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
668 xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
669 return -EINVAL;
670 }
671
672 if ((ep->ep_state & SET_DEQ_PENDING)) {
673 xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
674 &addr);
675 return -EBUSY;
676 }
677
678
679 cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
680 if (!cmd) {
681 xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
682 return -ENOMEM;
683 }
684
685 if (stream_id)
686 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
687 ret = queue_command(xhci, cmd,
688 lower_32_bits(addr) | trb_sct | new_cycle,
689 upper_32_bits(addr),
690 STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
691 EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
692 if (ret < 0) {
693 xhci_free_command(xhci, cmd);
694 return ret;
695 }
696 ep->queued_deq_seg = new_seg;
697 ep->queued_deq_ptr = new_deq;
698
699 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
700 "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
701
	/*
	 * Stop the TD queueing code from ringing the doorbell until this
	 * command completes.  The HC won't set the dequeue pointer if the
	 * ring is running, and ringing the doorbell starts the ring running.
	 */
707 ep->ep_state |= SET_DEQ_PENDING;
708 xhci_ring_cmd_db(xhci);
709 return 0;
710}
711

/*
 * Turn every TRB of a (cancelled) TD into a no-op.  When flip_cycle is set,
 * also flip the cycle bit of every TRB except the first and last ones.
 */
716static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
717 struct xhci_td *td, bool flip_cycle)
718{
719 struct xhci_segment *seg = td->start_seg;
720 union xhci_trb *trb = td->first_trb;
721
722 while (1) {
723 trb_to_noop(trb, TRB_TR_NOOP);
724
725
726 if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
727 trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
728
729 if (trb == td->last_trb)
730 break;
731
732 next_trb(xhci, ep_ring, &seg, &trb);
733 }
734}
735
736static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
737 struct xhci_virt_ep *ep)
738{
739 ep->ep_state &= ~EP_STOP_CMD_PENDING;
740
741 del_timer(&ep->stop_cmd_timer);
742}
743

/* Must be called with xhci->lock held in interrupt context */
748static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
749 struct xhci_td *cur_td, int status)
750{
751 struct urb *urb = cur_td->urb;
752 struct urb_priv *urb_priv = urb->hcpriv;
753 struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
754
755 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
756 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
757 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
758 if (xhci->quirks & XHCI_AMD_PLL_FIX)
759 usb_amd_quirk_pll_enable();
760 }
761 }
762 xhci_urb_free_priv(urb_priv);
763 usb_hcd_unlink_urb_from_ep(hcd, urb);
764 trace_xhci_urb_giveback(urb);
765 usb_hcd_giveback_urb(hcd, urb, status);
766}
767
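/* Unmap a TD's bounce buffer and, for IN transfers, copy the data back */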
768static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
769 struct xhci_ring *ring, struct xhci_td *td)
770{
771 struct device *dev = xhci_to_hcd(xhci)->self.controller;
772 struct xhci_segment *seg = td->bounce_seg;
773 struct urb *urb = td->urb;
774 size_t len;
775
776 if (!ring || !seg || !urb)
777 return;
778
779 if (usb_urb_dir_out(urb)) {
780 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
781 DMA_TO_DEVICE);
782 return;
783 }
784
785 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
786 DMA_FROM_DEVICE);
787
788 if (urb->num_sgs) {
789 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
790 seg->bounce_len, seg->bounce_offs);
791 if (len != seg->bounce_len)
792 xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
793 len, seg->bounce_len);
794 } else {
795 memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
796 seg->bounce_len);
797 }
798 seg->bounce_len = 0;
799 seg->bounce_offs = 0;
800}
801
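/*
 * Remove a finished or cancelled TD from its lists and, if it was the last TD
 * of its URB, give the URB back to the USB core with the final status.
 */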
802static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
803 struct xhci_ring *ep_ring, int status)
804{
805 struct urb *urb = NULL;
806
807
808 urb = td->urb;
809
810
811 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
812
	/*
	 * Do one last check of the actual transfer length.  If the host
	 * controller said we transferred more data than the buffer length,
	 * urb->actual_length will be a very big number (since it's unsigned).
	 * Play it safe and say we didn't transfer anything.
	 */
818 if (urb->actual_length > urb->transfer_buffer_length) {
819 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
820 urb->transfer_buffer_length, urb->actual_length);
821 urb->actual_length = 0;
822 status = 0;
823 }
824
825 if (!list_empty(&td->td_list))
826 list_del_init(&td->td_list);
827
828 if (!list_empty(&td->cancelled_td_list))
829 list_del_init(&td->cancelled_td_list);
830
831 inc_td_cnt(urb);
832
833 if (last_td_in_urb(td)) {
834 if ((urb->actual_length != urb->transfer_buffer_length &&
835 (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
836 (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
837 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
838 urb, urb->actual_length,
839 urb->transfer_buffer_length, status);
840
841
842 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
843 status = 0;
844 if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
845 (ep_ring->stream_timeout_handler == true)) {
846
847
848
849
850 xhci_giveback_urb_in_irq(xhci, td, -EAGAIN);
851 ep_ring->stream_timeout_handler = false;
852 } else
853 xhci_giveback_urb_in_irq(xhci, td, status);
854 }
855
856 return 0;
857}
858

/* Give back cancelled TDs that have been cleared from the xHC's cache */
861static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
862{
863 struct xhci_ring *ring;
864 struct xhci_td *td, *tmp_td;
865
866 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
867 cancelled_td_list) {
868
869 ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
870
871 if (td->cancel_status == TD_CLEARED) {
872 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
873 __func__, td->urb);
874 xhci_td_cleanup(ep->xhci, td, ring, td->status);
875 } else {
876 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
877 __func__, td->urb, td->cancel_status);
878 }
879 if (ep->xhci->xhc_state & XHCI_STATE_DYING)
880 return;
881 }
882}
883
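/* Allocate and queue a Reset Endpoint command for the given endpoint */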
884static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
885 unsigned int ep_index, enum xhci_ep_reset_type reset_type)
886{
887 struct xhci_command *command;
888 int ret = 0;
889
890 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
891 if (!command) {
892 ret = -ENOMEM;
893 goto done;
894 }
895
896 xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
897 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
898 ep_index, slot_id);
899
900 ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
901done:
902 if (ret)
903 xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
904 slot_id, ep_index, ret);
905 return ret;
906}
907
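/*
 * Recover a halted endpoint: for a hard reset remember the TD that halted it
 * on the cancelled list, then queue a Reset Endpoint command and ring the
 * command doorbell.
 */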
908static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
909 struct xhci_virt_ep *ep, unsigned int stream_id,
910 struct xhci_td *td,
911 enum xhci_ep_reset_type reset_type)
912{
913 unsigned int slot_id = ep->vdev->slot_id;
914 int err;
915
	/*
	 * Avoid resetting the endpoint if the link is inactive; the device
	 * will be reset soon to recover the link, so don't do anything here.
	 */
920 if (ep->vdev->flags & VDEV_PORT_ERROR)
921 return -ENODEV;
922
923
924 if (reset_type == EP_HARD_RESET) {
925 ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
926 if (td && list_empty(&td->cancelled_td_list)) {
927 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
928 td->cancel_status = TD_HALTED;
929 }
930 }
931
932 if (ep->ep_state & EP_HALTED) {
933 xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
934 ep->ep_index);
935 return 0;
936 }
937
938 err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
939 if (err)
940 return err;
941
942 ep->ep_state |= EP_HALTED;
943
944 xhci_ring_cmd_db(xhci);
945
946 return 0;
947}
948

/*
 * Fix up the endpoint ring so the hardware stops executing cancelled TDs.
 *
 * Cancelled TDs are turned into no-op TRBs.  If the hardware may have cached
 * one of them, the ring dequeue pointer must instead be moved past that TD
 * with a Set TR Dequeue Pointer command, which is queued at the end for the
 * last such TD found (cached_td).
 */
958static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
959{
960 struct xhci_hcd *xhci;
961 struct xhci_td *td = NULL;
962 struct xhci_td *tmp_td = NULL;
963 struct xhci_td *cached_td = NULL;
964 struct xhci_ring *ring;
965 u64 hw_deq;
966 unsigned int slot_id = ep->vdev->slot_id;
967 int err;
968
969 xhci = ep->xhci;
970
971 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
972 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
973 "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
974 (unsigned long long)xhci_trb_virt_to_dma(
975 td->start_seg, td->first_trb),
976 td->urb->stream_id, td->urb);
977 list_del_init(&td->td_list);
978 ring = xhci_urb_to_transfer_ring(xhci, td->urb);
979 if (!ring) {
980 xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
981 td->urb, td->urb->stream_id);
982 continue;
983 }
		/*
		 * If the ring stopped on the TD we need to cancel, we have to
		 * move the xHC endpoint ring dequeue pointer past it.  Rings
		 * halted on a STALL may show hw_deq already past the TD but
		 * still need a Set TR Deq command to flush the xHC's cache.
		 */
990 hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
991 td->urb->stream_id);
992 hw_deq &= ~0xf;
993
994 if (td->cancel_status == TD_HALTED ||
995 trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
996 switch (td->cancel_status) {
997 case TD_CLEARED:
998 case TD_CLEARING_CACHE:
999 break;
1000 case TD_DIRTY:
1001 case TD_HALTED:
1002 td->cancel_status = TD_CLEARING_CACHE;
1003 if (cached_td)
1004
1005 xhci_dbg(xhci,
1006 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
1007 td->urb->stream_id, td->urb,
1008 cached_td->urb->stream_id, cached_td->urb);
1009 cached_td = td;
1010 break;
1011 }
1012 } else {
1013 td_to_noop(xhci, ring, td, false);
1014 td->cancel_status = TD_CLEARED;
1015 }
1016 }
1017
1018
1019 if (!cached_td)
1020 return 0;
1021
1022 err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
1023 cached_td->urb->stream_id,
1024 cached_td);
1025 if (err) {
1026
1027 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
1028 if (td->cancel_status != TD_CLEARING_CACHE)
1029 continue;
1030 xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
1031 td->urb);
1032 td_to_noop(xhci, ring, td, false);
1033 td->cancel_status = TD_CLEARED;
1034 }
1035 }
1036 return 0;
1037}
1038

/*
 * Returns the TD the endpoint ring halted on, or NULL.
 * Only for endpoints without streams (stream ID 0 is used for the lookup).
 */
1043static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
1044{
1045 struct xhci_td *td;
1046 u64 hw_deq;
1047
1048 if (!list_empty(&ep->ring->td_list)) {
1049 hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
1050 hw_deq &= ~0xf;
1051 td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
1052 if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
1053 td->last_trb, hw_deq, false))
1054 return td;
1055 }
1056 return NULL;
1057}
1058

/*
 * When we get a completion for a Stop Endpoint command, stop the watchdog
 * timer, invalidate any cancelled TDs (turning them into no-ops or moving
 * the hardware dequeue pointer past them), give the cancelled URBs back to
 * their drivers, and restart any rings that still have work queued.
 */
1069static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
1070 union xhci_trb *trb, u32 comp_code)
1071{
1072 unsigned int ep_index;
1073 struct xhci_virt_ep *ep;
1074 struct xhci_ep_ctx *ep_ctx;
1075 struct xhci_td *td = NULL;
1076 enum xhci_ep_reset_type reset_type;
1077 struct xhci_command *command;
1078 int err;
1079
1080 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
1081 if (!xhci->devs[slot_id])
1082 xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
1083 slot_id);
1084 return;
1085 }
1086
1087 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1088 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1089 if (!ep)
1090 return;
1091
1092 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1093
1094 trace_xhci_handle_cmd_stop_ep(ep_ctx);
1095
1096 if (comp_code == COMP_CONTEXT_STATE_ERROR) {
		/*
		 * A Context State Error means the stop command raced with the
		 * endpoint changing state.  If the endpoint halted, reset the
		 * host side endpoint first; the TD we halted on gets -EPROTO
		 * unless it was cancelled (a genuine stall will surface as
		 * -EPIPE on the next transfer).  For stream endpoints we can't
		 * find the halted TD, so do a soft reset instead.  If the
		 * endpoint is running again, the stop command raced with a
		 * doorbell ring, so simply queue a new stop endpoint command.
		 */
1111 switch (GET_EP_CTX_STATE(ep_ctx)) {
1112 case EP_STATE_HALTED:
1113 xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
1114 if (ep->ep_state & EP_HAS_STREAMS) {
1115 reset_type = EP_SOFT_RESET;
1116 } else {
1117 reset_type = EP_HARD_RESET;
1118 td = find_halted_td(ep);
1119 if (td)
1120 td->status = -EPROTO;
1121 }
1122
1123 err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
1124 reset_type);
1125 if (err)
1126 break;
1127 xhci_stop_watchdog_timer_in_irq(xhci, ep);
1128 return;
		case EP_STATE_RUNNING:
			/* Race: HW handled the stop ep cmd before the ep was running */
			xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");

			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
			if (!command) {
				/* avoid queueing a stop ep command with a NULL command below */
				xhci_stop_watchdog_timer_in_irq(xhci, ep);
				return;
			}
			mod_timer(&ep->stop_cmd_timer,
				  jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
			xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
			xhci_ring_cmd_db(xhci);

			return;
1143 default:
1144 break;
1145 }
1146 }
1147
1148 xhci_invalidate_cancelled_tds(ep);
1149 xhci_stop_watchdog_timer_in_irq(xhci, ep);
1150
1151
1152 xhci_giveback_invalidated_tds(ep);
1153 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1154}
1155
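/* Give back every URB still queued on this ring with -ESHUTDOWN */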
1156static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
1157{
1158 struct xhci_td *cur_td;
1159 struct xhci_td *tmp;
1160
1161 list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
1162 list_del_init(&cur_td->td_list);
1163
1164 if (!list_empty(&cur_td->cancelled_td_list))
1165 list_del_init(&cur_td->cancelled_td_list);
1166
1167 xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
1168
1169 inc_td_cnt(cur_td->urb);
1170 if (last_td_in_urb(cur_td))
1171 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1172 }
1173}
1174
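/* Kill all queued and cancelled URBs on an endpoint (and all its stream rings) */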
1175static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
1176 int slot_id, int ep_index)
1177{
1178 struct xhci_td *cur_td;
1179 struct xhci_td *tmp;
1180 struct xhci_virt_ep *ep;
1181 struct xhci_ring *ring;
1182
1183 ep = &xhci->devs[slot_id]->eps[ep_index];
1184 if ((ep->ep_state & EP_HAS_STREAMS) ||
1185 (ep->ep_state & EP_GETTING_NO_STREAMS)) {
1186 int stream_id;
1187
1188 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
1189 stream_id++) {
1190 ring = ep->stream_info->stream_rings[stream_id];
1191 if (!ring)
1192 continue;
1193
1194 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1195 "Killing URBs for slot ID %u, ep index %u, stream %u",
1196 slot_id, ep_index, stream_id);
1197 xhci_kill_ring_urbs(xhci, ring);
1198 }
1199 } else {
1200 ring = ep->ring;
1201 if (!ring)
1202 return;
1203 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1204 "Killing URBs for slot ID %u, ep index %u",
1205 slot_id, ep_index);
1206 xhci_kill_ring_urbs(xhci, ring);
1207 }
1208
1209 list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
1210 cancelled_td_list) {
1211 list_del_init(&cur_td->cancelled_td_list);
1212 inc_td_cnt(cur_td->urb);
1213
1214 if (last_td_in_urb(cur_td))
1215 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1216 }
1217}
1218

/*
 * The host controller stopped responding: mark it dying, clean up all pending
 * commands, and give back all URBs on every endpoint of every device with
 * -ESHUTDOWN.  Called with xhci->lock held.
 */
1228void xhci_hc_died(struct xhci_hcd *xhci)
1229{
1230 int i, j;
1231
1232 if (xhci->xhc_state & XHCI_STATE_DYING)
1233 return;
1234
1235 xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
1236 xhci->xhc_state |= XHCI_STATE_DYING;
1237
1238 xhci_cleanup_command_queue(xhci);
1239
1240
1241 for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
1242 if (!xhci->devs[i])
1243 continue;
1244 for (j = 0; j < 31; j++)
1245 xhci_kill_endpoint_urbs(xhci, i, j);
1246 }
1247
1248
1249 if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
1250 usb_hc_died(xhci_to_hcd(xhci));
1251}
1252

/*
 * xhci_stream_timeout - stream ring timer callback (XHCI_STREAM_QUIRK).
 * If TDs are still queued on the stream ring when the timer fires, mark them
 * cancelled and queue a Stop Endpoint command so they can be given back.
 */
1261void xhci_stream_timeout(struct timer_list *arg)
1262{
1263 struct xhci_hcd *xhci;
1264 struct xhci_virt_ep *ep;
1265 struct xhci_ring *ep_ring;
1266 unsigned int slot_id, ep_index, stream_id;
1267 struct xhci_td *td = NULL;
1268 struct urb *urb = NULL;
1269 struct urb_priv *urb_priv;
1270 struct xhci_command *command;
1271 unsigned long flags;
1272 int i;
1273
1274 ep_ring = from_timer(ep_ring, arg, stream_timer);
1275 xhci = ep_ring->xhci;
1276
1277 spin_lock_irqsave(&xhci->lock, flags);
1278
1279 if (!list_empty(&ep_ring->td_list)) {
1280 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
1281 urb = td->urb;
1282 urb_priv = urb->hcpriv;
1283
1284 slot_id = urb->dev->slot_id;
1285 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1286 stream_id = ep_ring->stream_id;
1287 ep = &xhci->devs[slot_id]->eps[ep_index];
1288 ep_ring->stream_timeout_handler = true;
1289
1290
1291 del_timer(&ep_ring->stream_timer);
1292
1293 for (i = 0; i < urb_priv->num_tds; i++) {
1294 td = &urb_priv->td[i];
1295 list_add_tail(&td->cancelled_td_list,
1296 &ep->cancelled_td_list);
1297 }
1298
1299
1300
1301
1302 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1303 command = xhci_alloc_command(xhci, false,
1304 GFP_ATOMIC);
1305 if (!command) {
1306 xhci_warn(xhci,
1307 "%s: Failed to allocate command\n",
1308 __func__);
1309 spin_unlock_irqrestore(&xhci->lock, flags);
1310 return;
1311 }
1312
1313 ep->ep_state |= EP_STOP_CMD_PENDING;
1314 ep->stop_cmd_timer.expires = jiffies +
1315 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1316 add_timer(&ep->stop_cmd_timer);
1317 xhci_queue_stop_endpoint(xhci, command,
1318 urb->dev->slot_id, ep_index, 0);
1319 xhci_ring_cmd_db(xhci);
1320 }
1321
1322 spin_unlock_irqrestore(&xhci->lock, flags);
1323 return;
1324 }
1325
1326 spin_unlock_irqrestore(&xhci->lock, flags);
1327
1328 del_timer(&ep_ring->stream_timer);
1329}
1330

/*
 * Watchdog timer callback for when a Stop Endpoint command fails to complete.
 * Assume the host controller died: halt it, mark it as dying, and clean up so
 * all pending URBs are given back.
 *
 * Bails out early if the stop command completed (or a new one was queued)
 * just before the timer fired.
 */
1348void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
1349{
1350 struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
1351 struct xhci_hcd *xhci = ep->xhci;
1352 unsigned long flags;
1353 u32 usbsts;
1354 char str[XHCI_MSG_MAX];
1355
1356 spin_lock_irqsave(&xhci->lock, flags);
1357
1358
1359 if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
1360 timer_pending(&ep->stop_cmd_timer)) {
1361 spin_unlock_irqrestore(&xhci->lock, flags);
1362 xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
1363 return;
1364 }
1365 usbsts = readl(&xhci->op_regs->status);
1366
1367 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
1368 xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
1369
1370 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1371
1372 xhci_halt(xhci);
1373
1374
1375
1376
1377
1378
1379 xhci_hc_died(xhci);
1380
1381 spin_unlock_irqrestore(&xhci->lock, flags);
1382 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1383 "xHCI host controller is dead.");
1384}
1385
1386static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1387 struct xhci_virt_device *dev,
1388 struct xhci_ring *ep_ring,
1389 unsigned int ep_index)
1390{
1391 union xhci_trb *dequeue_temp;
1392 int num_trbs_free_temp;
1393 bool revert = false;
1394
1395 num_trbs_free_temp = ep_ring->num_trbs_free;
1396 dequeue_temp = ep_ring->dequeue;

	/*
	 * If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the loop below.  So if the dequeue pointer starts
	 * out on a link TRB, advance it into the next segment first.
	 */
1404 if (trb_is_link(ep_ring->dequeue)) {
1405 ep_ring->deq_seg = ep_ring->deq_seg->next;
1406 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1407 }
1408
1409 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1410
1411 ep_ring->num_trbs_free++;
1412 ep_ring->dequeue++;
1413 if (trb_is_link(ep_ring->dequeue)) {
1414 if (ep_ring->dequeue ==
1415 dev->eps[ep_index].queued_deq_ptr)
1416 break;
1417 ep_ring->deq_seg = ep_ring->deq_seg->next;
1418 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1419 }
1420 if (ep_ring->dequeue == dequeue_temp) {
1421 revert = true;
1422 break;
1423 }
1424 }
1425
1426 if (revert) {
1427 xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1428 ep_ring->num_trbs_free = num_trbs_free_temp;
1429 }
1430}
1431

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * update the software dequeue pointer to match, give back any cancelled TDs
 * whose cache was just cleared, clear the SET_DEQ_PENDING flag and ring the
 * doorbell(s) to restart the ring(s).
 */
1439static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1440 union xhci_trb *trb, u32 cmd_comp_code)
1441{
1442 unsigned int ep_index;
1443 unsigned int stream_id;
1444 struct xhci_ring *ep_ring;
1445 struct xhci_virt_ep *ep;
1446 struct xhci_ep_ctx *ep_ctx;
1447 struct xhci_slot_ctx *slot_ctx;
1448 struct xhci_td *td, *tmp_td;
1449
1450 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1451 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1452 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1453 if (!ep)
1454 return;
1455
1456 ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
1457 if (!ep_ring) {
1458 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
1459 stream_id);
1460
1461 goto cleanup;
1462 }
1463
1464 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1465 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
1466 trace_xhci_handle_cmd_set_deq(slot_ctx);
1467 trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
1468
1469 if (cmd_comp_code != COMP_SUCCESS) {
1470 unsigned int ep_state;
1471 unsigned int slot_state;
1472
1473 switch (cmd_comp_code) {
1474 case COMP_TRB_ERROR:
1475 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
1476 break;
1477 case COMP_CONTEXT_STATE_ERROR:
1478 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
1479 ep_state = GET_EP_CTX_STATE(ep_ctx);
1480 slot_state = le32_to_cpu(slot_ctx->dev_state);
1481 slot_state = GET_SLOT_STATE(slot_state);
1482 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1483 "Slot state = %u, EP state = %u",
1484 slot_state, ep_state);
1485 break;
1486 case COMP_SLOT_NOT_ENABLED_ERROR:
1487 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1488 slot_id);
1489 break;
1490 default:
1491 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1492 cmd_comp_code);
1493 break;
1494 }
1495
1496
1497
1498
1499
1500
1501 } else {
1502 u64 deq;
1503
1504 if (ep->ep_state & EP_HAS_STREAMS) {
1505 struct xhci_stream_ctx *ctx =
1506 &ep->stream_info->stream_ctx_array[stream_id];
1507 deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1508 } else {
1509 deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1510 }
1511 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1512 "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1513 if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1514 ep->queued_deq_ptr) == deq) {
1515
1516
1517
1518 update_ring_for_set_deq_completion(xhci, ep->vdev,
1519 ep_ring, ep_index);
1520 } else {
1521 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1522 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1523 ep->queued_deq_seg, ep->queued_deq_ptr);
1524 }
1525 }
1526
1527 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
1528 cancelled_td_list) {
1529 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
1530 if (td->cancel_status == TD_CLEARING_CACHE) {
1531 td->cancel_status = TD_CLEARED;
1532 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
1533 __func__, td->urb);
1534 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
1535 } else {
1536 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
1537 __func__, td->urb, td->cancel_status);
1538 }
1539 }
1540cleanup:
1541 ep->ep_state &= ~SET_DEQ_PENDING;
1542 ep->queued_deq_seg = NULL;
1543 ep->queued_deq_ptr = NULL;
1544
1545 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1546}
1547
1548static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1549 union xhci_trb *trb, u32 cmd_comp_code)
1550{
1551 struct xhci_virt_ep *ep;
1552 struct xhci_ep_ctx *ep_ctx;
1553 unsigned int ep_index;
1554
1555 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1556 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1557 if (!ep)
1558 return;
1559
1560 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1561 trace_xhci_handle_cmd_reset_ep(ep_ctx);
1562
1563
1564
1565
1566 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1567 "Ignoring reset ep completion code of %u", cmd_comp_code);
1568
1569
1570 xhci_invalidate_cancelled_tds(ep);
1571
1572 if (xhci->quirks & XHCI_RESET_EP_QUIRK)
1573 xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
1574
1575 ep->ep_state &= ~EP_HALTED;
1576
1577 xhci_giveback_invalidated_tds(ep);
1578
1579
1580 if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
1581 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1582}
1583
1584static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1585 struct xhci_command *command, u32 cmd_comp_code)
1586{
1587 if (cmd_comp_code == COMP_SUCCESS)
1588 command->slot_id = slot_id;
1589 else
1590 command->slot_id = 0;
1591}
1592
1593static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1594{
1595 struct xhci_virt_device *virt_dev;
1596 struct xhci_slot_ctx *slot_ctx;
1597
1598 virt_dev = xhci->devs[slot_id];
1599 if (!virt_dev)
1600 return;
1601
1602 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1603 trace_xhci_handle_cmd_disable_slot(slot_ctx);
1604
1605 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1606
1607 xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1608 xhci_free_virt_device(xhci, slot_id);
1609}
1610
1611static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1612 u32 cmd_comp_code)
1613{
1614 struct xhci_virt_device *virt_dev;
1615 struct xhci_input_control_ctx *ctrl_ctx;
1616 struct xhci_ep_ctx *ep_ctx;
1617 unsigned int ep_index;
1618 unsigned int ep_state;
1619 u32 add_flags, drop_flags;
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629 virt_dev = xhci->devs[slot_id];
1630 if (!virt_dev)
1631 return;
1632 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1633 if (!ctrl_ctx) {
1634 xhci_warn(xhci, "Could not get input context, bad type.\n");
1635 return;
1636 }
1637
1638 add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1639 drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1640
1641 ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1642
1643 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
1644 trace_xhci_handle_cmd_config_ep(ep_ctx);
1645
1646
1647
1648
1649
1650
1651
1652 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1653 ep_index != (unsigned int) -1 &&
1654 add_flags - SLOT_FLAG == drop_flags) {
1655 ep_state = virt_dev->eps[ep_index].ep_state;
1656 if (!(ep_state & EP_HALTED))
1657 return;
1658 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1659 "Completed config ep cmd - "
1660 "last ep index = %d, state = %d",
1661 ep_index, ep_state);
1662
1663 virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1664 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1665 return;
1666 }
1667 return;
1668}
1669
1670static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
1671{
1672 struct xhci_virt_device *vdev;
1673 struct xhci_slot_ctx *slot_ctx;
1674
1675 vdev = xhci->devs[slot_id];
1676 if (!vdev)
1677 return;
1678 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1679 trace_xhci_handle_cmd_addr_dev(slot_ctx);
1680}
1681
1682static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
1683{
1684 struct xhci_virt_device *vdev;
1685 struct xhci_slot_ctx *slot_ctx;
1686
1687 vdev = xhci->devs[slot_id];
1688 if (!vdev) {
1689 xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
1690 slot_id);
1691 return;
1692 }
1693 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1694 trace_xhci_handle_cmd_reset_dev(slot_ctx);
1695
1696 xhci_dbg(xhci, "Completed reset device command.\n");
1697}
1698
1699static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1700 struct xhci_event_cmd *event)
1701{
1702 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1703 xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
1704 return;
1705 }
1706 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1707 "NEC firmware version %2x.%02x",
1708 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1709 NEC_FW_MINOR(le32_to_cpu(event->status)));
1710}
1711
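/* Remove a command from the command list, then complete or free it */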
1712static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1713{
1714 list_del(&cmd->cmd_list);
1715
1716 if (cmd->completion) {
1717 cmd->status = status;
1718 complete(cmd->completion);
1719 } else {
1720 kfree(cmd);
1721 }
1722}
1723
1724void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1725{
1726 struct xhci_command *cur_cmd, *tmp_cmd;
1727 xhci->current_cmd = NULL;
1728 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1729 xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1730}
1731
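/*
 * Command timeout handler: if the stuck command is still running, abort the
 * command ring; otherwise handle the stopped-ring and removed-host cases.
 */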
1732void xhci_handle_command_timeout(struct work_struct *work)
1733{
1734 struct xhci_hcd *xhci;
1735 unsigned long flags;
1736 u64 hw_ring_state;
1737
1738 xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
1739
1740 spin_lock_irqsave(&xhci->lock, flags);
1741
	/*
	 * If the timeout work is already pending, or current_cmd is NULL, we
	 * raced with command completion; the command has been handled, so
	 * just return.
	 */
1746 if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
1747 spin_unlock_irqrestore(&xhci->lock, flags);
1748 return;
1749 }
1750
1751 xhci->current_cmd->status = COMP_COMMAND_ABORTED;
1752
1753
1754 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1755 if (hw_ring_state == ~(u64)0) {
1756 xhci_hc_died(xhci);
1757 goto time_out_completed;
1758 }
1759
1760 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1761 (hw_ring_state & CMD_RING_RUNNING)) {
1762
1763 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
1764 xhci_dbg(xhci, "Command timeout\n");
1765 xhci_abort_cmd_ring(xhci, flags);
1766 goto time_out_completed;
1767 }
1768
1769
1770 if (xhci->xhc_state & XHCI_STATE_REMOVING) {
1771 xhci_dbg(xhci, "host removed, ring start fail?\n");
1772 xhci_cleanup_command_queue(xhci);
1773
1774 goto time_out_completed;
1775 }
1776
1777
1778 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1779 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1780
1781time_out_completed:
1782 spin_unlock_irqrestore(&xhci->lock, flags);
1783 return;
1784}
1785
1786static void handle_cmd_completion(struct xhci_hcd *xhci,
1787 struct xhci_event_cmd *event)
1788{
1789 unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1790 u64 cmd_dma;
1791 dma_addr_t cmd_dequeue_dma;
1792 u32 cmd_comp_code;
1793 union xhci_trb *cmd_trb;
1794 struct xhci_command *cmd;
1795 u32 cmd_type;
1796
1797 if (slot_id >= MAX_HC_SLOTS) {
1798 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
1799 return;
1800 }
1801
1802 cmd_dma = le64_to_cpu(event->cmd_trb);
1803 cmd_trb = xhci->cmd_ring->dequeue;
1804
1805 trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
1806
1807 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1808 cmd_trb);
1809
1810
1811
1812
1813 if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
1814 xhci_warn(xhci,
1815 "ERROR mismatched command completion event\n");
1816 return;
1817 }
1818
1819 cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
1820
1821 cancel_delayed_work(&xhci->cmd_timer);
1822
1823 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1824
1825
1826 if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
1827 complete_all(&xhci->cmd_ring_stop_completion);
1828 return;
1829 }
1830
1831 if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1832 xhci_err(xhci,
1833 "Command completion event does not match command\n");
1834 return;
1835 }
1836
	/*
	 * The host aborted the command ring: check whether the current
	 * command was the one meant to be aborted, otherwise continue
	 * normally.  The command ring is stopped now, but the xHC will issue
	 * a Command Ring Stopped event which will cause us to restart it.
	 */
1843 if (cmd_comp_code == COMP_COMMAND_ABORTED) {
1844 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1845 if (cmd->status == COMP_COMMAND_ABORTED) {
1846 if (xhci->current_cmd == cmd)
1847 xhci->current_cmd = NULL;
1848 goto event_handled;
1849 }
1850 }
1851
1852 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1853 switch (cmd_type) {
1854 case TRB_ENABLE_SLOT:
1855 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
1856 break;
1857 case TRB_DISABLE_SLOT:
1858 xhci_handle_cmd_disable_slot(xhci, slot_id);
1859 break;
1860 case TRB_CONFIG_EP:
1861 if (!cmd->completion)
1862 xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
1863 break;
1864 case TRB_EVAL_CONTEXT:
1865 break;
1866 case TRB_ADDR_DEV:
1867 xhci_handle_cmd_addr_dev(xhci, slot_id);
1868 break;
1869 case TRB_STOP_RING:
1870 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1871 le32_to_cpu(cmd_trb->generic.field[3])));
1872 if (!cmd->completion)
1873 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
1874 cmd_comp_code);
1875 break;
1876 case TRB_SET_DEQ:
1877 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1878 le32_to_cpu(cmd_trb->generic.field[3])));
1879 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1880 break;
1881 case TRB_CMD_NOOP:
1882
1883 if (cmd->status == COMP_COMMAND_RING_STOPPED)
1884 cmd_comp_code = COMP_COMMAND_RING_STOPPED;
1885 break;
1886 case TRB_RESET_EP:
1887 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1888 le32_to_cpu(cmd_trb->generic.field[3])));
1889 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1890 break;
1891 case TRB_RESET_DEV:
1892
1893
1894
1895 slot_id = TRB_TO_SLOT_ID(
1896 le32_to_cpu(cmd_trb->generic.field[3]));
1897 xhci_handle_cmd_reset_dev(xhci, slot_id);
1898 break;
1899 case TRB_NEC_GET_FW:
1900 xhci_handle_cmd_nec_get_fw(xhci, event);
1901 break;
1902 default:
1903
1904 xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
1905 break;
1906 }
1907
1908
1909 if (!list_is_singular(&xhci->cmd_list)) {
1910 xhci->current_cmd = list_first_entry(&cmd->cmd_list,
1911 struct xhci_command, cmd_list);
1912 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
1913 } else if (xhci->current_cmd == cmd) {
1914 xhci->current_cmd = NULL;
1915 }
1916
1917event_handled:
1918 xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1919
1920 inc_deq(xhci, xhci->cmd_ring);
1921}
1922
1923static void handle_vendor_event(struct xhci_hcd *xhci,
1924 union xhci_trb *event, u32 trb_type)
1925{
1926 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1927 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1928 handle_cmd_completion(xhci, &event->event_cmd);
1929}
1930
1931static void handle_device_notification(struct xhci_hcd *xhci,
1932 union xhci_trb *event)
1933{
1934 u32 slot_id;
1935 struct usb_device *udev;
1936
1937 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1938 if (!xhci->devs[slot_id]) {
1939 xhci_warn(xhci, "Device Notification event for "
1940 "unused slot %u\n", slot_id);
1941 return;
1942 }
1943
1944 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1945 slot_id);
1946 udev = xhci->devs[slot_id]->udev;
1947 if (udev && udev->parent)
1948 usb_wakeup_notification(udev->parent, udev->portnum);
1949}
1950

/*
 * Quirk handler for an erratum on the Cavium ThunderX2 xHCI controller:
 * after a connection to a USB 1.x device, a following USB 2 device may
 * enumerate as USB 1.  Reset the PHY after a high/full/low-speed disconnect,
 * retrying up to four times while checking the PLL lock status.
 */
1963static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1964{
1965 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1966 u32 pll_lock_check;
1967 u32 retry_count = 4;
1968
1969 do {
1970
1971 writel(0x6F, hcd->regs + 0x1048);
1972 udelay(10);
1973
1974 writel(0x7F, hcd->regs + 0x1048);
1975 udelay(200);
1976 pll_lock_check = readl(hcd->regs + 0x1070);
1977 } while (!(pll_lock_check & 0x1) && --retry_count);
1978}
1979
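/*
 * Port Status Change event handler: work out which root hub port changed,
 * handle resume signalling, and kick the hub driver into polling the port.
 */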
1980static void handle_port_status(struct xhci_hcd *xhci,
1981 union xhci_trb *event)
1982{
1983 struct usb_hcd *hcd;
1984 u32 port_id;
1985 u32 portsc, cmd_reg;
1986 int max_ports;
1987 int slot_id;
1988 unsigned int hcd_portnum;
1989 struct xhci_bus_state *bus_state;
1990 bool bogus_port_status = false;
1991 struct xhci_port *port;
1992
1993
1994 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
1995 xhci_warn(xhci,
1996 "WARN: xHC returned failed port status event\n");
1997
1998 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1999 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2000
2001 if ((port_id <= 0) || (port_id > max_ports)) {
2002 xhci_warn(xhci, "Port change event with invalid port ID %d\n",
2003 port_id);
2004 inc_deq(xhci, xhci->event_ring);
2005 return;
2006 }
2007
2008 port = &xhci->hw_ports[port_id - 1];
2009 if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
2010 xhci_warn(xhci, "Port change event, no port for port ID %u\n",
2011 port_id);
2012 bogus_port_status = true;
2013 goto cleanup;
2014 }
2015
2016
2017 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
2018 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
2019 bogus_port_status = true;
2020 goto cleanup;
2021 }
2022
2023 hcd = port->rhub->hcd;
2024 bus_state = &port->rhub->bus_state;
2025 hcd_portnum = port->hcd_portnum;
2026 portsc = readl(port->addr);
2027
2028 xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
2029 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
2030
2031 trace_xhci_handle_port_status(hcd_portnum, portsc);
2032
2033 if (hcd->state == HC_STATE_SUSPENDED) {
2034 xhci_dbg(xhci, "resume root hub\n");
2035 usb_hcd_resume_root_hub(hcd);
2036 }
2037
2038 if (hcd->speed >= HCD_USB3 &&
2039 (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
2040 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
2041 if (slot_id && xhci->devs[slot_id])
2042 xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
2043 }
2044
2045 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
2046 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
2047
2048 cmd_reg = readl(&xhci->op_regs->command);
2049 if (!(cmd_reg & CMD_RUN)) {
2050 xhci_warn(xhci, "xHC is not running.\n");
2051 goto cleanup;
2052 }
2053
2054 if (DEV_SUPERSPEED_ANY(portsc)) {
2055 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
2056
2057
2058
2059
2060 bus_state->port_remote_wakeup |= 1 << hcd_portnum;
2061 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2062 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
2063 xhci_set_link_state(xhci, port, XDEV_U0);
2064
2065
2066
2067 bogus_port_status = true;
2068 goto cleanup;
2069 } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
2070 xhci_dbg(xhci, "resume HS port %d\n", port_id);
2071 bus_state->resume_done[hcd_portnum] = jiffies +
2072 msecs_to_jiffies(USB_RESUME_TIMEOUT);
2073 set_bit(hcd_portnum, &bus_state->resuming_ports);
2074
2075
2076
2077
2078 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2079 mod_timer(&hcd->rh_timer,
2080 bus_state->resume_done[hcd_portnum]);
2081 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
2082 bogus_port_status = true;
2083 }
2084 }
2085
2086 if ((portsc & PORT_PLC) &&
2087 DEV_SUPERSPEED_ANY(portsc) &&
2088 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
2089 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
2090 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
2091 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
2092 complete(&bus_state->u3exit_done[hcd_portnum]);
2093
2094
2095
2096
2097
2098
2099
2100 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
2101 if (slot_id && xhci->devs[slot_id])
2102 xhci_ring_device(xhci, slot_id);
2103 if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
2104 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2105 usb_wakeup_notification(hcd->self.root_hub,
2106 hcd_portnum + 1);
2107 bogus_port_status = true;
2108 goto cleanup;
2109 }
2110 }
2111
2112
2113
2114
2115
2116
2117 if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
2118 test_and_clear_bit(hcd_portnum,
2119 &bus_state->rexit_ports)) {
2120 complete(&bus_state->rexit_done[hcd_portnum]);
2121 bogus_port_status = true;
2122 goto cleanup;
2123 }
2124
2125 if (hcd->speed < HCD_USB3) {
2126 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2127 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
2128 (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
2129 xhci_cavium_reset_phy_quirk(xhci);
2130 }
2131
2132cleanup:
2133
2134 inc_deq(xhci, xhci->event_ring);
2135
	/*
	 * Don't make the USB core poll the roothub if we got a bad port
	 * status change event.  Besides, at that point we can't tell which
	 * roothub (USB 2.0 or USB 3.0) to kick.
	 */
2140 if (bogus_port_status)
2141 return;
2142
2143
2144
2145
2146
2147
2148
2149
2150 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
2151 __func__, hcd->self.busnum);
2152 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2153 spin_unlock(&xhci->lock);
2154
2155 usb_hcd_poll_rh_status(hcd);
2156 spin_lock(&xhci->lock);
2157}
2158

/*
 * A TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, possibly spanning several segments joined by link TRBs.
 * Return the segment that contains suspect_dma if it lies within this TD,
 * otherwise NULL.
 */
2165struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
2166 struct xhci_segment *start_seg,
2167 union xhci_trb *start_trb,
2168 union xhci_trb *end_trb,
2169 dma_addr_t suspect_dma,
2170 bool debug)
2171{
2172 dma_addr_t start_dma;
2173 dma_addr_t end_seg_dma;
2174 dma_addr_t end_trb_dma;
2175 struct xhci_segment *cur_seg;
2176
2177 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
2178 cur_seg = start_seg;
2179
2180 do {
2181 if (start_dma == 0)
2182 return NULL;
2183
2184 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2185 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
2186
2187 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
2188
2189 if (debug)
2190 xhci_warn(xhci,
2191 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
2192 (unsigned long long)suspect_dma,
2193 (unsigned long long)start_dma,
2194 (unsigned long long)end_trb_dma,
2195 (unsigned long long)cur_seg->dma,
2196 (unsigned long long)end_seg_dma);
2197
2198 if (end_trb_dma > 0) {
2199
2200 if (start_dma <= end_trb_dma) {
2201 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
2202 return cur_seg;
2203 } else {
2204
2205
2206
2207 if ((suspect_dma >= start_dma &&
2208 suspect_dma <= end_seg_dma) ||
2209 (suspect_dma >= cur_seg->dma &&
2210 suspect_dma <= end_trb_dma))
2211 return cur_seg;
2212 }
2213 return NULL;
2214 } else {
2215
2216 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
2217 return cur_seg;
2218 }
2219 cur_seg = cur_seg->next;
2220 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2221 } while (cur_seg != start_seg);
2222
2223 return NULL;
2224}
2225
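/*
 * Ask the hub driver to clear the TT buffer for a failed transaction of a
 * low/full-speed device behind a high-speed hub, unless a clear is already
 * in progress for this endpoint.
 */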
2226static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
2227 struct xhci_virt_ep *ep)
2228{
2229
2230
2231
2232
2233 if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
2234 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
2235 !(ep->ep_state & EP_CLEARING_TT)) {
2236 ep->ep_state |= EP_CLEARING_TT;
2237 td->urb->ep->hcpriv = td->urb->dev;
2238 if (usb_hub_clear_tt_buffer(td->urb))
2239 ep->ep_state &= ~EP_CLEARING_TT;
2240 }
2241}
2242
/*
 * Check if an error has halted the endpoint ring.  The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a
 * stall.  However, a babble and other errors also halt the endpoint ring,
 * and the class driver won't clear the halt in that case, so we need to
 * issue a Set Transfer Ring Dequeue Pointer command manually.
 */
2249static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
2250 struct xhci_ep_ctx *ep_ctx,
2251 unsigned int trb_comp_code)
2252{
2253
2254 if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
2255 trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
2256 trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
2257
2258
2259
2260
2261
2262
2263 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
2264 return 1;
2265
2266 return 0;
2267}
2268
2269int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
2270{
2271 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
2272
2273
2274
2275 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
2276 trb_comp_code);
2277 xhci_dbg(xhci, "Treating code as success.\n");
2278 return 1;
2279 }
2280 return 0;
2281}
2282
2283static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2284 struct xhci_ring *ep_ring, struct xhci_td *td,
2285 u32 trb_comp_code)
2286{
2287 struct xhci_ep_ctx *ep_ctx;
2288
2289 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2290
2291 switch (trb_comp_code) {
2292 case COMP_STOPPED_LENGTH_INVALID:
2293 case COMP_STOPPED_SHORT_PACKET:
2294 case COMP_STOPPED:
		/*
		 * The "Stop Endpoint" completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't
		 * update the ring dequeue pointer or take it off any lists
		 * yet.
		 */
2300 return 0;
2301 case COMP_USB_TRANSACTION_ERROR:
2302 case COMP_BABBLE_DETECTED_ERROR:
2303 case COMP_SPLIT_TRANSACTION_ERROR:
		/*
		 * These errors halt the endpoint.  If the endpoint context
		 * still shows it halted, clear the hub TT buffer and
		 * hard-reset the endpoint below.  If it is no longer halted,
		 * the halt is already being resolved, so clean up the TD
		 * normally instead.
		 */
2315 if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
2316
2317
2318
2319
2320
2321 if ((ep->ep_state & EP_HALTED) &&
2322 !list_empty(&td->cancelled_td_list)) {
2323 xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
2324 (unsigned long long)xhci_trb_virt_to_dma(
2325 td->start_seg, td->first_trb));
2326 return 0;
2327 }
2328
2329 break;
2330 }
2331
2332 xhci_clear_hub_tt_buffer(xhci, td, ep);
2333 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2334 EP_HARD_RESET);
2335 return 0;
2336 case COMP_STALL_ERROR:
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
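/*
 * A stall leaves the xHC endpoint halted, including protocol stalls on
 * the default control endpoint. Issue a hard reset to clear the host
 * side halt; the hub TT buffer is only cleared for non-default endpoints,
 * since a control endpoint stall is a protocol stall rather than a
 * functional one.
 */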
2347 if (ep->ep_index != 0)
2348 xhci_clear_hub_tt_buffer(xhci, td, ep);
2349
2350 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2351 EP_HARD_RESET);
2352
2353 return 0;
2354 default:
2355 break;
2356 }
2357
2358
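/* Update the ring's dequeue pointer past this TD */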
2359 ep_ring->dequeue = td->last_trb;
2360 ep_ring->deq_seg = td->last_trb_seg;
2361 ep_ring->num_trbs_free += td->num_trbs - 1;
2362 inc_deq(xhci, ep_ring);
2363
2364 return xhci_td_cleanup(xhci, td, ep_ring, td->status);
2365}
2366
2367
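/* Sum TRB lengths from the ring dequeue pointer up to stop_trb, excluding stop_trb */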
2368static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
2369 union xhci_trb *stop_trb)
2370{
2371 u32 sum;
2372 union xhci_trb *trb = ring->dequeue;
2373 struct xhci_segment *seg = ring->deq_seg;
2374
2375 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2376 if (!trb_is_noop(trb) && !trb_is_link(trb))
2377 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
2378 }
2379 return sum;
2380}
2381
2382
2383
2384
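/*
 * Process control TDs, update the URB's actual_length, and call finish_td()
 * once the whole control transfer (or its failure) has been accounted for.
 */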
2385static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2386 struct xhci_ring *ep_ring, struct xhci_td *td,
2387 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2388{
2389 struct xhci_ep_ctx *ep_ctx;
2390 u32 trb_comp_code;
2391 u32 remaining, requested;
2392 u32 trb_type;
2393
2394 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
2395 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2396 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2397 requested = td->urb->transfer_buffer_length;
2398 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2399
2400 switch (trb_comp_code) {
2401 case COMP_SUCCESS:
2402 if (trb_type != TRB_STATUS) {
2403 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
2404 (trb_type == TRB_DATA) ? "data" : "setup");
2405 td->status = -ESHUTDOWN;
2406 break;
2407 }
2408 td->status = 0;
2409 break;
2410 case COMP_SHORT_PACKET:
2411 td->status = 0;
2412 break;
2413 case COMP_STOPPED_SHORT_PACKET:
2414 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2415 td->urb->actual_length = remaining;
2416 else
2417 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
2418 goto finish_td;
2419 case COMP_STOPPED:
2420 switch (trb_type) {
2421 case TRB_SETUP:
2422 td->urb->actual_length = 0;
2423 goto finish_td;
2424 case TRB_DATA:
2425 case TRB_NORMAL:
2426 td->urb->actual_length = requested - remaining;
2427 goto finish_td;
2428 case TRB_STATUS:
2429 td->urb->actual_length = requested;
2430 goto finish_td;
2431 default:
2432 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
2433 trb_type);
2434 goto finish_td;
2435 }
2436 case COMP_STOPPED_LENGTH_INVALID:
2437 goto finish_td;
2438 default:
2439 if (!xhci_requires_manual_halt_cleanup(xhci,
2440 ep_ctx, trb_comp_code))
2441 break;
2442 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
2443 trb_comp_code, ep->ep_index);
2444 fallthrough;
2445 case COMP_STALL_ERROR:
2446
2447 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2448 td->urb->actual_length = requested - remaining;
2449 else if (!td->urb_length_set)
2450 td->urb->actual_length = 0;
2451 goto finish_td;
2452 }
2453
2454
2455 if (trb_type == TRB_SETUP)
2456 goto finish_td;
2457
2458
2459
2460
2461
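/*
 * A data stage event only records the length so far; flag it so the
 * later status stage event does not overwrite actual_length.
 */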
2462 if (trb_type == TRB_DATA ||
2463 trb_type == TRB_NORMAL) {
2464 td->urb_length_set = true;
2465 td->urb->actual_length = requested - remaining;
2466 xhci_dbg(xhci, "Waiting for status stage event\n");
2467 return 0;
2468 }
2469
2470
2471 if (!td->urb_length_set)
2472 td->urb->actual_length = requested;
2473
2474finish_td:
2475 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2476}
2477
2478
2479
2480
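/* Process isochronous TDs, update the URB's iso frame status and actual_length */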
2481static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2482 struct xhci_ring *ep_ring, struct xhci_td *td,
2483 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2484{
2485 struct urb_priv *urb_priv;
2486 int idx;
2487 struct usb_iso_packet_descriptor *frame;
2488 u32 trb_comp_code;
2489 bool sum_trbs_for_length = false;
2490 u32 remaining, requested, ep_trb_len;
2491 int short_framestatus;
2492
2493 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2494 urb_priv = td->urb->hcpriv;
2495 idx = urb_priv->num_tds_done;
2496 frame = &td->urb->iso_frame_desc[idx];
2497 requested = frame->length;
2498 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2499 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2500 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2501 -EREMOTEIO : 0;
2502
2503
2504 switch (trb_comp_code) {
2505 case COMP_SUCCESS:
2506 if (remaining) {
2507 frame->status = short_framestatus;
2508 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2509 sum_trbs_for_length = true;
2510 break;
2511 }
2512 frame->status = 0;
2513 break;
2514 case COMP_SHORT_PACKET:
2515 frame->status = short_framestatus;
2516 sum_trbs_for_length = true;
2517 break;
2518 case COMP_BANDWIDTH_OVERRUN_ERROR:
2519 frame->status = -ECOMM;
2520 break;
2521 case COMP_ISOCH_BUFFER_OVERRUN:
2522 case COMP_BABBLE_DETECTED_ERROR:
2523 frame->status = -EOVERFLOW;
2524 break;
2525 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2526 case COMP_STALL_ERROR:
2527 frame->status = -EPROTO;
2528 break;
2529 case COMP_USB_TRANSACTION_ERROR:
2530 frame->status = -EPROTO;
2531 if (ep_trb != td->last_trb)
2532 return 0;
2533 break;
2534 case COMP_STOPPED:
2535 sum_trbs_for_length = true;
2536 break;
2537 case COMP_STOPPED_SHORT_PACKET:
2538
2539 frame->status = short_framestatus;
2540 requested = remaining;
2541 break;
2542 case COMP_STOPPED_LENGTH_INVALID:
2543 requested = 0;
2544 remaining = 0;
2545 break;
2546 default:
2547 sum_trbs_for_length = true;
2548 frame->status = -1;
2549 break;
2550 }
2551
2552 if (sum_trbs_for_length)
2553 frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
2554 ep_trb_len - remaining;
2555 else
2556 frame->actual_length = requested;
2557
2558 td->urb->actual_length += frame->actual_length;
2559
2560 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2561}
2562
2563static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2564 struct xhci_virt_ep *ep, int status)
2565{
2566 struct urb_priv *urb_priv;
2567 struct usb_iso_packet_descriptor *frame;
2568 int idx;
2569
2570 urb_priv = td->urb->hcpriv;
2571 idx = urb_priv->num_tds_done;
2572 frame = &td->urb->iso_frame_desc[idx];
2573
2574
2575 frame->status = -EXDEV;
2576
2577
2578 frame->actual_length = 0;
2579
2580
2581 ep->ring->dequeue = td->last_trb;
2582 ep->ring->deq_seg = td->last_trb_seg;
2583 ep->ring->num_trbs_free += td->num_trbs - 1;
2584 inc_deq(xhci, ep->ring);
2585
2586 return xhci_td_cleanup(xhci, td, ep->ring, status);
2587}
2588
2589
2590
2591
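/* Process bulk and interrupt TDs, update the URB's status and actual_length */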
2592static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2593 struct xhci_ring *ep_ring, struct xhci_td *td,
2594 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2595{
2596 struct xhci_slot_ctx *slot_ctx;
2597 u32 trb_comp_code;
2598 u32 remaining, requested, ep_trb_len;
2599
2600 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2601 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2602 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2603 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2604 requested = td->urb->transfer_buffer_length;
2605
2606 switch (trb_comp_code) {
2607 case COMP_SUCCESS:
2608 ep_ring->err_count = 0;
2609
2610 if (ep_trb != td->last_trb || remaining) {
2611 xhci_warn(xhci, "WARN Successful completion on short TX\n");
2612 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2613 td->urb->ep->desc.bEndpointAddress,
2614 requested, remaining);
2615 }
2616 td->status = 0;
2617 break;
2618 case COMP_SHORT_PACKET:
2619 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2620 td->urb->ep->desc.bEndpointAddress,
2621 requested, remaining);
2622 td->status = 0;
2623 break;
2624 case COMP_STOPPED_SHORT_PACKET:
2625 td->urb->actual_length = remaining;
2626 goto finish_td;
2627 case COMP_STOPPED_LENGTH_INVALID:
2628
2629 ep_trb_len = 0;
2630 remaining = 0;
2631 break;
2632 case COMP_USB_TRANSACTION_ERROR:
2633 if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
2634 (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
2635 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
2636 break;
2637
2638 td->status = 0;
2639
2640 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2641 EP_SOFT_RESET);
2642 return 0;
2643 default:
2644
2645 break;
2646 }
2647
2648 if (ep_trb == td->last_trb)
2649 td->urb->actual_length = requested - remaining;
2650 else
2651 td->urb->actual_length =
2652 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2653 ep_trb_len - remaining;
2654finish_td:
2655 if (remaining > requested) {
2656 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2657 remaining);
2658 td->urb->actual_length = 0;
2659 }
2660
2661 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2662}
2663
2664
2665
2666
2667
2668
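/*
 * Handle a transfer event on an endpoint ring. Returns 0 on success and a
 * negative error if the event refers to a slot, endpoint, or TRB DMA address
 * that cannot be resolved, in which case the controller is likely unusable.
 */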
2669static int handle_tx_event(struct xhci_hcd *xhci,
2670 struct xhci_transfer_event *event)
2671{
2672 struct xhci_virt_ep *ep;
2673 struct xhci_ring *ep_ring;
2674 unsigned int slot_id;
2675 int ep_index;
2676 struct xhci_td *td = NULL;
2677 dma_addr_t ep_trb_dma;
2678 struct xhci_segment *ep_seg;
2679 union xhci_trb *ep_trb;
2680 int status = -EINPROGRESS;
2681 struct xhci_ep_ctx *ep_ctx;
2682 struct list_head *tmp;
2683 u32 trb_comp_code;
2684 int td_num = 0;
2685 bool handling_skipped_tds = false;
2686
2687 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2688 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2689 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2690 ep_trb_dma = le64_to_cpu(event->buffer);
2691
2692 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
2693 if (!ep) {
2694 xhci_err(xhci, "ERROR Invalid Transfer event\n");
2695 goto err_out;
2696 }
2697
2698 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
2699 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2700
2701 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
2702 xhci_err(xhci,
2703 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
2704 slot_id, ep_index);
2705 goto err_out;
2706 }
2707
2708
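/* Some transfer events don't point at a TRB at all, see xHCI 4.17.4 */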
2709 if (!ep_ring) {
2710 switch (trb_comp_code) {
2711 case COMP_STALL_ERROR:
2712 case COMP_USB_TRANSACTION_ERROR:
2713 case COMP_INVALID_STREAM_TYPE_ERROR:
2714 case COMP_INVALID_STREAM_ID_ERROR:
2715 xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
2716 EP_SOFT_RESET);
2717 goto cleanup;
2718 case COMP_RING_UNDERRUN:
2719 case COMP_RING_OVERRUN:
2720 case COMP_STOPPED_LENGTH_INVALID:
2721 goto cleanup;
2722 default:
2723 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
2724 slot_id, ep_index);
2725 goto err_out;
2726 }
2727 }
2728
2729
2730 if (ep->skip) {
2731 list_for_each(tmp, &ep_ring->td_list)
2732 td_num++;
2733 }
2734
2735 if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
2736 (ep->ep_state & EP_HAS_STREAMS))
2737 del_timer(&ep_ring->stream_timer);
2738
2739
2740 switch (trb_comp_code) {
2741
2742
2743
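/*
 * Map the completion code to a URB status, and handle codes that only
 * affect ring bookkeeping (stops, underruns, missed service) in place.
 */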
2744 case COMP_SUCCESS:
2745 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2746 break;
2747 if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
2748 ep_ring->last_td_was_short)
2749 trb_comp_code = COMP_SHORT_PACKET;
2750 else
2751 xhci_warn_ratelimited(xhci,
2752 "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
2753 slot_id, ep_index);
2754 break;
2755 case COMP_SHORT_PACKET:
2756 break;
2757
2758 case COMP_STOPPED:
2759 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2760 slot_id, ep_index);
2761 break;
2762 case COMP_STOPPED_LENGTH_INVALID:
2763 xhci_dbg(xhci,
2764 "Stopped on No-op or Link TRB for slot %u ep %u\n",
2765 slot_id, ep_index);
2766 break;
2767 case COMP_STOPPED_SHORT_PACKET:
2768 xhci_dbg(xhci,
2769 "Stopped with short packet transfer detected for slot %u ep %u\n",
2770 slot_id, ep_index);
2771 break;
2772
2773 case COMP_STALL_ERROR:
2774 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2775 ep_index);
2776 status = -EPIPE;
2777 break;
2778 case COMP_SPLIT_TRANSACTION_ERROR:
2779 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
2780 slot_id, ep_index);
2781 status = -EPROTO;
2782 break;
2783 case COMP_USB_TRANSACTION_ERROR:
2784 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
2785 slot_id, ep_index);
2786 status = -EPROTO;
2787 break;
2788 case COMP_BABBLE_DETECTED_ERROR:
2789 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2790 slot_id, ep_index);
2791 status = -EOVERFLOW;
2792 break;
2793
2794 case COMP_TRB_ERROR:
2795 xhci_warn(xhci,
2796 "WARN: TRB error for slot %u ep %u on endpoint\n",
2797 slot_id, ep_index);
2798 status = -EILSEQ;
2799 break;
2800
2801 case COMP_DATA_BUFFER_ERROR:
2802 xhci_warn(xhci,
2803 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2804 slot_id, ep_index);
2805 status = -ENOSR;
2806 break;
2807 case COMP_BANDWIDTH_OVERRUN_ERROR:
2808 xhci_warn(xhci,
2809 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2810 slot_id, ep_index);
2811 break;
2812 case COMP_ISOCH_BUFFER_OVERRUN:
2813 xhci_warn(xhci,
2814 "WARN: buffer overrun event for slot %u ep %u on endpoint",
2815 slot_id, ep_index);
2816 break;
2817 case COMP_RING_UNDERRUN:
2818
2819
2820
2821
2822
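/*
 * When an isoc ring runs dry the xHC generates Ring Underrun (OUT) or
 * Ring Overrun (IN) events; there is no TD to complete for these.
 */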
2823 xhci_dbg(xhci, "underrun event on endpoint\n");
2824 if (!list_empty(&ep_ring->td_list))
2825 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2826 "still with TDs queued?\n",
2827 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2828 ep_index);
2829 goto cleanup;
2830 case COMP_RING_OVERRUN:
2831 xhci_dbg(xhci, "overrun event on endpoint\n");
2832 if (!list_empty(&ep_ring->td_list))
2833 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2834 "still with TDs queued?\n",
2835 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2836 ep_index);
2837 goto cleanup;
2838 case COMP_MISSED_SERVICE_ERROR:
2839
2840
2841
2842
2843
2844
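/*
 * One or more isoc TDs were missed by the xHC. Set the skip flag so the
 * missed TDs are completed as short transfers the next time events for
 * this ring are processed.
 */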
2845 ep->skip = true;
2846 xhci_dbg(xhci,
2847 "Miss service interval error for slot %u ep %u, set skip flag\n",
2848 slot_id, ep_index);
2849 goto cleanup;
2850 case COMP_NO_PING_RESPONSE_ERROR:
2851 ep->skip = true;
2852 xhci_dbg(xhci,
2853 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2854 slot_id, ep_index);
2855 goto cleanup;
2856
2857 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2858
2859 xhci_warn(xhci,
2860 "WARN: detect an incompatible device for slot %u ep %u",
2861 slot_id, ep_index);
2862 status = -EPROTO;
2863 break;
2864 default:
2865 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2866 status = 0;
2867 break;
2868 }
2869 xhci_warn(xhci,
2870 "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
2871 trb_comp_code, slot_id, ep_index);
2872 goto cleanup;
2873 }
2874
2875 do {
2876
2877
2878
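/* This event's TRB should be in the TD at the head of this ring's TD list */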
2879 if (list_empty(&ep_ring->td_list)) {
2880
2881
2882
2883
2884
2885
2886
2887
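/*
 * Don't warn here if the ring was stopped or the last TD was already
 * completed as a short transfer; those can legitimately leave an event
 * with no TD queued.
 */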
2888 if (!(trb_comp_code == COMP_STOPPED ||
2889 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2890 ep_ring->last_td_was_short)) {
2891 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2892 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2893 ep_index);
2894 }
2895 if (ep->skip) {
2896 ep->skip = false;
2897 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2898 slot_id, ep_index);
2899 }
2900 if (trb_comp_code == COMP_STALL_ERROR ||
2901 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2902 trb_comp_code)) {
2903 xhci_handle_halted_endpoint(xhci, ep,
2904 ep_ring->stream_id,
2905 NULL,
2906 EP_HARD_RESET);
2907 }
2908 goto cleanup;
2909 }
2910
2911
2912 if (ep->skip && td_num == 0) {
2913 ep->skip = false;
2914 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2915 slot_id, ep_index);
2916 goto cleanup;
2917 }
2918
2919 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2920 td_list);
2921 if (ep->skip)
2922 td_num--;
2923
2924
2925 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2926 td->last_trb, ep_trb_dma, false);
2927
2928
2929
2930
2931
2932
2933
2934
2935
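/*
 * A Force Stopped Event may point at the TRB before the current TD
 * (a link TRB or the previous TD's last TRB); the Stop Endpoint command
 * completion handler deals with that case, so just consume the event.
 */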
2936 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2937 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
2938 goto cleanup;
2939 }
2940
2941 if (!ep_seg) {
2942 if (!ep->skip ||
2943 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2944
2945
2946
2947
2948 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2949 ep_ring->last_td_was_short) {
2950 ep_ring->last_td_was_short = false;
2951 goto cleanup;
2952 }
2953
2954 xhci_err(xhci,
2955 "ERROR Transfer event TRB DMA ptr not "
2956 "part of current TD ep_index %d "
2957 "comp_code %u\n", ep_index,
2958 trb_comp_code);
2959 trb_in_td(xhci, ep_ring->deq_seg,
2960 ep_ring->dequeue, td->last_trb,
2961 ep_trb_dma, true);
2962 return -ESHUTDOWN;
2963 }
2964
2965 skip_isoc_td(xhci, td, ep, status);
2966 goto cleanup;
2967 }
2968 if (trb_comp_code == COMP_SHORT_PACKET)
2969 ep_ring->last_td_was_short = true;
2970 else
2971 ep_ring->last_td_was_short = false;
2972
2973 if (ep->skip) {
2974 xhci_dbg(xhci,
2975 "Found td. Clear skip flag for slot %u ep %u.\n",
2976 slot_id, ep_index);
2977 ep->skip = false;
2978 }
2979
2980 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2981 sizeof(*ep_trb)];
2982
2983 trace_xhci_handle_transfer(ep_ring,
2984 (struct xhci_generic_trb *) ep_trb);
2985
2986
2987
2988
2989
2990
2991
2992
2993
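/*
 * A no-op TRB can still carry a stall or halt condition, for example when
 * a URB was killed just before the endpoint stalled. Reset the halted
 * endpoint here, otherwise it would remain stalled indefinitely.
 */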
2994 if (trb_is_noop(ep_trb)) {
2995 if (trb_comp_code == COMP_STALL_ERROR ||
2996 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2997 trb_comp_code))
2998 xhci_handle_halted_endpoint(xhci, ep,
2999 ep_ring->stream_id,
3000 td, EP_HARD_RESET);
3001 goto cleanup;
3002 }
3003
3004 td->status = status;
3005
3006
3007 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
3008 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
3009 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
3010 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
3011 else
3012 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
3013cleanup:
3014 handling_skipped_tds = ep->skip &&
3015 trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
3016 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
3017
3018
3019
3020
3021
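/* Don't advance the event ring dequeue pointer while handling skipped TDs */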
3022 if (!handling_skipped_tds)
3023 inc_deq(xhci, xhci->event_ring);
3024
3025
3026
3027
3028
3029
3030
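/*
 * If ep->skip is set there are missed TDs on the ring; keep completing
 * them as short transfers until the TD matching this event is reached.
 */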
3031 } while (handling_skipped_tds);
3032
3033 return 0;
3034
3035err_out:
3036 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
3037 (unsigned long long) xhci_trb_virt_to_dma(
3038 xhci->event_ring->deq_seg,
3039 xhci->event_ring->dequeue),
3040 lower_32_bits(le64_to_cpu(event->buffer)),
3041 upper_32_bits(le64_to_cpu(event->buffer)),
3042 le32_to_cpu(event->transfer_len),
3043 le32_to_cpu(event->flags));
3044 return -ENODEV;
3045}
3046
3047
3048
3049
3050
3051
3052
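/*
 * Handle one OS-owned event TRB on the event ring. Returns 1 if there may be
 * more events to process (caller should call again), 0 when the ring is empty
 * or the host is dying, and a negative value if the event ring is not ready.
 */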
3053static int xhci_handle_event(struct xhci_hcd *xhci)
3054{
3055 union xhci_trb *event;
3056 int update_ptrs = 1;
3057 u32 trb_type;
3058 int ret;
3059
3060
3061 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
3062 xhci_err(xhci, "ERROR event ring not ready\n");
3063 return -ENOMEM;
3064 }
3065
3066 event = xhci->event_ring->dequeue;
3067
3068 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
3069 xhci->event_ring->cycle_state)
3070 return 0;
3071
3072 trace_xhci_handle_event(xhci->event_ring, &event->generic);
3073
3074
3075
3076
3077
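/*
 * Speculative reads of the event's flags/data below must not be reordered
 * before reading the cycle (ownership) bit above.
 */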
3078 rmb();
3079 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
3080
3081
3082 switch (trb_type) {
3083 case TRB_COMPLETION:
3084 handle_cmd_completion(xhci, &event->event_cmd);
3085 break;
3086 case TRB_PORT_STATUS:
3087 handle_port_status(xhci, event);
3088 update_ptrs = 0;
3089 break;
3090 case TRB_TRANSFER:
3091 ret = handle_tx_event(xhci, &event->trans_event);
3092 if (ret >= 0)
3093 update_ptrs = 0;
3094 break;
3095 case TRB_DEV_NOTE:
3096 handle_device_notification(xhci, event);
3097 break;
3098 default:
3099 if (trb_type >= TRB_VENDOR_DEFINED_LOW)
3100 handle_vendor_event(xhci, event, trb_type);
3101 else
3102 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
3103 }
3104
3105
3106
3107 if (xhci->xhc_state & XHCI_STATE_DYING) {
3108 xhci_dbg(xhci, "xHCI host dying, returning from "
3109 "event handler.\n");
3110 return 0;
3111 }
3112
3113 if (update_ptrs)
3114
3115 inc_deq(xhci, xhci->event_ring);
3116
3117
3118
3119
3120 return 1;
3121}
3122
3123
3124
3125
3126
3127
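/*
 * Update the Event Ring Dequeue Pointer register (ERDP) so the controller
 * knows which events have been handled, and clear the Event Handler Busy bit.
 */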
3128static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
3129 union xhci_trb *event_ring_deq)
3130{
3131 u64 temp_64;
3132 dma_addr_t deq;
3133
3134 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3135
3136 if (event_ring_deq != xhci->event_ring->dequeue) {
3137 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
3138 xhci->event_ring->dequeue);
3139 if (deq == 0)
3140 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
3141
3142
3143
3144
3145 if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
3146 ((u64) deq & (u64) ~ERST_PTR_MASK))
3147 return;
3148
3149
3150 temp_64 &= ERST_PTR_MASK;
3151 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
3152 }
3153
3154
3155 temp_64 |= ERST_EHB;
3156 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
3157}
3158
3159
3160
3161
3162
3163
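/*
 * xHCI primary interrupt handler: acknowledge the interrupt, process all
 * OS-owned events on the event ring, and update the event ring dequeue
 * pointer. Shared with the MSI/MSI-X path via xhci_msi_irq().
 */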
3164irqreturn_t xhci_irq(struct usb_hcd *hcd)
3165{
3166 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3167 union xhci_trb *event_ring_deq;
3168 irqreturn_t ret = IRQ_NONE;
3169 u64 temp_64;
3170 u32 status;
3171 int event_loop = 0;
3172
3173 spin_lock(&xhci->lock);
3174
3175 status = readl(&xhci->op_regs->status);
3176 if (status == ~(u32)0) {
3177 xhci_hc_died(xhci);
3178 ret = IRQ_HANDLED;
3179 goto out;
3180 }
3181
3182 if (!(status & STS_EINT))
3183 goto out;
3184
3185 if (status & STS_FATAL) {
3186 xhci_warn(xhci, "WARNING: Host System Error\n");
3187 xhci_halt(xhci);
3188 ret = IRQ_HANDLED;
3189 goto out;
3190 }
3191
3192
3193
3194
3195
3196
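/*
 * Acknowledge the interrupt in the op regs first (write 1 to clear STS_EINT)
 * so further events from other interrupters are not lost.
 */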
3197 status |= STS_EINT;
3198 writel(status, &xhci->op_regs->status);
3199
3200 if (!hcd->msi_enabled) {
3201 u32 irq_pending;
3202 irq_pending = readl(&xhci->ir_set->irq_pending);
3203 irq_pending |= IMAN_IP;
3204 writel(irq_pending, &xhci->ir_set->irq_pending);
3205 }
3206
3207 if (xhci->xhc_state & XHCI_STATE_DYING ||
3208 xhci->xhc_state & XHCI_STATE_HALTED) {
3209 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
3210 "Shouldn't IRQs be disabled?\n");
3211
3212
3213
3214 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3215 xhci_write_64(xhci, temp_64 | ERST_EHB,
3216 &xhci->ir_set->erst_dequeue);
3217 ret = IRQ_HANDLED;
3218 goto out;
3219 }
3220
3221 event_ring_deq = xhci->event_ring->dequeue;
3222
3223
3224
3225 while (xhci_handle_event(xhci) > 0) {
3226 if (event_loop++ < TRBS_PER_SEGMENT / 2)
3227 continue;
3228 xhci_update_erst_dequeue(xhci, event_ring_deq);
3229
3230
3231 if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
3232 xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
3233
3234 event_loop = 0;
3235 }
3236
3237 xhci_update_erst_dequeue(xhci, event_ring_deq);
3238 ret = IRQ_HANDLED;
3239
3240out:
3241 spin_unlock(&xhci->lock);
3242
3243 return ret;
3244}
3245
3246irqreturn_t xhci_msi_irq(int irq, void *hcd)
3247{
3248 return xhci_irq(hcd);
3249}
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
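/*
 * Generic function for queueing a TRB on a ring. The caller must have checked
 * that there is room on the ring; @more_trbs_coming tells inc_enq() whether
 * the caller will queue more TRBs for this TD before ringing the doorbell.
 */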
3260static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3261 bool more_trbs_coming,
3262 u32 field1, u32 field2, u32 field3, u32 field4)
3263{
3264 struct xhci_generic_trb *trb;
3265
3266 trb = &ring->enqueue->generic;
3267 trb->field[0] = cpu_to_le32(field1);
3268 trb->field[1] = cpu_to_le32(field2);
3269 trb->field[2] = cpu_to_le32(field3);
3270
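/* Write the cycle bit last so the controller doesn't see a partial TRB */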
3271 wmb();
3272 trb->field[3] = cpu_to_le32(field4);
3273
3274 trace_xhci_queue_trb(ring, trb);
3275
3276 inc_enq(xhci, ring, more_trbs_coming);
3277}
3278
3279
3280
3281
3282
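/*
 * Check the endpoint state and make sure there is room on the ring for
 * num_trbs TRBs, expanding the ring if necessary, and step the enqueue
 * pointer past any link TRBs.
 */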
3283static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3284 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
3285{
3286 unsigned int num_trbs_needed;
3287 unsigned int link_trb_count = 0;
3288
3289
3290 switch (ep_state) {
3291 case EP_STATE_DISABLED:
3292
3293
3294
3295
3296 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
3297 return -ENOENT;
3298 case EP_STATE_ERROR:
3299 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
3300
3301
3302 return -EINVAL;
3303 case EP_STATE_HALTED:
3304 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
3305 break;
3306 case EP_STATE_STOPPED:
3307 case EP_STATE_RUNNING:
3308 break;
3309 default:
3310 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
3311
3312
3313
3314
3315 return -EINVAL;
3316 }
3317
3318 while (1) {
3319 if (room_on_ring(xhci, ep_ring, num_trbs))
3320 break;
3321
3322 if (ep_ring == xhci->cmd_ring) {
3323 xhci_err(xhci, "Do not support expand command ring\n");
3324 return -ENOMEM;
3325 }
3326
3327 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
3328 "ERROR no room on ep ring, try ring expansion");
3329 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
3330 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
3331 mem_flags)) {
3332 xhci_err(xhci, "Ring expansion failed\n");
3333 return -ENOMEM;
3334 }
3335 }
3336
3337 while (trb_is_link(ep_ring->enqueue)) {
3338
3339
3340
3341 if (!xhci_link_trb_quirk(xhci) &&
3342 !(ep_ring->type == TYPE_ISOC &&
3343 (xhci->quirks & XHCI_AMD_0x96_HOST)))
3344 ep_ring->enqueue->link.control &=
3345 cpu_to_le32(~TRB_CHAIN);
3346 else
3347 ep_ring->enqueue->link.control |=
3348 cpu_to_le32(TRB_CHAIN);
3349
3350 wmb();
3351 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
3352
3353
3354 if (link_trb_toggles_cycle(ep_ring->enqueue))
3355 ep_ring->cycle_state ^= 1;
3356
3357 ep_ring->enq_seg = ep_ring->enq_seg->next;
3358 ep_ring->enqueue = ep_ring->enq_seg->trbs;
3359
3360
3361 if (link_trb_count++ > ep_ring->num_segs) {
3362 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
3363 return -EINVAL;
3364 }
3365 }
3366
3367 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
3368 xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
3369 return -EINVAL;
3370 }
3371
3372 return 0;
3373}
3374
3375static int prepare_transfer(struct xhci_hcd *xhci,
3376 struct xhci_virt_device *xdev,
3377 unsigned int ep_index,
3378 unsigned int stream_id,
3379 unsigned int num_trbs,
3380 struct urb *urb,
3381 unsigned int td_index,
3382 gfp_t mem_flags)
3383{
3384 int ret;
3385 struct urb_priv *urb_priv;
3386 struct xhci_td *td;
3387 struct xhci_ring *ep_ring;
3388 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3389
3390 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3391 stream_id);
3392 if (!ep_ring) {
3393 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3394 stream_id);
3395 return -EINVAL;
3396 }
3397
3398 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3399 num_trbs, mem_flags);
3400 if (ret)
3401 return ret;
3402
3403 urb_priv = urb->hcpriv;
3404 td = &urb_priv->td[td_index];
3405
3406 INIT_LIST_HEAD(&td->td_list);
3407 INIT_LIST_HEAD(&td->cancelled_td_list);
3408
3409 if (td_index == 0) {
3410 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3411 if (unlikely(ret))
3412 return ret;
3413 }
3414
3415 td->urb = urb;
3416
3417 list_add_tail(&td->td_list, &ep_ring->td_list);
3418 td->start_seg = ep_ring->enq_seg;
3419 td->first_trb = ep_ring->enqueue;
3420
3421 return 0;
3422}
3423
3424unsigned int count_trbs(u64 addr, u64 len)
3425{
3426 unsigned int num_trbs;
3427
3428 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3429 TRB_MAX_BUFF_SIZE);
3430 if (num_trbs == 0)
3431 num_trbs++;
3432
3433 return num_trbs;
3434}
3435
3436static inline unsigned int count_trbs_needed(struct urb *urb)
3437{
3438 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3439}
3440
3441static unsigned int count_sg_trbs_needed(struct urb *urb)
3442{
3443 struct scatterlist *sg;
3444 unsigned int i, len, full_len, num_trbs = 0;
3445
3446 full_len = urb->transfer_buffer_length;
3447
3448 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3449 len = sg_dma_len(sg);
3450 num_trbs += count_trbs(sg_dma_address(sg), len);
3451 len = min_t(unsigned int, len, full_len);
3452 full_len -= len;
3453 if (full_len == 0)
3454 break;
3455 }
3456
3457 return num_trbs;
3458}
3459
3460static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3461{
3462 u64 addr, len;
3463
3464 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3465 len = urb->iso_frame_desc[i].length;
3466
3467 return count_trbs(addr, len);
3468}
3469
3470static void check_trb_math(struct urb *urb, int running_total)
3471{
3472 if (unlikely(running_total != urb->transfer_buffer_length))
3473 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3474 "queued %#x (%d), asked for %#x (%d)\n",
3475 __func__,
3476 urb->ep->desc.bEndpointAddress,
3477 running_total, running_total,
3478 urb->transfer_buffer_length,
3479 urb->transfer_buffer_length);
3480}
3481
3482static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3483 unsigned int ep_index, unsigned int stream_id, int start_cycle,
3484 struct xhci_generic_trb *start_trb)
3485{
3486
3487
3488
3489
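/*
 * Hand all previously queued TRBs to the controller at once: make sure they
 * are written out before the first TRB's cycle bit is flipped.
 */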
3490 wmb();
3491 if (start_cycle)
3492 start_trb->field[3] |= cpu_to_le32(start_cycle);
3493 else
3494 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3495 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3496}
3497
3498static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3499 struct xhci_ep_ctx *ep_ctx)
3500{
3501 int xhci_interval;
3502 int ep_interval;
3503
3504 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3505 ep_interval = urb->interval;
3506
3507
3508 if (urb->dev->speed == USB_SPEED_LOW ||
3509 urb->dev->speed == USB_SPEED_FULL)
3510 ep_interval *= 8;
3511
3512
3513
3514
3515 if (xhci_interval != ep_interval) {
3516 dev_dbg_ratelimited(&urb->dev->dev,
3517 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3518 ep_interval, ep_interval == 1 ? "" : "s",
3519 xhci_interval, xhci_interval == 1 ? "" : "s");
3520 urb->interval = xhci_interval;
3521
3522 if (urb->dev->speed == USB_SPEED_LOW ||
3523 urb->dev->speed == USB_SPEED_FULL)
3524 urb->interval /= 8;
3525 }
3526}
3527
3528
3529
3530
3531
3532
3533
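/*
 * xHCI uses normal TRBs for both bulk and interrupt transfers, so interrupt
 * URBs are queued through the bulk path after the interval has been checked.
 */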
3534int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3535 struct urb *urb, int slot_id, unsigned int ep_index)
3536{
3537 struct xhci_ep_ctx *ep_ctx;
3538
3539 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3540 check_interval(xhci, urb, ep_ctx);
3541
3542 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3543}
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
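/*
 * TD size field of a transfer TRB:
 *
 * For xHCI 1.0+ hosts it is the number of max-packet-sized packets still
 * remaining in the TD after this TRB:
 *
 *   total_packet_count = DIV_ROUND_UP(td_total_len, wMaxPacketSize)
 *   packets_transferred = (transferred + trb_buff_len) / wMaxPacketSize
 *   TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older it is the remaining byte count shifted right by 10.
 * The last TRB of a TD must report a TD size of zero, and TRB_TD_SIZE() caps
 * the value so it fits in the field.
 */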
3565static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3566 int trb_buff_len, unsigned int td_total_len,
3567 struct urb *urb, bool more_trbs_coming)
3568{
3569 u32 maxp, total_packet_count;
3570
3571
3572 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3573 return ((td_total_len - transferred) >> 10);
3574
3575
3576 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3577 trb_buff_len == td_total_len)
3578 return 0;
3579
3580
3581 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3582 trb_buff_len = 0;
3583
3584 maxp = usb_endpoint_maxp(&urb->ep->desc);
3585 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3586
3587
3588 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3589}
3590
3591
3592static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3593 u32 *trb_buff_len, struct xhci_segment *seg)
3594{
3595 struct device *dev = xhci_to_hcd(xhci)->self.controller;
3596 unsigned int unalign;
3597 unsigned int max_pkt;
3598 u32 new_buff_len;
3599 size_t len;
3600
3601 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
3602 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3603
3604
3605 if (unalign == 0)
3606 return 0;
3607
3608 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3609 unalign, *trb_buff_len);
3610
3611
3612 if (*trb_buff_len > unalign) {
3613 *trb_buff_len -= unalign;
3614 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3615 return 0;
3616 }
3617
3618
3619
3620
3621
3622
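/*
 * The TRB before a link TRB must end on a max-packet boundary. Copy enough
 * data into a per-segment bounce buffer so that enqd_len + trb_buff_len is
 * a multiple of wMaxPacketSize.
 */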
3623 new_buff_len = max_pkt - (enqd_len % max_pkt);
3624
3625 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3626 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3627
3628
3629 if (usb_urb_dir_out(urb)) {
3630 if (urb->num_sgs) {
3631 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
3632 seg->bounce_buf, new_buff_len, enqd_len);
3633 if (len != new_buff_len)
3634 xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
3635 len, new_buff_len);
3636 } else {
3637 memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
3638 }
3639
3640 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3641 max_pkt, DMA_TO_DEVICE);
3642 } else {
3643 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3644 max_pkt, DMA_FROM_DEVICE);
3645 }
3646
3647 if (dma_mapping_error(dev, seg->bounce_dma)) {
3648
3649 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3650 return 0;
3651 }
3652 *trb_buff_len = new_buff_len;
3653 seg->bounce_len = new_buff_len;
3654 seg->bounce_offs = enqd_len;
3655
3656 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3657
3658 return 1;
3659}
3660
3661
3662int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3663 struct urb *urb, int slot_id, unsigned int ep_index)
3664{
3665 struct xhci_ring *ring;
3666 struct urb_priv *urb_priv;
3667 struct xhci_td *td;
3668 struct xhci_generic_trb *start_trb;
3669 struct scatterlist *sg = NULL;
3670 bool more_trbs_coming = true;
3671 bool need_zero_pkt = false;
3672 bool first_trb = true;
3673 unsigned int num_trbs;
3674 unsigned int start_cycle, num_sgs = 0;
3675 unsigned int enqd_len, block_len, trb_buff_len, full_len;
3676 int sent_len, ret;
3677 u32 field, length_field, remainder;
3678 u64 addr, send_addr;
3679
3680 ring = xhci_urb_to_transfer_ring(xhci, urb);
3681 if (!ring)
3682 return -EINVAL;
3683
3684 full_len = urb->transfer_buffer_length;
3685
3686 if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
3687 num_sgs = urb->num_mapped_sgs;
3688 sg = urb->sg;
3689 addr = (u64) sg_dma_address(sg);
3690 block_len = sg_dma_len(sg);
3691 num_trbs = count_sg_trbs_needed(urb);
3692 } else {
3693 num_trbs = count_trbs_needed(urb);
3694 addr = (u64) urb->transfer_dma;
3695 block_len = full_len;
3696 }
3697 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3698 ep_index, urb->stream_id,
3699 num_trbs, urb, 0, mem_flags);
3700 if (unlikely(ret < 0))
3701 return ret;
3702
3703 urb_priv = urb->hcpriv;
3704
3705
3706 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
3707 need_zero_pkt = true;
3708
3709 td = &urb_priv->td[0];
3710
3711
3712
3713
3714
3715
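/*
 * Don't give the first TRB to the hardware yet: remember its position and
 * the ring's cycle state, and only flip the cycle bit in giveback_first_trb()
 * once the whole TD has been queued.
 */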
3716 start_trb = &ring->enqueue->generic;
3717 start_cycle = ring->cycle_state;
3718 send_addr = addr;
3719
3720
3721 for (enqd_len = 0; first_trb || enqd_len < full_len;
3722 enqd_len += trb_buff_len) {
3723 field = TRB_TYPE(TRB_NORMAL);
3724
3725
3726 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3727 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
3728
3729 if (enqd_len + trb_buff_len > full_len)
3730 trb_buff_len = full_len - enqd_len;
3731
3732
3733 if (first_trb) {
3734 first_trb = false;
3735 if (start_cycle == 0)
3736 field |= TRB_CYCLE;
3737 } else
3738 field |= ring->cycle_state;
3739
3740
3741
3742
3743 if (enqd_len + trb_buff_len < full_len) {
3744 field |= TRB_CHAIN;
3745 if (trb_is_link(ring->enqueue + 1)) {
3746 if (xhci_align_td(xhci, urb, enqd_len,
3747 &trb_buff_len,
3748 ring->enq_seg)) {
3749 send_addr = ring->enq_seg->bounce_dma;
3750
3751 td->bounce_seg = ring->enq_seg;
3752 }
3753 }
3754 }
3755 if (enqd_len + trb_buff_len >= full_len) {
3756 field &= ~TRB_CHAIN;
3757 field |= TRB_IOC;
3758 more_trbs_coming = false;
3759 td->last_trb = ring->enqueue;
3760 td->last_trb_seg = ring->enq_seg;
3761 if (xhci_urb_suitable_for_idt(urb)) {
3762 memcpy(&send_addr, urb->transfer_buffer,
3763 trb_buff_len);
3764 le64_to_cpus(&send_addr);
3765 field |= TRB_IDT;
3766 }
3767 }
3768
3769
3770 if (usb_urb_dir_in(urb))
3771 field |= TRB_ISP;
3772
3773
3774 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3775 full_len, urb, more_trbs_coming);
3776
3777 length_field = TRB_LEN(trb_buff_len) |
3778 TRB_TD_SIZE(remainder) |
3779 TRB_INTR_TARGET(0);
3780
3781 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
3782 lower_32_bits(send_addr),
3783 upper_32_bits(send_addr),
3784 length_field,
3785 field);
3786 td->num_trbs++;
3787 addr += trb_buff_len;
3788 sent_len = trb_buff_len;
3789
3790 while (sg && sent_len >= block_len) {
3791
3792 --num_sgs;
3793 sent_len -= block_len;
3794 sg = sg_next(sg);
3795 if (num_sgs != 0 && sg) {
3796 block_len = sg_dma_len(sg);
3797 addr = (u64) sg_dma_address(sg);
3798 addr += sent_len;
3799 }
3800 }
3801 block_len -= sent_len;
3802 send_addr = addr;
3803 }
3804
3805 if (need_zero_pkt) {
3806 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3807 ep_index, urb->stream_id,
3808 1, urb, 1, mem_flags);
3809 urb_priv->td[1].last_trb = ring->enqueue;
3810 urb_priv->td[1].last_trb_seg = ring->enq_seg;
3811 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3812 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3813 urb_priv->td[1].num_trbs++;
3814 }
3815
3816 check_trb_math(urb, enqd_len);
3817
3818 if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0) &&
3819 usb_endpoint_dir_in(&urb->ep->desc)) {
3820
3821
3822
3823 ring->stream_timeout_handler = false;
3824 mod_timer(&ring->stream_timer, jiffies + 5 * HZ);
3825 }
3826 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3827 start_cycle, start_trb);
3828 return 0;
3829}
3830
3831
3832int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3833 struct urb *urb, int slot_id, unsigned int ep_index)
3834{
3835 struct xhci_ring *ep_ring;
3836 int num_trbs;
3837 int ret;
3838 struct usb_ctrlrequest *setup;
3839 struct xhci_generic_trb *start_trb;
3840 int start_cycle;
3841 u32 field;
3842 struct urb_priv *urb_priv;
3843 struct xhci_td *td;
3844
3845 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3846 if (!ep_ring)
3847 return -EINVAL;
3848
3849
3850
3851
3852
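/* A control transfer always needs a setup packet */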
3853 if (!urb->setup_packet)
3854 return -EINVAL;
3855
3856
3857 num_trbs = 2;
3858
3859
3860
3861
3862
3863 if (urb->transfer_buffer_length > 0)
3864 num_trbs++;
3865 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3866 ep_index, urb->stream_id,
3867 num_trbs, urb, 0, mem_flags);
3868 if (ret < 0)
3869 return ret;
3870
3871 urb_priv = urb->hcpriv;
3872 td = &urb_priv->td[0];
3873 td->num_trbs = num_trbs;
3874
3875
3876
3877
3878
3879
3880 start_trb = &ep_ring->enqueue->generic;
3881 start_cycle = ep_ring->cycle_state;
3882
3883
3884
3885 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3886 field = 0;
3887 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3888 if (start_cycle == 0)
3889 field |= 0x1;
3890
3891
3892 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3893 if (urb->transfer_buffer_length > 0) {
3894 if (setup->bRequestType & USB_DIR_IN)
3895 field |= TRB_TX_TYPE(TRB_DATA_IN);
3896 else
3897 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3898 }
3899 }
3900
3901 queue_trb(xhci, ep_ring, true,
3902 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3903 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3904 TRB_LEN(8) | TRB_INTR_TARGET(0),
3905
3906 field);
3907
3908
3909
3910 if (usb_urb_dir_in(urb))
3911 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3912 else
3913 field = TRB_TYPE(TRB_DATA);
3914
3915 if (urb->transfer_buffer_length > 0) {
3916 u32 length_field, remainder;
3917 u64 addr;
3918
3919 if (xhci_urb_suitable_for_idt(urb)) {
3920 memcpy(&addr, urb->transfer_buffer,
3921 urb->transfer_buffer_length);
3922 le64_to_cpus(&addr);
3923 field |= TRB_IDT;
3924 } else {
3925 addr = (u64) urb->transfer_dma;
3926 }
3927
3928 remainder = xhci_td_remainder(xhci, 0,
3929 urb->transfer_buffer_length,
3930 urb->transfer_buffer_length,
3931 urb, 1);
3932 length_field = TRB_LEN(urb->transfer_buffer_length) |
3933 TRB_TD_SIZE(remainder) |
3934 TRB_INTR_TARGET(0);
3935 if (setup->bRequestType & USB_DIR_IN)
3936 field |= TRB_DIR_IN;
3937 queue_trb(xhci, ep_ring, true,
3938 lower_32_bits(addr),
3939 upper_32_bits(addr),
3940 length_field,
3941 field | ep_ring->cycle_state);
3942 }
3943
3944
3945 td->last_trb = ep_ring->enqueue;
3946 td->last_trb_seg = ep_ring->enq_seg;
3947
3948
3949
3950 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3951 field = 0;
3952 else
3953 field = TRB_DIR_IN;
3954 queue_trb(xhci, ep_ring, false,
3955 0,
3956 0,
3957 TRB_INTR_TARGET(0),
3958
3959 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3960
3961 giveback_first_trb(xhci, slot_id, ep_index, 0,
3962 start_cycle, start_trb);
3963 return 0;
3964}
3965
3966
3967
3968
3969
3970
3971
3972
3973
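/*
 * Transfer Burst Count (TBC) field of an isoc TRB: the number of bursts needed
 * to move all packets in this TD, zero based. Only SuperSpeed and faster
 * devices burst more than one packet per service interval.
 */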
3974static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3975 struct urb *urb, unsigned int total_packet_count)
3976{
3977 unsigned int max_burst;
3978
3979 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3980 return 0;
3981
3982 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3983 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3984}
3985
3986
3987
3988
3989
3990
3991
3992
3993
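/*
 * Transfer Last Burst Packet Count (TLBPC): number of packets in the final
 * burst of the TD, zero based. For USB 2.0 devices there is only one burst,
 * so this is simply total_packet_count - 1.
 */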
3994static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3995 struct urb *urb, unsigned int total_packet_count)
3996{
3997 unsigned int max_burst;
3998 unsigned int residue;
3999
4000 if (xhci->hci_version < 0x100)
4001 return 0;
4002
4003 if (urb->dev->speed >= USB_SPEED_SUPER) {
4004
4005 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
4006 residue = total_packet_count % (max_burst + 1);
4007
4008
4009
4010 if (residue == 0)
4011 return max_burst;
4012 return residue - 1;
4013 }
4014 if (total_packet_count == 0)
4015 return 0;
4016 return total_packet_count - 1;
4017}
4018
4019
4020
4021
4022
4023
4024
4025
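/*
 * Compute the Frame ID field of an isochronous TRB: the target frame the TD
 * should start in (xHCI 1.1, section 4.11.2.5). Returns the frame number, or
 * a negative value if the requested frame is outside the schedulable window
 * and the SIA bit should be used instead.
 */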
4026static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
4027 struct urb *urb, int index)
4028{
4029 int start_frame, ist, ret = 0;
4030 int start_frame_id, end_frame_id, current_frame_id;
4031
4032 if (urb->dev->speed == USB_SPEED_LOW ||
4033 urb->dev->speed == USB_SPEED_FULL)
4034 start_frame = urb->start_frame + index * urb->interval;
4035 else
4036 start_frame = (urb->start_frame + index * urb->interval) >> 3;
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046 ist = HCS_IST(xhci->hcs_params2) & 0x7;
4047 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
4048 ist <<= 3;
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
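/*
 * A TD must not be scheduled for a Frame ID earlier than
 * (current MFINDEX + IST + 1) or later than (current MFINDEX + 895 ms),
 * both rounded to frame boundaries (xHCI 4.11.2.5).
 */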
4063 current_frame_id = readl(&xhci->run_regs->microframe_index);
4064 start_frame_id = roundup(current_frame_id + ist + 1, 8);
4065 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
4066
4067 start_frame &= 0x7ff;
4068 start_frame_id = (start_frame_id >> 3) & 0x7ff;
4069 end_frame_id = (end_frame_id >> 3) & 0x7ff;
4070
4071 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
4072 __func__, index, readl(&xhci->run_regs->microframe_index),
4073 start_frame_id, end_frame_id, start_frame);
4074
4075 if (start_frame_id < end_frame_id) {
4076 if (start_frame > end_frame_id ||
4077 start_frame < start_frame_id)
4078 ret = -EINVAL;
4079 } else if (start_frame_id > end_frame_id) {
4080 if ((start_frame > end_frame_id &&
4081 start_frame < start_frame_id))
4082 ret = -EINVAL;
4083 } else {
4084 ret = -EINVAL;
4085 }
4086
4087 if (index == 0) {
4088 if (ret == -EINVAL || start_frame == start_frame_id) {
4089 start_frame = start_frame_id + 1;
4090 if (urb->dev->speed == USB_SPEED_LOW ||
4091 urb->dev->speed == USB_SPEED_FULL)
4092 urb->start_frame = start_frame;
4093 else
4094 urb->start_frame = start_frame << 3;
4095 ret = 0;
4096 }
4097 }
4098
4099 if (ret) {
4100 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
4101 start_frame, current_frame_id, index,
4102 start_frame_id, end_frame_id);
4103 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
4104 return ret;
4105 }
4106
4107 return start_frame;
4108}
4109
4110
4111static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
4112{
4113 if (xhci->hci_version < 0x100)
4114 return false;
4115
4116 if (i == num_tds - 1)
4117 return false;
4118
4119
4120
4121
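/*
 * Hosts with the AVOID_BEI quirk handle a full event ring poorly, so still
 * generate an interrupt every isoc_bei_interval TDs to drain it.
 */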
4122 if (i && xhci->quirks & XHCI_AVOID_BEI)
4123 return !!(i % xhci->isoc_bei_interval);
4124
4125 return true;
4126}
4127
4128
4129static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4130 struct urb *urb, int slot_id, unsigned int ep_index)
4131{
4132 struct xhci_ring *ep_ring;
4133 struct urb_priv *urb_priv;
4134 struct xhci_td *td;
4135 int num_tds, trbs_per_td;
4136 struct xhci_generic_trb *start_trb;
4137 bool first_trb;
4138 int start_cycle;
4139 u32 field, length_field;
4140 int running_total, trb_buff_len, td_len, td_remain_len, ret;
4141 u64 start_addr, addr;
4142 int i, j;
4143 bool more_trbs_coming;
4144 struct xhci_virt_ep *xep;
4145 int frame_id;
4146
4147 xep = &xhci->devs[slot_id]->eps[ep_index];
4148 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
4149
4150 num_tds = urb->number_of_packets;
4151 if (num_tds < 1) {
4152 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
4153 return -EINVAL;
4154 }
4155 start_addr = (u64) urb->transfer_dma;
4156 start_trb = &ep_ring->enqueue->generic;
4157 start_cycle = ep_ring->cycle_state;
4158
4159 urb_priv = urb->hcpriv;
4160
4161 for (i = 0; i < num_tds; i++) {
4162 unsigned int total_pkt_count, max_pkt;
4163 unsigned int burst_count, last_burst_pkt_count;
4164 u32 sia_frame_id;
4165
4166 first_trb = true;
4167 running_total = 0;
4168 addr = start_addr + urb->iso_frame_desc[i].offset;
4169 td_len = urb->iso_frame_desc[i].length;
4170 td_remain_len = td_len;
4171 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
4172 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
4173
4174
4175 if (total_pkt_count == 0)
4176 total_pkt_count++;
4177 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
4178 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
4179 urb, total_pkt_count);
4180
4181 trbs_per_td = count_isoc_trbs_needed(urb, i);
4182
4183 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
4184 urb->stream_id, trbs_per_td, urb, i, mem_flags);
4185 if (ret < 0) {
4186 if (i == 0)
4187 return ret;
4188 goto cleanup;
4189 }
4190 td = &urb_priv->td[i];
4191 td->num_trbs = trbs_per_td;
4192
4193 sia_frame_id = TRB_SIA;
4194 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
4195 HCC_CFC(xhci->hcc_params)) {
4196 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
4197 if (frame_id >= 0)
4198 sia_frame_id = TRB_FRAME_ID(frame_id);
4199 }
4200
4201
4202
4203
4204
4205 field = TRB_TYPE(TRB_ISOC) |
4206 TRB_TLBPC(last_burst_pkt_count) |
4207 sia_frame_id |
4208 (i ? ep_ring->cycle_state : !start_cycle);
4209
4210
4211 if (!xep->use_extended_tbc)
4212 field |= TRB_TBC(burst_count);
4213
4214
4215 for (j = 0; j < trbs_per_td; j++) {
4216 u32 remainder = 0;
4217
4218
4219 if (!first_trb)
4220 field = TRB_TYPE(TRB_NORMAL) |
4221 ep_ring->cycle_state;
4222
4223
4224 if (usb_urb_dir_in(urb))
4225 field |= TRB_ISP;
4226
4227
4228 if (j < trbs_per_td - 1) {
4229 more_trbs_coming = true;
4230 field |= TRB_CHAIN;
4231 } else {
4232 more_trbs_coming = false;
4233 td->last_trb = ep_ring->enqueue;
4234 td->last_trb_seg = ep_ring->enq_seg;
4235 field |= TRB_IOC;
4236 if (trb_block_event_intr(xhci, num_tds, i))
4237 field |= TRB_BEI;
4238 }
4239
4240 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
4241 if (trb_buff_len > td_remain_len)
4242 trb_buff_len = td_remain_len;
4243
4244
4245 remainder = xhci_td_remainder(xhci, running_total,
4246 trb_buff_len, td_len,
4247 urb, more_trbs_coming);
4248
4249 length_field = TRB_LEN(trb_buff_len) |
4250 TRB_INTR_TARGET(0);
4251
4252
4253 if (first_trb && xep->use_extended_tbc)
4254 length_field |= TRB_TD_SIZE_TBC(burst_count);
4255 else
4256 length_field |= TRB_TD_SIZE(remainder);
4257 first_trb = false;
4258
4259 queue_trb(xhci, ep_ring, more_trbs_coming,
4260 lower_32_bits(addr),
4261 upper_32_bits(addr),
4262 length_field,
4263 field);
4264 running_total += trb_buff_len;
4265
4266 addr += trb_buff_len;
4267 td_remain_len -= trb_buff_len;
4268 }
4269
4270
4271 if (running_total != td_len) {
4272 xhci_err(xhci, "ISOC TD length unmatch\n");
4273 ret = -EINVAL;
4274 goto cleanup;
4275 }
4276 }
4277
4278
4279 if (HCC_CFC(xhci->hcc_params))
4280 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
4281
4282 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
4283 if (xhci->quirks & XHCI_AMD_PLL_FIX)
4284 usb_amd_quirk_pll_disable();
4285 }
4286 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
4287
4288 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
4289 start_cycle, start_trb);
4290 return 0;
4291cleanup:
4292
4293
4294 for (i--; i >= 0; i--)
4295 list_del_init(&urb_priv->td[i].td_list);
4296
4297
4298
4299
4300
4301
4302 urb_priv->td[0].last_trb = ep_ring->enqueue;
4303
4304 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
4305
4306
4307 ep_ring->enqueue = urb_priv->td[0].first_trb;
4308 ep_ring->enq_seg = urb_priv->td[0].start_seg;
4309 ep_ring->cycle_state = start_cycle;
4310 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
4311 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
4312 return ret;
4313}
4314
4315
4316
4317
4318
4319
4320
4321
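/*
 * Check that the transfer ring has room for the whole isoc URB, update
 * urb->interval if it disagrees with the endpoint context, and compute
 * urb->start_frame before handing off to xhci_queue_isoc_tx().
 */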
4322int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
4323 struct urb *urb, int slot_id, unsigned int ep_index)
4324{
4325 struct xhci_virt_device *xdev;
4326 struct xhci_ring *ep_ring;
4327 struct xhci_ep_ctx *ep_ctx;
4328 int start_frame;
4329 int num_tds, num_trbs, i;
4330 int ret;
4331 struct xhci_virt_ep *xep;
4332 int ist;
4333
4334 xdev = xhci->devs[slot_id];
4335 xep = &xhci->devs[slot_id]->eps[ep_index];
4336 ep_ring = xdev->eps[ep_index].ring;
4337 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
4338
4339 num_trbs = 0;
4340 num_tds = urb->number_of_packets;
4341 for (i = 0; i < num_tds; i++)
4342 num_trbs += count_isoc_trbs_needed(urb, i);
4343
4344
4345
4346
4347 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
4348 num_trbs, mem_flags);
4349 if (ret)
4350 return ret;
4351
4352
4353
4354
4355
4356 check_interval(xhci, urb, ep_ctx);
4357
4358
4359 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
4360 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
4361 urb->start_frame = xep->next_frame_id;
4362 goto skip_start_over;
4363 }
4364 }
4365
4366 start_frame = readl(&xhci->run_regs->microframe_index);
4367 start_frame &= 0x3fff;
4368
4369
4370
4371
4372 ist = HCS_IST(xhci->hcs_params2) & 0x7;
4373 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
4374 ist <<= 3;
4375 start_frame += ist + XHCI_CFC_DELAY;
4376 start_frame = roundup(start_frame, 8);
4377
4378
4379
4380
4381
4382 if (urb->dev->speed == USB_SPEED_LOW ||
4383 urb->dev->speed == USB_SPEED_FULL) {
4384 start_frame = roundup(start_frame, urb->interval << 3);
4385 urb->start_frame = start_frame >> 3;
4386 } else {
4387 start_frame = roundup(start_frame, urb->interval);
4388 urb->start_frame = start_frame;
4389 }
4390
4391skip_start_over:
4392 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
4393
4394 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
4395}
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
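/*
 * Generic function for queueing a command TRB on the command ring.
 * Commands that must not fail (@command_must_succeed) may dip into the
 * reserved TRBs; all others must leave the reservation untouched.
 */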
4407static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4408 u32 field1, u32 field2,
4409 u32 field3, u32 field4, bool command_must_succeed)
4410{
4411 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4412 int ret;
4413
4414 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4415 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4416 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4417 return -ESHUTDOWN;
4418 }
4419
4420 if (!command_must_succeed)
4421 reserved_trbs++;
4422
4423 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4424 reserved_trbs, GFP_ATOMIC);
4425 if (ret < 0) {
4426 xhci_err(xhci, "ERR: No room for command on command ring\n");
4427 if (command_must_succeed)
4428 xhci_err(xhci, "ERR: Reserved TRB counting for "
4429 "unfailable commands failed.\n");
4430 return ret;
4431 }
4432
4433 cmd->command_trb = xhci->cmd_ring->enqueue;
4434
4435
4436 if (list_empty(&xhci->cmd_list)) {
4437 xhci->current_cmd = cmd;
4438 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
4439 }
4440
4441 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4442
4443 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4444 field4 | xhci->cmd_ring->cycle_state);
4445 return 0;
4446}
4447
4448
4449int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4450 u32 trb_type, u32 slot_id)
4451{
4452 return queue_command(xhci, cmd, 0, 0, 0,
4453 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4454}
4455
4456
4457int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4458 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4459{
4460 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4461 upper_32_bits(in_ctx_ptr), 0,
4462 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4463 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4464}
4465
4466int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4467 u32 field1, u32 field2, u32 field3, u32 field4)
4468{
4469 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4470}
4471
4472
4473int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4474 u32 slot_id)
4475{
4476 return queue_command(xhci, cmd, 0, 0, 0,
4477 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4478 false);
4479}
4480
4481
4482int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4483 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4484 u32 slot_id, bool command_must_succeed)
4485{
4486 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4487 upper_32_bits(in_ctx_ptr), 0,
4488 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4489 command_must_succeed);
4490}
4491
4492
4493int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4494 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4495{
4496 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4497 upper_32_bits(in_ctx_ptr), 0,
4498 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4499 command_must_succeed);
4500}
4501
4502
4503
4504
4505
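/*
 * Queue a Stop Endpoint command; @suspend is set when the endpoint is being
 * stopped as part of suspending the device (Suspend Port flag in the TRB).
 */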
4506int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4507 int slot_id, unsigned int ep_index, int suspend)
4508{
4509 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4510 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4511 u32 type = TRB_TYPE(TRB_STOP_RING);
4512 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4513
4514 return queue_command(xhci, cmd, 0, 0, 0,
4515 trb_slot_id | trb_ep_index | type | trb_suspend, false);
4516}
4517
4518int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4519 int slot_id, unsigned int ep_index,
4520 enum xhci_ep_reset_type reset_type)
4521{
4522 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4523 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4524 u32 type = TRB_TYPE(TRB_RESET_EP);
4525
4526 if (reset_type == EP_SOFT_RESET)
4527 type |= TRB_TSP;
4528
4529 return queue_command(xhci, cmd, 0, 0, 0,
4530 trb_slot_id | trb_ep_index | type, false);
4531}
4532