1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <common.h>
17#include <cpu_func.h>
18#include <log.h>
19#include <asm/byteorder.h>
20#include <usb.h>
21#include <asm/unaligned.h>
22#include <linux/bug.h>
23#include <linux/errno.h>
24
25#include <usb/xhci.h>
26
27
28
29
30
31
32
33
34
35
36
37
38static int last_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
39 struct xhci_segment *seg, union xhci_trb *trb)
40{
41 if (ring == ctrl->event_ring)
42 return trb == &seg->trbs[TRBS_PER_SEGMENT];
43 else
44 return TRB_TYPE_LINK_LE32(trb->link.control);
45}
46
47
48
49
50
51
52
53
54
55
56
57static bool last_trb_on_last_seg(struct xhci_ctrl *ctrl,
58 struct xhci_ring *ring,
59 struct xhci_segment *seg,
60 union xhci_trb *trb)
61{
62 if (ring == ctrl->event_ring)
63 return ((trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
64 (seg->next == ring->first_seg));
65 else
66 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
67}
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
/**
 * Advance the ring's enqueue pointer by one TRB, handling link TRBs and
 * segment/cycle wrap-around.
 *
 * @ctrl:		host controller data structure
 * @ring:		ring whose enqueue pointer is advanced
 * @more_trbs_coming:	true if the caller will queue more TRBs before
 *			ringing the doorbell for this TD
 */
static void inc_enq(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	/* Remember the chain bit of the TRB we just wrote */
	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	/*
	 * Keep advancing while the new position is a link TRB, or the
	 * one-past-the-end slot of an event ring segment (which has no
	 * link TRBs).
	 */
	while (last_trb(ctrl, ring, ring->enq_seg, next)) {
		if (ring != ctrl->event_ring) {
			/*
			 * If the caller doesn't plan on enqueueing more TDs
			 * before ringing the doorbell, don't hand the link
			 * TRB to the hardware just yet; prepare_ring() will
			 * give it back just before the next TD is queued.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/*
			 * Carry the chain bit of the previous TRB over into
			 * the link TRB (which may mean the chain bit gets
			 * cleared), then hand the link TRB to the hardware
			 * by toggling its cycle bit.
			 */
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);

			next->link.control ^= cpu_to_le32(TRB_CYCLE);
			xhci_flush_cache((uintptr_t)next,
					 sizeof(union xhci_trb));
		}
		/* Toggle the producer cycle state after the last segment */
		if (last_trb_on_last_seg(ctrl, ring,
					 ring->enq_seg, next))
			ring->cycle_state = (ring->cycle_state ? 0 : 1);

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
140
141
142
143
144
145
146
147
148
/**
 * Advance the ring's dequeue pointer by one TRB, skipping over link TRBs
 * and handling segment/cycle wrap-around.
 *
 * @ctrl:	host controller data structure
 * @ring:	ring whose dequeue pointer is advanced
 */
static void inc_deq(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
{
	do {
		/*
		 * If we're at a link TRB (or the end of an event ring
		 * segment), move on to the next segment; otherwise just
		 * step to the next TRB.
		 */
		if (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue)) {
			/* Consumer cycle state toggles at the event ring wrap */
			if (ring == ctrl->event_ring &&
			    last_trb_on_last_seg(ctrl, ring,
						 ring->deq_seg, ring->dequeue)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
		/* Loop again in case the new position is itself a link TRB */
	} while (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue));
}
170
171
172
173
174
175
176
177
178
179
180
181
182
183static struct xhci_generic_trb *queue_trb(struct xhci_ctrl *ctrl,
184 struct xhci_ring *ring,
185 bool more_trbs_coming,
186 unsigned int *trb_fields)
187{
188 struct xhci_generic_trb *trb;
189 int i;
190
191 trb = &ring->enqueue->generic;
192
193 for (i = 0; i < 4; i++)
194 trb->field[i] = cpu_to_le32(trb_fields[i]);
195
196 xhci_flush_cache((uintptr_t)trb, sizeof(struct xhci_generic_trb));
197
198 inc_enq(ctrl, ring, more_trbs_coming);
199
200 return trb;
201}
202
203
204
205
206
207
208
209
210
211
/**
 * Check the endpoint state and make the ring ready for queueing: if the
 * enqueue pointer sits on a link TRB, hand the link TRB to the hardware
 * and advance into the next segment.
 *
 * @ctrl:	host controller data structure
 * @ep_ring:	ring on which TRBs are to be queued
 * @ep_state:	endpoint state field from the (output) endpoint context
 * Return: 0 when the ring is ready, negative errno when the endpoint
 *	   cannot accept transfers in its current state
 */
static int prepare_ring(struct xhci_ctrl *ctrl, struct xhci_ring *ep_ring,
			u32 ep_state)
{
	union xhci_trb *next = ep_ring->enqueue;

	/* Make sure the endpoint has been added to the xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or the hardware is reporting the wrong state.
		 */
		puts("WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		puts("WARN waiting for error on ep to be cleared\n");
		return -EINVAL;
	case EP_STATE_HALTED:
		puts("WARN halted endpoint, queueing URB anyway.\n");
		/* fallthrough - queue on the halted endpoint regardless */
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		debug("EP STATE RUNNING.\n");
		break;
	default:
		puts("ERROR unknown endpoint state for ep\n");
		return -EINVAL;
	}

	/* Step past any link TRB sitting at the enqueue position */
	while (last_trb(ctrl, ep_ring, ep_ring->enq_seg, next)) {
		/*
		 * Clear the chain bit and give the link TRB to the hardware
		 * by toggling its cycle bit.
		 */
		next->link.control &= cpu_to_le32(~TRB_CHAIN);

		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		xhci_flush_cache((uintptr_t)next, sizeof(union xhci_trb));

		/* Toggle the cycle bit after the last ring segment */
		if (last_trb_on_last_seg(ctrl, ep_ring,
					 ep_ring->enq_seg, next))
			ep_ring->cycle_state = (ep_ring->cycle_state ? 0 : 1);
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
		next = ep_ring->enqueue;
	}

	return 0;
}
262
263
264
265
266
267
268
269
270
271
272
273
274void xhci_queue_command(struct xhci_ctrl *ctrl, u8 *ptr, u32 slot_id,
275 u32 ep_index, trb_type cmd)
276{
277 u32 fields[4];
278 u64 val_64 = 0;
279
280 BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));
281
282 if (ptr)
283 val_64 = xhci_virt_to_bus(ctrl, ptr);
284
285 fields[0] = lower_32_bits(val_64);
286 fields[1] = upper_32_bits(val_64);
287 fields[2] = 0;
288 fields[3] = TRB_TYPE(cmd) | SLOT_ID_FOR_TRB(slot_id) |
289 ctrl->cmd_ring->cycle_state;
290
291
292
293
294
295 if (cmd >= TRB_RESET_EP && cmd <= TRB_SET_DEQ)
296 fields[3] |= EP_ID_FOR_TRB(ep_index);
297
298 queue_trb(ctrl, ctrl->cmd_ring, false, fields);
299
300
301 xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
302}
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332static u32 xhci_td_remainder(struct xhci_ctrl *ctrl, int transferred,
333 int trb_buff_len, unsigned int td_total_len,
334 int maxp, bool more_trbs_coming)
335{
336 u32 total_packet_count;
337
338
339 if (ctrl->hci_version < 0x100 && !(ctrl->quirks & XHCI_MTK_HOST))
340 return ((td_total_len - transferred) >> 10);
341
342
343 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
344 trb_buff_len == td_total_len)
345 return 0;
346
347
348 if ((ctrl->quirks & XHCI_MTK_HOST) && (ctrl->hci_version < 0x100))
349 trb_buff_len = 0;
350
351 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
352
353
354 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
355}
356
357
358
359
360
361
362
363
364
365
366static void giveback_first_trb(struct usb_device *udev, int ep_index,
367 int start_cycle,
368 struct xhci_generic_trb *start_trb)
369{
370 struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
371
372
373
374
375
376 if (start_cycle)
377 start_trb->field[3] |= cpu_to_le32(start_cycle);
378 else
379 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
380
381 xhci_flush_cache((uintptr_t)start_trb, sizeof(struct xhci_generic_trb));
382
383
384 xhci_writel(&ctrl->dba->doorbell[udev->slot_id],
385 DB_VALUE(ep_index, 0));
386
387 return;
388}
389
390
391
392
393
394
395
396
397
398
399
400void xhci_acknowledge_event(struct xhci_ctrl *ctrl)
401{
402
403 inc_deq(ctrl, ctrl->event_ring);
404
405
406 xhci_writeq(&ctrl->ir_set->erst_dequeue,
407 xhci_virt_to_bus(ctrl, ctrl->event_ring->dequeue) | ERST_EHB);
408}
409
410
411
412
413
414
415
416static int event_ready(struct xhci_ctrl *ctrl)
417{
418 union xhci_trb *event;
419
420 xhci_inval_cache((uintptr_t)ctrl->event_ring->dequeue,
421 sizeof(union xhci_trb));
422
423 event = ctrl->event_ring->dequeue;
424
425
426 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
427 ctrl->event_ring->cycle_state)
428 return 0;
429
430 return 1;
431}
432
433
434
435
436
437
438
439
440
441
/**
 * Poll the event ring for an event of the given type, acknowledging and
 * skipping unrelated events along the way.
 *
 * @ctrl:	host controller data structure
 * @expected:	TRB type to wait for
 * Return: pointer to the matching (not yet acknowledged) event TRB, or
 *	   NULL on timeout when waiting for TRB_TRANSFER; a timeout on
 *	   any other event type is treated as unrecoverable (BUG()).
 */
union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
{
	trb_type type;
	unsigned long ts = get_timer(0);

	do {
		union xhci_trb *event = ctrl->event_ring->dequeue;

		/* Nothing new on the ring yet - keep polling */
		if (!event_ready(ctrl))
			continue;

		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
		if (type == expected)
			return event;

		if (type == TRB_PORT_STATUS)
			/*
			 * Port status change events must carry a successful
			 * completion code here; anything else is fatal.
			 */
			BUG_ON(GET_COMP_CODE(
				le32_to_cpu(event->generic.field[2])) !=
				COMP_SUCCESS);
		else
			printf("Unexpected XHCI event TRB, skipping... "
				"(%08x %08x %08x %08x)\n",
				le32_to_cpu(event->generic.field[0]),
				le32_to_cpu(event->generic.field[1]),
				le32_to_cpu(event->generic.field[2]),
				le32_to_cpu(event->generic.field[3]));

		/* Consume the unrelated event and keep waiting */
		xhci_acknowledge_event(ctrl);
	} while (get_timer(ts) < XHCI_TIMEOUT);

	/* Transfer timeouts are recoverable by the caller via abort_td() */
	if (expected == TRB_TRANSFER)
		return NULL;

	printf("XHCI timeout on event type %d... cannot recover.\n", expected);
	BUG();
}
483
484
485
486
487
488
489
490
491
492static void abort_td(struct usb_device *udev, int ep_index)
493{
494 struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
495 struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring;
496 union xhci_trb *event;
497 u32 field;
498
499 xhci_queue_command(ctrl, NULL, udev->slot_id, ep_index, TRB_STOP_RING);
500
501 event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
502 field = le32_to_cpu(event->trans_event.flags);
503 BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
504 BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
505 BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len
506 != COMP_STOP)));
507 xhci_acknowledge_event(ctrl);
508
509 event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
510 BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
511 != udev->slot_id || GET_COMP_CODE(le32_to_cpu(
512 event->event_cmd.status)) != COMP_SUCCESS);
513 xhci_acknowledge_event(ctrl);
514
515 xhci_queue_command(ctrl, (void *)((uintptr_t)ring->enqueue |
516 ring->cycle_state), udev->slot_id, ep_index, TRB_SET_DEQ);
517 event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
518 BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
519 != udev->slot_id || GET_COMP_CODE(le32_to_cpu(
520 event->event_cmd.status)) != COMP_SUCCESS);
521 xhci_acknowledge_event(ctrl);
522}
523
/*
 * Translate a transfer event's completion code and residual length into
 * udev->status and udev->act_len for the USB core.
 */
static void record_transfer_result(struct usb_device *udev,
				   union xhci_trb *event, int length)
{
	/*
	 * EVENT_TRB_LEN holds the number of bytes *not* transferred;
	 * clamp act_len to the requested length.
	 */
	udev->act_len = min(length, length -
		(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len)));

	switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) {
	case COMP_SUCCESS:
		BUG_ON(udev->act_len != length);
		/* fallthrough - success also reports status 0 */
	case COMP_SHORT_TX:
		udev->status = 0;
		break;
	case COMP_STALL:
		udev->status = USB_ST_STALLED;
		break;
	case COMP_DB_ERR:
	case COMP_TRB_ERR:
		udev->status = USB_ST_BUF_ERR;
		break;
	case COMP_BABBLE:
		udev->status = USB_ST_BABBLE_DET;
		break;
	default:
		/* Unknown completion code - flag as generic error */
		udev->status = 0x80;
	}
}
551
552
553
554
555
556
557
558
559
560
561
/**
 * Queue a bulk transfer on the endpoint's ring, wait for the matching
 * transfer event and record the result.
 *
 * @udev:	pointer to the USB device structure
 * @pipe:	contains the direction and endpoint number
 * @length:	length of the buffer in bytes
 * @buffer:	buffer to be read from / written to
 * Return: 0 on success, -ETIMEDOUT on timeout, other negative error
 *	   codes on failure
 */
int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
		 int length, void *buffer)
{
	int num_trbs = 0;
	struct xhci_generic_trb *start_trb;
	bool first_trb = false;
	int start_cycle;
	u32 field = 0;
	u32 length_field = 0;
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	int slot_id = udev->slot_id;
	int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ring;
	union xhci_trb *event;

	int running_total, trb_buff_len;
	bool more_trbs_coming = true;
	int maxpacketsize;
	u64 addr;
	int ret;
	u32 trb_fields[4];
	u64 val_64 = xhci_virt_to_bus(ctrl, buffer);
	void *last_transfer_trb_addr;
	int available_length;

	debug("dev=%p, pipe=%lx, buffer=%p, length=%d\n",
	      udev, pipe, buffer, length);

	available_length = length;
	ep_index = usb_pipe_ep_index(pipe);
	virt_dev = ctrl->devs[slot_id];

	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
			 virt_dev->out_ctx->size);

	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	ring = virt_dev->eps[ep_index].ring;

	/*
	 * A single TRB's buffer may not cross a 64KB boundary, so a TD
	 * whose buffer does must be split into chained TRBs. First work
	 * out how many bytes fit before the first 64KB boundary.
	 */
	running_total = TRB_MAX_BUFF_SIZE -
			(lower_32_bits(val_64) & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = running_total;
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/*
	 * If there's data before the first boundary, or this is a
	 * zero-length transfer, at least one TRB is needed.
	 */
	if (running_total != 0 || length == 0)
		num_trbs++;

	/* One more TRB per additional 64KB chunk */
	while (running_total < length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

	/*
	 * Check endpoint state and step the enqueue pointer past any link
	 * TRB before queueing.
	 */
	ret = prepare_ring(ctrl, ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
	if (ret < 0)
		return ret;

	/*
	 * Remember the first TRB: its cycle bit is flipped last, in
	 * giveback_first_trb(), so the xHC only sees a complete TD.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;

	running_total = 0;
	maxpacketsize = usb_maxpacket(udev, pipe);

	/* First TRB carries at most the bytes up to the 64KB boundary */
	addr = val_64;

	if (trb_buff_len > length)
		trb_buff_len = length;

	first_trb = true;

	/* Flush the data out to memory before the xHC reads it */
	xhci_flush_cache((uintptr_t)buffer, length);

	/* Queue the TRBs, one per 64KB chunk */
	do {
		u32 remainder = 0;
		field = 0;

		/*
		 * The first TRB keeps the inverted cycle bit until
		 * giveback; later TRBs carry the current cycle state.
		 */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else {
			field |= ring->cycle_state;
		}

		/*
		 * Chain all TRBs of the TD together; the last one gets
		 * IOC so a transfer event is generated on completion.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			field |= TRB_IOC;
			more_trbs_coming = false;
		}

		/* Only set interrupt-on-short-packet for IN endpoints */
		if (usb_pipein(pipe))
			field |= TRB_ISP;

		/* TD size: number of packets remaining after this TRB */
		remainder = xhci_td_remainder(ctrl, running_total, trb_buff_len,
					      length, maxpacketsize,
					      more_trbs_coming);

		length_field = (TRB_LEN(trb_buff_len) |
				TRB_TD_SIZE(remainder) |
				TRB_INTR_TARGET(0));

		trb_fields[0] = lower_32_bits(addr);
		trb_fields[1] = upper_32_bits(addr);
		trb_fields[2] = length_field;
		trb_fields[3] = field | TRB_TYPE(TRB_NORMAL);

		last_transfer_trb_addr = queue_trb(ctrl, ring, (num_trbs > 1), trb_fields);

		--num_trbs;

		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = min((length - running_total), TRB_MAX_BUFF_SIZE);
	} while (running_total < length);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

again:
	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event) {
		debug("XHCI bulk transfer timed out, aborting...\n");
		abort_td(udev, ep_index);
		udev->status = USB_ST_NAK_REC;	/* kernel 'TIMEOUT'-equivalent */
		udev->act_len = 0;
		return -ETIMEDOUT;
	}

	/*
	 * Events for intermediate TRBs (e.g. a short packet mid-TD) point
	 * at a TRB other than the last one we queued: account for their
	 * residual bytes, acknowledge them and keep waiting for the event
	 * of the final TRB.
	 */
	if ((uintptr_t)(le64_to_cpu(event->trans_event.buffer)) !=
	    (uintptr_t)xhci_virt_to_bus(ctrl, last_transfer_trb_addr)) {
		available_length -=
			(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len));
		xhci_acknowledge_event(ctrl);
		goto again;
	}

	field = le32_to_cpu(event->trans_event.flags);
	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);

	record_transfer_result(udev, event, available_length);
	xhci_acknowledge_event(ctrl);
	/* Invalidate the buffer so the CPU sees what the xHC wrote */
	xhci_inval_cache((uintptr_t)buffer, length);

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
}
747
748
749
750
751
752
753
754
755
756
757
758int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
759 struct devrequest *req, int length,
760 void *buffer)
761{
762 int ret;
763 int start_cycle;
764 int num_trbs;
765 u32 field;
766 u32 length_field;
767 u64 buf_64 = 0;
768 struct xhci_generic_trb *start_trb;
769 struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
770 int slot_id = udev->slot_id;
771 int ep_index;
772 u32 trb_fields[4];
773 struct xhci_virt_device *virt_dev = ctrl->devs[slot_id];
774 struct xhci_ring *ep_ring;
775 union xhci_trb *event;
776 u32 remainder;
777
778 debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
779 req->request, req->request,
780 req->requesttype, req->requesttype,
781 le16_to_cpu(req->value), le16_to_cpu(req->value),
782 le16_to_cpu(req->index));
783
784 ep_index = usb_pipe_ep_index(pipe);
785
786 ep_ring = virt_dev->eps[ep_index].ring;
787
788
789
790
791
792 if (udev->speed == USB_SPEED_FULL) {
793 ret = xhci_check_maxpacket(udev);
794 if (ret < 0)
795 return ret;
796 }
797
798 xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
799 virt_dev->out_ctx->size);
800
801 struct xhci_ep_ctx *ep_ctx = NULL;
802 ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);
803
804
805 num_trbs = 2;
806
807
808
809
810
811
812 if (length > 0)
813 num_trbs++;
814
815
816
817
818
819 ret = prepare_ring(ctrl, ep_ring,
820 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
821
822 if (ret < 0)
823 return ret;
824
825
826
827
828
829
830 start_trb = &ep_ring->enqueue->generic;
831 start_cycle = ep_ring->cycle_state;
832
833 debug("start_trb %p, start_cycle %d\n", start_trb, start_cycle);
834
835
836
837 field = 0;
838 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
839 if (start_cycle == 0)
840 field |= 0x1;
841
842
843 if (ctrl->hci_version >= 0x100 || ctrl->quirks & XHCI_MTK_HOST) {
844 if (length > 0) {
845 if (req->requesttype & USB_DIR_IN)
846 field |= TRB_TX_TYPE(TRB_DATA_IN);
847 else
848 field |= TRB_TX_TYPE(TRB_DATA_OUT);
849 }
850 }
851
852 debug("req->requesttype = %d, req->request = %d, req->value = %d, req->index = %d, req->length = %d\n",
853 req->requesttype, req->request, le16_to_cpu(req->value),
854 le16_to_cpu(req->index), le16_to_cpu(req->length));
855
856 trb_fields[0] = req->requesttype | req->request << 8 |
857 le16_to_cpu(req->value) << 16;
858 trb_fields[1] = le16_to_cpu(req->index) |
859 le16_to_cpu(req->length) << 16;
860
861 trb_fields[2] = (TRB_LEN(8) | TRB_INTR_TARGET(0));
862
863 trb_fields[3] = field;
864 queue_trb(ctrl, ep_ring, true, trb_fields);
865
866
867 field = 0;
868
869
870 if (usb_pipein(pipe))
871 field = TRB_ISP | TRB_TYPE(TRB_DATA);
872 else
873 field = TRB_TYPE(TRB_DATA);
874
875 remainder = xhci_td_remainder(ctrl, 0, length, length,
876 usb_maxpacket(udev, pipe), true);
877 length_field = TRB_LEN(length) | TRB_TD_SIZE(remainder) |
878 TRB_INTR_TARGET(0);
879 debug("length_field = %d, length = %d,"
880 "xhci_td_remainder(length) = %d , TRB_INTR_TARGET(0) = %d\n",
881 length_field, TRB_LEN(length),
882 TRB_TD_SIZE(remainder), 0);
883
884 if (length > 0) {
885 if (req->requesttype & USB_DIR_IN)
886 field |= TRB_DIR_IN;
887 buf_64 = xhci_virt_to_bus(ctrl, buffer);
888
889 trb_fields[0] = lower_32_bits(buf_64);
890 trb_fields[1] = upper_32_bits(buf_64);
891 trb_fields[2] = length_field;
892 trb_fields[3] = field | ep_ring->cycle_state;
893
894 xhci_flush_cache((uintptr_t)buffer, length);
895 queue_trb(ctrl, ep_ring, true, trb_fields);
896 }
897
898
899
900
901
902
903
904 field = 0;
905 if (length > 0 && req->requesttype & USB_DIR_IN)
906 field = 0;
907 else
908 field = TRB_DIR_IN;
909
910 trb_fields[0] = 0;
911 trb_fields[1] = 0;
912 trb_fields[2] = TRB_INTR_TARGET(0);
913
914 trb_fields[3] = field | TRB_IOC |
915 TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state;
916
917 queue_trb(ctrl, ep_ring, false, trb_fields);
918
919 giveback_first_trb(udev, ep_index, start_cycle, start_trb);
920
921 event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
922 if (!event)
923 goto abort;
924 field = le32_to_cpu(event->trans_event.flags);
925
926 BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
927 BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
928
929 record_transfer_result(udev, event, length);
930 xhci_acknowledge_event(ctrl);
931
932
933 if (length > 0)
934 xhci_inval_cache((uintptr_t)buffer, length);
935
936 if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
937 == COMP_SHORT_TX) {
938
939 event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
940 if (!event)
941 goto abort;
942 BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
943 BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
944 xhci_acknowledge_event(ctrl);
945 }
946
947 return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
948
949abort:
950 debug("XHCI control transfer timed out, aborting...\n");
951 abort_td(udev, ep_index);
952 udev->status = USB_ST_NAK_REC;
953 udev->act_len = 0;
954 return -ETIMEDOUT;
955}
956