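/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * This handles the splitting of URBs into WA transfer segments, the
 * submission of those segments to the Host Wire Adapter, and the
 * processing of transfer results arriving on the DTI endpoint.
 */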
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
	/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
	WA_SEGS_MAX = 128,
};
96
97enum wa_seg_status {
98 WA_SEG_NOTREADY,
99 WA_SEG_READY,
100 WA_SEG_DELAYED,
101 WA_SEG_SUBMITTED,
102 WA_SEG_PENDING,
103 WA_SEG_DTI_PENDING,
104 WA_SEG_DONE,
105 WA_SEG_ERROR,
106 WA_SEG_ABORTED,
107};
108
109static void wa_xfer_delayed_run(struct wa_rpipe *);
110static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
111
112
113
114
115
116
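/*
 * One segment of a transfer.
 *
 * The transfer request URB (tr_urb) must stay the first field: freeing
 * it releases the whole, kmalloc()ed segment (see wa_xfer_destroy()).
 * For isoc transfers, isoc_pack_desc_urb sends the packet descriptor
 * block and dto_urb carries the frame data; the isoc_frame_* fields
 * track which frames of the original URB belong to this segment.
 */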
struct wa_seg {
	struct urb tr_urb;		/* transfer request URB. */
	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */
	struct urb *dto_urb;		/* for data output. */
	struct list_head list_node;	/* for rpipe->seg_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	int isoc_frame_count;	/* number of isoc frames in this segment. */
	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */
	/* Isoc frame that the current transfer buffer corresponds to. */
	int isoc_frame_index;
	int isoc_size;	/* size of all isoc frames sent by this seg. */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
};

static inline void wa_seg_init(struct wa_seg *seg)
{
	usb_init_urb(&seg->tr_urb);

	/* set the remaining memory to 0. */
	memset(((void *)seg) + sizeof(seg->tr_urb), 0,
		sizeof(*seg) - sizeof(seg->tr_urb));
}

/*
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};
167
168static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
169 struct wa_seg *seg, int curr_iso_frame);
170static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
171 int starting_index, enum wa_seg_status status);
172
173static inline void wa_xfer_init(struct wa_xfer *xfer)
174{
175 kref_init(&xfer->refcnt);
176 INIT_LIST_HEAD(&xfer->list_node);
177 spin_lock_init(&xfer->lock);
178}
179
180
181
182
183
184
185
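/*
 * Destroy a transfer structure.
 *
 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */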
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			struct wa_seg *seg = xfer->seg[cnt];
			if (seg) {
				usb_free_urb(seg->isoc_pack_desc_urb);
				if (seg->dto_urb) {
					kfree(seg->dto_urb->sg);
					usb_free_urb(seg->dto_urb);
				}
				usb_free_urb(&seg->tr_urb);
			}
		}
		kfree(xfer->seg);
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * Try to get exclusive access to the DTO endpoint resource.  Return true
 * if acquired.
 */
static inline int __wa_dto_try_get(struct wahc *wa)
{
	return (test_and_set_bit(0, &wa->dto_in_use) == 0);
}

/* Release the DTO endpoint resource. */
static inline void __wa_dto_put(struct wahc *wa)
{
	clear_bit_unlock(0, &wa->dto_in_use);
}

/* Service RPIPEs that are waiting on the DTO resource. */
static void wa_check_for_delayed_rpipes(struct wahc *wa)
{
	unsigned long flags;
	int dto_waiting = 0;
	struct wa_rpipe *rpipe;

	spin_lock_irqsave(&wa->rpipe_lock, flags);
	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
		rpipe = list_first_entry(&wa->rpipe_delayed_list,
				struct wa_rpipe, list_node);
		__wa_xfer_delayed_run(rpipe, &dto_waiting);
		/* remove this RPIPE from the list if it is not waiting. */
		if (!dto_waiting) {
			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
				__func__,
				le16_to_cpu(rpipe->descr.wRPipeIndex));
			list_del_init(&rpipe->list_node);
		}
	}
	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}

/* add this RPIPE to the end of the delayed RPIPE list. */
static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
{
	unsigned long flags;

	spin_lock_irqsave(&wa->rpipe_lock, flags);
	/* add rpipe to the list if it is not already on it. */
	if (list_empty(&rpipe->list_node)) {
		pr_debug("%s: adding RPIPE %d to the delayed list.\n",
			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
	}
	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}

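/*
 * Remove the transfer from the adapter's xfer list, unlink the URB
 * from its endpoint and give it back to the USB stack.
 *
 * xfer is referenced; xfer->lock has to be unlocked.
 */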
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);

	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * Drop the WUSB device and RPIPE references taken for this transfer,
 * then give the URB back.
 *
 * xfer is referenced; xfer->lock has to be unlocked.
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * Initialize a transfer's ID.
 *
 * IDs are taken from a per-adapter atomic counter, so every in-flight
 * transfer on the wire adapter carries a unique identifier.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/* Return the xfer's ID. */
static inline u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/* Return the xfer's ID in little endian (for the wire). */
static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
{
	return cpu_to_le32(xfer->id);
}

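/*
 * If transfer is done, wrap it up and return true.
 *
 * xfer->lock has to be locked.
 */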
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
					xfer, wa_xfer_id(xfer), cnt,
					seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (!(usb_pipeisoc(xfer->urb->pipe))
				&& seg->result < xfer->seg_size
				&& cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d result %zu urb->actual_length %d\n",
				xfer, wa_xfer_id(xfer), seg->index, found_short,
				seg->result, urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
				xfer, wa_xfer_id(xfer), seg->index, seg->result,
				seg->result);
			goto out;
		case WA_SEG_ABORTED:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
				xfer, wa_xfer_id(xfer), seg->index, seg->result,
				seg->result);
			goto out;
		default:
			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
				 xfer, wa_xfer_id(xfer), cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

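/*
 * Mark the given segment as done.  Return true if this completes the xfer.
 * This should only be called for segs that have been submitted to an RPIPE.
 * Delayed segs are not marked as submitted so they do not need to be marked
 * done when cleaning up.
 *
 * xfer->lock has to be locked.
 */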
static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
	struct wa_seg *seg, enum wa_seg_status status)
{
	seg->status = status;
	xfer->segs_done++;

	/* check for done. */
	return __wa_xfer_is_done(xfer);
}

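/*
 * Search the wire adapter's transfer list for a transfer with the given
 * ID and take a reference on the match.
 *
 * Returns NULL if the ID is not found.
 */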
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wahc *wa;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	struct wahc *wa = b->wa;

	/*
	 * If the abort request URB failed, the device is unresponsive;
	 * complete the remaining segments of the transfer since nothing
	 * else will be coming back for them.
	 */
	if (urb->status < 0) {
		struct wa_xfer *xfer;
		struct device *dev = &wa->usb_iface->dev;

		xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
		dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
			__func__, urb->status);
		if (xfer) {
			unsigned long flags;
			int done;
			struct wa_rpipe *rpipe = xfer->ep->hcpriv;

			dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
				__func__, xfer, wa_xfer_id(xfer));
			spin_lock_irqsave(&xfer->lock, flags);
			/* mark all segs as aborted. */
			wa_complete_remaining_xfer_segs(xfer, 0,
				WA_SEG_ABORTED);
			done = __wa_xfer_is_done(xfer);
			spin_unlock_irqrestore(&xfer->lock, flags);
			if (done)
				wa_xfer_completion(xfer);
			wa_xfer_delayed_run(rpipe);
			wa_xfer_put(xfer);
		} else {
			dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
				 __func__, le32_to_cpu(b->cmd.dwTransferID));
		}
	}

	wa_put(wa);	/* taken in __wa_xfer_abort */
	usb_put_urb(&b->urb);
}

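/*
 * Aborts an ongoing transaction by sending a Transfer Abort request to
 * the HWA on the DTO endpoint.
 *
 * Assumes the transfer is referenced, locked and in a submitted state
 * (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see __wa_xfer_abort_cb() above) releases the request
 * buffer by putting the URB; because the URB sits at the head of
 * struct wa_xfer_abort_buffer, the whole allocation goes with it.
 */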
static int __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result = -ENOMEM;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
	b->wa = wa_get(xfer->wa);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return result;				/* callback frees! */

error_submit:
	wa_put(xfer->wa);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return result;
}

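/*
 * Calculate the number of isoc frames starting at isoc_frame_offset
 * that will fit in a segment of size xfer->seg_size.  The total byte
 * count of those frames is returned in *total_size.
 */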
static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
	int isoc_frame_offset, int *total_size)
{
	int segment_size = 0, frame_count = 0;
	int index = isoc_frame_offset;
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;

	while ((index < xfer->urb->number_of_packets)
		&& ((segment_size + iso_frame_desc[index].length)
				<= xfer->seg_size)) {
		/*
		 * For Alereon HWA devices, only include an isoc frame in an
		 * out segment if it is physically contiguous with the previous
		 * frame.  This is required because those devices expect
		 * the isoc frames to be sent as a single USB transfer.
		 */
		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
			&& (xfer->is_inbound == 0)
			&& (index > isoc_frame_offset)
			&& ((iso_frame_desc[index - 1].offset +
				iso_frame_desc[index - 1].length) !=
				iso_frame_desc[index].offset))
			break;

		/* this frame fits. count it. */
		++frame_count;
		segment_size += iso_frame_desc[index].length;

		/* move to the next isoc frame. */
		++index;
	}

	*total_size = segment_size;
	return frame_count;
}

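/*
 * Compute the transfer type, the request header size and the segment
 * layout (seg_size, number of segments) for this transfer.
 *
 * Returns the header size to send on success, a negative errno on error.
 */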
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
	enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		*pxfer_type = WA_XFER_TYPE_ISO;
		result = sizeof(struct wa_xfer_hwaiso);
		break;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;

	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/*
	 * Make the segment size a multiple of the endpoint's maximum packet
	 * size (WUSB1.0[8.3.3.1]).
	 */
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev,
			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
			xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
		int index = 0;

		xfer->segs = 0;
		/*
		 * loop over urb->number_of_packets to determine how many
		 * xfer segments will be needed to send the isoc frames.
		 */
		while (index < urb->number_of_packets) {
			int seg_size; /* don't care. */
			index += __wa_seg_calculate_isoc_frame_count(xfer,
					index, &seg_size);
			++xfer->segs;
		}
	} else {
		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
						xfer->seg_size);
		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
			xfer->segs = 1;
	}

	if (xfer->segs > WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			xfer->segs, WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
error:
	return result;
}

static void __wa_setup_isoc_packet_descr(
		struct wa_xfer_packet_info_hwaiso *packet_desc,
		struct wa_xfer *xfer,
		struct wa_seg *seg) {
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;
	int frame_index;

	/* populate isoc packet descriptor. */
	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
		(sizeof(packet_desc->PacketLength[0]) *
			seg->isoc_frame_count));
	for (frame_index = 0; frame_index < seg->isoc_frame_count;
		++frame_index) {
		int offset_index = frame_index + seg->isoc_frame_offset;
		packet_desc->PacketLength[frame_index] =
			cpu_to_le16(iso_frame_desc[offset_index].length);
	}
}

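/*
 * Fill in the common request header and xfer-type specific data for
 * segment 0 of the transfer.
 */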
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	struct wa_seg *seg = xfer->seg[0];

	xfer_hdr0 = &seg->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
			sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO: {
		struct wa_xfer_hwaiso *xfer_iso =
			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
		struct wa_xfer_packet_info_hwaiso *packet_desc =
			((void *)xfer_iso) + xfer_hdr_size;

		/* populate the isoc section of the transfer request. */
		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
		/* populate isoc packet descriptor. */
		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
		break;
	}
	default:
		BUG();
	}
}

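/*
 * Callback for the OUT data phase of the segment request.
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */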
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	int data_send_done = 1, release_dto = 0, holding_dto = 0;
	u8 done = 0;
	int result;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	spin_lock_irqsave(&xfer->lock, flags);
	wa = xfer->wa;
	dev = &wa->usb_iface->dev;
	if (usb_pipeisoc(xfer->urb->pipe)) {
		/* Alereon HWA sends all isoc frames in a single transfer. */
		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
			seg->isoc_frame_index += seg->isoc_frame_count;
		else
			seg->isoc_frame_index += 1;
		if (seg->isoc_frame_index < seg->isoc_frame_count) {
			data_send_done = 0;
			holding_dto = 1; /* checked in error cases. */
			/*
			 * if this is the last isoc frame of the segment, we
			 * can release DTO after sending this frame.
			 */
			if ((seg->isoc_frame_index + 1) >=
				seg->isoc_frame_count)
				release_dto = 1;
		}
		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
			wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
			holding_dto, release_dto);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		seg->result += urb->actual_length;
		if (data_send_done) {
			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
				wa_xfer_id(xfer), seg->index, seg->result);
			if (seg->status < WA_SEG_PENDING)
				seg->status = WA_SEG_PENDING;
		} else {
			/* more data to send (isoc). */
			__wa_populate_dto_urb_isoc(xfer, seg,
				seg->isoc_frame_offset + seg->isoc_frame_index);

			/* resubmit the URB with the next isoc frame. */
			/* take a ref on resubmit. */
			wa_xfer_get(xfer);
			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
					wa_xfer_id(xfer), seg->index, result);
				spin_unlock_irqrestore(&xfer->lock, flags);
				goto error_dto_submit;
			}
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (release_dto) {
			__wa_dto_put(wa);
			wa_check_for_delayed_rpipes(wa);
		}
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		if (holding_dto) {
			__wa_dto_put(wa);
			wa_check_for_delayed_rpipes(wa);
		}
		break;
	default:		/* Other errors ... */
		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
			wa_xfer_id(xfer), seg->index, urb->status);
		goto error_default;
	}

	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
	return;

error_dto_submit:
	/* taken on resubmit attempt. */
	wa_xfer_put(xfer);
error_default:
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
		    EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (seg->status != WA_SEG_ERROR) {
		seg->result = urb->status;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (holding_dto) {
		__wa_dto_put(wa);
		wa_check_for_delayed_rpipes(wa);
	}
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
}

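/*
 * Callback for the isoc packet descriptor phase of the segment request.
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */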
static void wa_seg_iso_pack_desc_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
			wa_xfer_id(xfer), seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
			wa_xfer_id(xfer), seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			usb_unlink_urb(seg->dto_urb);
			seg->result = urb->status;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
					WA_SEG_ERROR);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
}

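/*
 * Callback for the segment request.
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback, so it might happen that we are already in another state.
 */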
static void wa_seg_tr_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
			xfer, wa_xfer_id(xfer), seg->index);
		if (xfer->is_inbound &&
			seg->status < WA_SEG_PENDING &&
			!(usb_pipeisoc(xfer->urb->pipe)))
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->isoc_pack_desc_urb);
		usb_unlink_urb(seg->dto_urb);
		seg->result = urb->status;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
}

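/*
 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset we are about to
 * transfer.
 */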
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
	const unsigned int bytes_transferred,
	const unsigned int bytes_to_transfer, int *out_num_sgs)
{
	struct scatterlist *out_sg;
	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
		nents;
	struct scatterlist *current_xfer_sg = in_sg;
	struct scatterlist *current_seg_sg, *last_seg_sg;

	/* skip previously transferred pages. */
	while ((current_xfer_sg) &&
			(bytes_processed < bytes_transferred)) {
		bytes_processed += current_xfer_sg->length;

		/* advance the sg if current segment starts on or past the
			next page. */
		if (bytes_processed <= bytes_transferred)
			current_xfer_sg = sg_next(current_xfer_sg);
	}

	/* the data for the current segment starts in current_xfer_sg.
		calculate the offset. */
	if (bytes_processed > bytes_transferred) {
		offset_into_current_page_data = current_xfer_sg->length -
			(bytes_processed - bytes_transferred);
	}

	/* calculate the number of pages needed by this segment. */
	nents = DIV_ROUND_UP((bytes_to_transfer +
		offset_into_current_page_data +
		current_xfer_sg->offset),
		PAGE_SIZE);

	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
	if (out_sg) {
		sg_init_table(out_sg, nents);

		/* copy the portion of the incoming SG that correlates to the
		 * data to be transferred by this segment to the segment SG. */
		last_seg_sg = current_seg_sg = out_sg;
		bytes_processed = 0;

		/* reset nents and calculate the actual number of sg entries
			needed. */
		nents = 0;
		while ((bytes_processed < bytes_to_transfer) &&
				current_seg_sg && current_xfer_sg) {
			unsigned int page_len = min((current_xfer_sg->length -
				offset_into_current_page_data),
				(bytes_to_transfer - bytes_processed));

			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
				page_len,
				current_xfer_sg->offset +
					offset_into_current_page_data);

			bytes_processed += page_len;

			last_seg_sg = current_seg_sg;
			current_seg_sg = sg_next(current_seg_sg);
			current_xfer_sg = sg_next(current_xfer_sg);

			/* only the first page may require additional offset. */
			offset_into_current_page_data = 0;
			nents++;
		}

		/* update num_sgs and terminate the list since we may have
		 * concatenated pages. */
		sg_mark_end(last_seg_sg);
		*out_num_sgs = nents;
	}

	return out_sg;
}

/* Populate the given urb with the current isoc transfer state. */
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
	struct wa_seg *seg, int curr_iso_frame)
{
	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	seg->dto_urb->sg = NULL;
	seg->dto_urb->num_sgs = 0;
	/* dto urb buffer address pulled from iso_frame_desc. */
	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
	/* The Alereon HWA sends a single URB with all isoc segs. */
	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
		seg->dto_urb->transfer_buffer_length = seg->isoc_size;
	else
		seg->dto_urb->transfer_buffer_length =
			xfer->urb->iso_frame_desc[curr_iso_frame].length;
}

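/* Populate the given urb with the current transfer state. */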
static int __wa_populate_dto_urb(struct wa_xfer *xfer,
	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
{
	int result = 0;

	if (xfer->is_dma) {
		seg->dto_urb->transfer_dma =
			xfer->urb->transfer_dma + buf_itr_offset;
		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		seg->dto_urb->sg = NULL;
		seg->dto_urb->num_sgs = 0;
	} else {
		/* do buffer or SG processing. */
		seg->dto_urb->transfer_flags &=
			~URB_NO_TRANSFER_DMA_MAP;
		/* this should always be 0 before a resubmit. */
		seg->dto_urb->num_mapped_sgs = 0;

		if (xfer->urb->transfer_buffer) {
			seg->dto_urb->transfer_buffer =
				xfer->urb->transfer_buffer +
				buf_itr_offset;
			seg->dto_urb->sg = NULL;
			seg->dto_urb->num_sgs = 0;
		} else {
			seg->dto_urb->transfer_buffer = NULL;

			/*
			 * allocate an SG list to store seg_size bytes
			 * and copy the subset of the xfer->urb->sg that
			 * matches the buffer subset we are about to
			 * read.
			 */
			seg->dto_urb->sg = wa_xfer_create_subset_sg(
				xfer->urb->sg,
				buf_itr_offset, buf_itr_size,
				&(seg->dto_urb->num_sgs));
			if (!(seg->dto_urb->sg))
				result = -ENOMEM;
		}
	}
	seg->dto_urb->transfer_buffer_length = buf_itr_size;

	return result;
}

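/*
 * Allocate the segs array and initialize each of them.
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */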
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt, isoc_frame_offset = 0;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		size_t iso_pkt_descr_size = 0;
		int seg_isoc_frame_count = 0, seg_isoc_size = 0;

		/*
		 * Adjust the size of the segment object to contain space for
		 * the isoc packet descriptor buffer.
		 */
		if (usb_pipeisoc(xfer->urb->pipe)) {
			seg_isoc_frame_count =
				__wa_seg_calculate_isoc_frame_count(xfer,
					isoc_frame_offset, &seg_isoc_size);

			iso_pkt_descr_size =
				sizeof(struct wa_xfer_packet_info_hwaiso) +
				(seg_isoc_frame_count * sizeof(__le16));
		}
		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
						GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kmalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				&seg->xfer_hdr, xfer_hdr_size,
				wa_seg_tr_cb, seg);
		buf_itr_size = min(buf_size, xfer->seg_size);

		if (usb_pipeisoc(xfer->urb->pipe)) {
			seg->isoc_frame_count = seg_isoc_frame_count;
			seg->isoc_frame_offset = isoc_frame_offset;
			seg->isoc_size = seg_isoc_size;
			/* iso packet descriptor. */
			seg->isoc_pack_desc_urb =
					usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->isoc_pack_desc_urb == NULL)
				goto error_iso_pack_desc_alloc;
			/*
			 * The buffer for the isoc packet descriptor starts
			 * after the transfer request header in the
			 * segment object memory buffer.
			 */
			usb_fill_bulk_urb(
				seg->isoc_pack_desc_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
					dto_epd->bEndpointAddress),
				(void *)(&seg->xfer_hdr) +
					xfer_hdr_size,
				iso_pkt_descr_size,
				wa_seg_iso_pack_desc_cb, seg);

			/* adjust starting frame offset for next seg. */
			isoc_frame_offset += seg_isoc_frame_count;
		}

		if (xfer->is_inbound == 0 && buf_size > 0) {
			/* outbound data. */
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
					dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);

			if (usb_pipeisoc(xfer->urb->pipe)) {
				/*
				 * Fill in the xfer buffer information for the
				 * first isoc frame.  Subsequent frames in this
				 * segment will be filled in and sent from the
				 * DTO completion routine, if needed.
				 */
				__wa_populate_dto_urb_isoc(xfer, seg,
					seg->isoc_frame_offset);
			} else {
				/* fill in the xfer buffer information. */
				result = __wa_populate_dto_urb(xfer, seg,
							buf_itr, buf_itr_size);
				if (result < 0)
					goto error_seg_outbound_populate;

				buf_itr += buf_itr_size;
				buf_size -= buf_itr_size;
			}
		}
		seg->status = WA_SEG_READY;
	}
	return 0;

	/*
	 * Free the memory for the current segment which failed to init.
	 * Use the fact that cnt is left at where it failed.  The remaining
	 * segments will be cleaned up by wa_xfer_destroy.
	 */
error_seg_outbound_populate:
	usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
error_iso_pack_desc_alloc:
	kfree(xfer->seg[cnt]);
	xfer->seg[cnt] = NULL;
error_seg_kmalloc:
error_segs_kzalloc:
	return result;
}

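/*
 * Allocates all the stuff needed to submit a transfer.
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index].
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one.
 */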
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	if (xfer_type == WA_XFER_TYPE_ISO) {
		xfer_hdr0->dwTransferLength =
			cpu_to_le32(xfer->seg[0]->isoc_size);
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			struct wa_xfer_packet_info_hwaiso *packet_desc;
			struct wa_seg *seg = xfer->seg[cnt];
			struct wa_xfer_hwaiso *xfer_iso;

			xfer_hdr = &seg->xfer_hdr;
			xfer_iso = container_of(xfer_hdr,
						struct wa_xfer_hwaiso, hdr);
			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
			/*
			 * Copy values from the 0th header. Segment specific
			 * values are set below.
			 */
			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				cpu_to_le32(seg->isoc_size);
			xfer_iso->dwNumOfPackets =
				cpu_to_le32(seg->isoc_frame_count);
			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
			seg->status = WA_SEG_READY;
		}
	} else {
		transfer_size = urb->transfer_buffer_length;
		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size) :
			cpu_to_le32(transfer_size);
		transfer_size -= xfer->seg_size;
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				transfer_size > xfer->seg_size ?
					cpu_to_le32(xfer->seg_size)
					: cpu_to_le32(transfer_size);
			xfer->seg[cnt]->status = WA_SEG_READY;
			transfer_size -= xfer->seg_size;
		}
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}

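/*
 * Submit a single transfer segment: the request header, the isoc packet
 * descriptor (if present) and the DTO data (if present).
 *
 * Called with rpipe->seg_lock held.  *dto_done is cleared when the
 * segment must keep the DTO resource to send further isoc frames.
 */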
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
		struct wa_seg *seg, int *dto_done)
{
	int result;

	/* default to done unless we encounter a multi-frame isoc segment. */
	*dto_done = 1;

	/*
	 * Take a ref for each segment urb so the xfer cannot disappear until
	 * all of the callbacks run.
	 */
	wa_xfer_get(xfer);
	/* submit the transfer request. */
	seg->status = WA_SEG_SUBMITTED;
	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
	if (result < 0) {
		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
			__func__, xfer, seg->index, result);
		wa_xfer_put(xfer);
		goto error_tr_submit;
	}
	/* submit the isoc packet descriptor if present. */
	if (seg->isoc_pack_desc_urb) {
		wa_xfer_get(xfer);
		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
		seg->isoc_frame_index = 0;
		if (result < 0) {
			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
				__func__, xfer, seg->index, result);
			wa_xfer_put(xfer);
			goto error_iso_pack_desc_submit;
		}
	}
	/* submit the out data if this is an out request. */
	if (seg->dto_urb) {
		struct wahc *wa = xfer->wa;
		wa_xfer_get(xfer);
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
				__func__, xfer, seg->index, result);
			wa_xfer_put(xfer);
			goto error_dto_submit;
		}
		/*
		 * If this segment contains more than one isoc frame, hold
		 * onto the dto resource until we send all frames.
		 * Only applies to non-Alereon devices.
		 */
		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
			&& (seg->isoc_frame_count > 1))
			*dto_done = 0;
	}
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
	usb_unlink_urb(&seg->tr_urb);
error_tr_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	*dto_done = 1;
	return result;
}

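/*
 * Execute more queued request segments until the maximum concurrent allowed.
 * Return true if the DTO resource was acquired and released.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not viceversa.
 */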
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
{
	int result, dto_acquired = 0, dto_done = 0;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	*dto_waiting = 0;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
		&& !list_empty(&rpipe->seg_list)
		&& (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
					list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		/*
		 * Get a reference to the xfer in case the callbacks for the
		 * URBs submitted by __wa_seg_submit attempt to complete
		 * the xfer before this function completes.
		 */
		wa_xfer_get(xfer);
		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
		/* release the dto resource if this RPIPE is done with it. */
		if (dto_done)
			__wa_dto_put(rpipe->wa);
		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
			xfer, wa_xfer_id(xfer), seg->index,
			atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			int done;

			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			/*
			 * This seg was marked as submitted when it was put on
			 * the RPIPE seg_list.  Mark it done.
			 */
			xfer->segs_done++;
			done = __wa_xfer_is_done(xfer);
			spin_unlock_irqrestore(&xfer->lock, flags);
			if (done)
				wa_xfer_completion(xfer);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
		wa_xfer_put(xfer);
	}
	/*
	 * Mark this RPIPE as waiting if dto was not acquired, there are
	 * delayed segs and no active transfers to wake us up later.
	 */
	if (!dto_acquired && !list_empty(&rpipe->seg_list)
		&& (atomic_read(&rpipe->segs_available) ==
			le16_to_cpu(rpipe->descr.wRequests)))
		*dto_waiting = 1;

	spin_unlock_irqrestore(&rpipe->seg_lock, flags);

	return dto_done;
}

static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int dto_waiting;
	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);

	/*
	 * If this RPIPE is waiting on the DTO resource, add it to the tail of
	 * the waiting list.
	 * Otherwise, if the WA DTO resource was acquired and released by
	 * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
	 * DTO and failed during that time.  Check the delayed list and process
	 * any RPIPEs waiting for the DTO resource.
	 */
	if (dto_waiting)
		wa_add_delayed_rpipe(rpipe->wa, rpipe);
	else if (dto_done)
		wa_check_for_delayed_rpipes(rpipe->wa);
}

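/*
 * Submit as many segments of this transfer as the RPIPE allows, delaying
 * the rest on rpipe->seg_list.
 *
 * xfer->lock is taken.
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path.
 */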
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		int delay_seg = 1;

		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		if (available && empty) {
			/*
			 * Only attempt to acquire DTO if we have a segment
			 * to send.
			 */
			dto_acquired = __wa_dto_try_get(rpipe->wa);
			if (dto_acquired) {
				delay_seg = 0;
				result = __wa_seg_submit(rpipe, xfer, seg,
							&dto_done);
				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
					xfer, wa_xfer_id(xfer), cnt, available,
					empty);
				if (dto_done)
					__wa_dto_put(rpipe->wa);

				if (result < 0) {
					__wa_xfer_abort(xfer);
					goto error_seg_submit;
				}
			}
		}

		if (delay_seg) {
			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
				xfer, wa_xfer_id(xfer), cnt, available, empty);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	/*
	 * Mark this RPIPE as waiting if dto was not acquired, there are
	 * delayed segs and no active transfers to wake us up later.
	 */
	if (!dto_acquired && !list_empty(&rpipe->seg_list)
		&& (atomic_read(&rpipe->segs_available) ==
			le16_to_cpu(rpipe->descr.wRequests)))
		dto_waiting = 1;
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);

	if (dto_waiting)
		wa_add_delayed_rpipe(rpipe->wa, rpipe);
	else if (dto_done)
		wa_check_for_delayed_rpipes(rpipe->wa);

	return result;
}

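/*
 * Second part of a URB/transfer enqueuement.
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted
 * xfer->urb	filled and refcounted (this is the case when called
 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */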
static int wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0) {
		pr_err("%s: error_rpipe_get\n", __func__);
		goto error_rpipe_get;
	}
	result = -ENODEV;
	/* get a WUSB device reference while the mutex protects it. */
	mutex_lock(&wusbhc->mutex);
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		pr_err("%s: error usb dev gone\n", __func__);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
			__func__);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS) {
		dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
		goto error_dequeued;
	}

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0) {
		dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
		goto error_xfer_setup;
	}
	/*
	 * Get a xfer reference since __wa_xfer_submit starts asynchronous
	 * operations that may try to complete the xfer before this function
	 * exits.
	 */
	wa_xfer_get(xfer);
	result = __wa_xfer_submit(xfer);
	if (result < 0) {
		dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
		goto error_xfer_submit;
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
	return 0;

	/*
	 * The error paths below undo, in reverse order, what was set up
	 * above; the URB is not given back here since it never made it
	 * in flight.
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	return result;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	wa_xfer_put(xfer);
	/* return success since the completion routine will run. */
	return 0;
}

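/*
 * Execute the delayed transfers in the Wire Adapter @wa.
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle.  That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and
 * then submit them without the list lock held.
 */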
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;
	LIST_HEAD(tmp_list);

	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
			wa->xfer_delayed_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
	 * can take xfer->lock as well as lock mutexes.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		list_del_init(&xfer->list_node);

		urb = xfer->urb;
		if (wa_urb_enqueue_b(xfer) < 0)
			wa_xfer_giveback(xfer);
		usb_put_urb(urb);	/* taken when queuing */
	}
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from temp list without list lock
	 * held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);

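/*
 * Submit a transfer to the Wire Adapter in a delayed way.
 *
 * The process of enqueuing involves possible sleeps() [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() to a thread queue.
 *
 * @urb: We own a reference to it done by the HCD Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */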
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if ((urb->transfer_buffer == NULL)
	    && (urb->sg == NULL)
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
	result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
	if (result < 0)
		goto error_link_urb;

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_enqueue_work);
	} else {
		result = wa_urb_enqueue_b(xfer);
		if (result < 0) {
			/*
			 * Enqueue failed: drop the references taken above and
			 * unlink the URB from its endpoint since it will not
			 * be completed through the normal path.
			 */
			dev_err(dev, "%s: URB enqueue failed: %d\n",
				__func__, result);
			wa_put(xfer->wa);
			wa_xfer_put(xfer);
			spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
			usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
			spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
			return result;
		}
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
	usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
error_link_urb:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);

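/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that that enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */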
int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt, done = 0, xfer_abort_pending;
	unsigned rpipe_ready = 0;
	int result;

	/* check if it is safe to unlink. */
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
	if ((result == 0) && urb->hcpriv) {
		/*
		 * Get a xfer ref to prevent a race with wa_xfer_giveback
		 * cleaning up the xfer while we are working with it.
		 */
		wa_xfer_get(urb->hcpriv);
	}
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	if (result)
		return result;

	xfer = urb->hcpriv;
	if (xfer == NULL)
		return -ENOENT;
	spin_lock_irqsave(&xfer->lock, flags);
	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
	rpipe = xfer->ep->hcpriv;
	if (rpipe == NULL) {
		pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s",
			__func__, xfer, wa_xfer_id(xfer),
			"Probably already aborted.\n");
		result = -ENOENT;
		goto out_unlock;
	}
	/*
	 * Check for done to avoid racing with wa_xfer_giveback and completing
	 * twice.
	 */
	if (__wa_xfer_is_done(xfer)) {
		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
			xfer, wa_xfer_id(xfer));
		result = -ENOENT;
		goto out_unlock;
	}
	/* Check the delayed list -> if there, release and complete. */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted. */
	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
	/*
	 * grab the rpipe->seg_lock here to prevent racing with
	 * __wa_xfer_delayed_run.
	 */
	spin_lock(&rpipe->seg_lock);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
			__func__, wa_xfer_id(xfer), cnt, seg->status);
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			/*
			 * delete from rpipe delayed list.  If no segments on
			 * this xfer have been submitted, __wa_xfer_is_done will
			 * trigger a giveback below.  Otherwise, the submitted
			 * segments will be completed in the DTI interrupt.
			 */
			seg->status = WA_SEG_ABORTED;
			seg->result = -ENOENT;
			list_del(&seg->list_node);
			xfer->segs_done++;
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
			/*
			 * The buf_in data for a segment in the
			 * WA_SEG_DTI_PENDING state is actively being read;
			 * let the read complete and report the results.
			 */
		case WA_SEG_DTI_PENDING:
			break;
			/*
			 * In the states below, the HWA device already knows
			 * about the transfer.  If an abort request was sent,
			 * allow the HWA to process it and wait for the
			 * results.  Otherwise, the DTI state and seg completed
			 * counts can get out of sync.
			 */
		case WA_SEG_SUBMITTED:
		case WA_SEG_PENDING:
			/*
			 * Check if the abort was successfully sent.  This could
			 * be false if the HWA has been removed but we haven't
			 * gotten the disconnect notification yet.
			 */
			if (!xfer_abort_pending) {
				seg->status = WA_SEG_ABORTED;
				rpipe_ready = rpipe_avail_inc(rpipe);
				xfer->segs_done++;
			}
			break;
		}
	}
	spin_unlock(&rpipe->seg_lock);
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	wa_xfer_put(xfer);
	return result;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
	return result;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	wa_xfer_put(xfer);
	usb_put_urb(urb);	/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);

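/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno.
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 */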
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-ENOENT,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;		/* drop the flag bits, keep the status code */

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? Unknown WA transfer status 0x%02x\n",
			__func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? Inconsistent WA status: 0x%02x\n",
			__func__, real_status);
		errno = -errno;
	}
	return errno;
}

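/*
 * If a last segment flag and/or a transfer result error is encountered,
 * no other segment transfer results will be returned from the device.
 * Mark the remaining submitted or pending xfer segments as completed.
 *
 * xfer->lock must be held.
 */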
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
		int starting_index, enum wa_seg_status status)
{
	int index;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	for (index = starting_index; index < xfer->segs_submitted; index++) {
		struct wa_seg *current_seg = xfer->seg[index];

		BUG_ON(current_seg == NULL);

		switch (current_seg->status) {
		case WA_SEG_SUBMITTED:
		case WA_SEG_PENDING:
		case WA_SEG_DTI_PENDING:
			rpipe_avail_inc(rpipe);
		/*
		 * do not increment RPIPE avail for the WA_SEG_DELAYED case
		 * because it has not been submitted to the RPIPE.
		 */
		case WA_SEG_DELAYED:
			xfer->segs_done++;
			current_seg->status = status;
			break;
		case WA_SEG_ABORTED:
			break;
		default:
			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
				__func__, wa_xfer_id(xfer), index,
				current_seg->status);
			break;
		}
	}
}

/*
 * Set up a buf_in URB to read the data for one or more contiguous isoc
 * frames of an inbound transfer.  Returns the number of frames covered.
 */
static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
	struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
{
	int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
	int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;
	const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
	int next_frame_contiguous;
	struct usb_iso_packet_descriptor *iso_frame;

	BUG_ON(buf_in_urb->status == -EINPROGRESS);

	/*
	 * If the current frame actual_length is contiguous with the next frame
	 * and actual_length is a multiple of the DTI endpoint max packet size,
	 * combine the current frame with the next frame in a single URB.  This
	 * reduces the number of URBs that must be submitted in that case.
	 */
	seg_index = seg->isoc_frame_index;
	do {
		next_frame_contiguous = 0;

		iso_frame = &iso_frame_desc[urb_frame_index];
		total_len += iso_frame->actual_length;
		++urb_frame_index;
		++seg_index;

		if (seg_index < seg->isoc_frame_count) {
			struct usb_iso_packet_descriptor *next_iso_frame;

			next_iso_frame = &iso_frame_desc[urb_frame_index];

			if ((iso_frame->offset + iso_frame->actual_length) ==
				next_iso_frame->offset)
				next_frame_contiguous = 1;
		}
	} while (next_frame_contiguous
			&& ((iso_frame->actual_length % dti_packet_size) == 0));

	/* this should always be 0 before a resubmit. */
	buf_in_urb->num_mapped_sgs = 0;
	buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
		iso_frame_desc[urb_start_frame].offset;
	buf_in_urb->transfer_buffer_length = total_len;
	buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	buf_in_urb->transfer_buffer = NULL;
	buf_in_urb->sg = NULL;
	buf_in_urb->num_sgs = 0;
	buf_in_urb->context = seg;

	/* return the number of frames included in this URB. */
	return seg_index - seg->isoc_frame_index;
}

/* Populate the given urb with the current transfer state. */
static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
	unsigned int seg_idx, unsigned int bytes_transferred)
{
	int result = 0;
	struct wa_seg *seg = xfer->seg[seg_idx];

	BUG_ON(buf_in_urb->status == -EINPROGRESS);
	/* this should always be 0 before a resubmit. */
	buf_in_urb->num_mapped_sgs = 0;

	if (xfer->is_dma) {
		buf_in_urb->transfer_dma = xfer->urb->transfer_dma
			+ (seg_idx * xfer->seg_size);
		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		buf_in_urb->transfer_buffer = NULL;
		buf_in_urb->sg = NULL;
		buf_in_urb->num_sgs = 0;
	} else {
		/* do buffer or SG processing. */
		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;

		if (xfer->urb->transfer_buffer) {
			buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ (seg_idx * xfer->seg_size);
			buf_in_urb->sg = NULL;
			buf_in_urb->num_sgs = 0;
		} else {
			/*
			 * allocate an SG list to store seg_size bytes
			 * and copy the subset of the xfer->urb->sg that
			 * matches the buffer subset we are about to read.
			 */
			buf_in_urb->sg = wa_xfer_create_subset_sg(
				xfer->urb->sg,
				seg_idx * xfer->seg_size,
				bytes_transferred,
				&(buf_in_urb->num_sgs));

			if (!(buf_in_urb->sg)) {
				buf_in_urb->num_sgs = 0;
				result = -ENOMEM;
			}
			buf_in_urb->transfer_buffer = NULL;
		}
	}
	buf_in_urb->transfer_buffer_length = bytes_transferred;
	buf_in_urb->context = seg;

	return result;
}

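/*
 * Process a xfer result completion message.
 *
 * inbound transfers: need to schedule a buf_in_urb read
 */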
2277static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2278 struct wa_xfer_result *xfer_result)
2279{
2280 int result;
2281 struct device *dev = &wa->usb_iface->dev;
2282 unsigned long flags;
2283 unsigned int seg_idx;
2284 struct wa_seg *seg;
2285 struct wa_rpipe *rpipe;
2286 unsigned done = 0;
2287 u8 usb_status;
2288 unsigned rpipe_ready = 0;
2289 unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2290 struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2291
2292 spin_lock_irqsave(&xfer->lock, flags);
2293 seg_idx = xfer_result->bTransferSegment & 0x7f;
2294 if (unlikely(seg_idx >= xfer->segs))
2295 goto error_bad_seg;
2296 seg = xfer->seg[seg_idx];
2297 rpipe = xfer->ep->hcpriv;
2298 usb_status = xfer_result->bTransferStatus;
2299 dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2300 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2301 if (seg->status == WA_SEG_ABORTED
2302 || seg->status == WA_SEG_ERROR)
2303 goto segment_aborted;
2304 if (seg->status == WA_SEG_SUBMITTED)
2305 seg->status = WA_SEG_PENDING;
2306 if (seg->status != WA_SEG_PENDING) {
2307 if (printk_ratelimit())
2308 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2309 xfer, seg_idx, seg->status);
2310 seg->status = WA_SEG_PENDING;
2311 }
2312 if (usb_status & 0x80) {
2313 seg->result = wa_xfer_status_to_errno(usb_status);
2314 dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2315 xfer, xfer->id, seg->index, usb_status);
2316 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2317 WA_SEG_ABORTED : WA_SEG_ERROR;
2318 goto error_complete;
2319 }
2320
2321 if (usb_status & 0x40)
2322 usb_status = 0;
2323
2324
2325
2326
2327
2328 if (xfer_result->bTransferSegment & 0x80)
2329 wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2330 WA_SEG_DONE);
2331 if (usb_pipeisoc(xfer->urb->pipe)
2332 && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2333
2334 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2335 wa->dti_isoc_xfer_seg = seg_idx;
2336 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2337 } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2338 && (bytes_transferred > 0)) {
2339
2340 seg->status = WA_SEG_DTI_PENDING;
2341 result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2342 bytes_transferred);
2343 if (result < 0)
2344 goto error_buf_in_populate;
2345 ++(wa->active_buf_in_urbs);
2346 result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2347 if (result < 0) {
2348 --(wa->active_buf_in_urbs);
2349 goto error_submit_buf_in;
2350 }
2351 } else {
2352
2353 seg->result = bytes_transferred;
2354 rpipe_ready = rpipe_avail_inc(rpipe);
2355 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2356 }
2357 spin_unlock_irqrestore(&xfer->lock, flags);
2358 if (done)
2359 wa_xfer_completion(xfer);
2360 if (rpipe_ready)
2361 wa_xfer_delayed_run(rpipe);
2362 return;
2363
2364error_submit_buf_in:
2365 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2366 dev_err(dev, "DTI: URB max acceptable errors "
2367 "exceeded, resetting device\n");
2368 wa_reset_all(wa);
2369 }
2370 if (printk_ratelimit())
2371 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2372 xfer, seg_idx, result);
2373 seg->result = result;
2374 kfree(buf_in_urb->sg);
2375 buf_in_urb->sg = NULL;
2376error_buf_in_populate:
2377 __wa_xfer_abort(xfer);
2378 seg->status = WA_SEG_ERROR;
2379error_complete:
2380 xfer->segs_done++;
2381 rpipe_ready = rpipe_avail_inc(rpipe);
2382 wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2383 done = __wa_xfer_is_done(xfer);
2384
2385
2386
2387
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
	    usb_endpoint_xfer_control(&xfer->ep->desc) &&
	    done) {

		dev_info(dev, "Control EP stall. Queue delayed work.\n");
		spin_lock(&wa->xfer_list_lock);
		/* move xfer from xfer_list to xfer_errored_list. */
		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}

	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb, -ENOENT);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
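/*
 * Process an isochronous packet status message.
 *
 * Copies the per-frame status and length values from the DTI buffer
 * into the transfer URB's iso_frame_desc array. For inbound transfers
 * with data, it kicks off buf_in URB reads; otherwise the segment is
 * marked done. Returns nonzero if the DTI endpoint stays busy reading
 * IN data.
 */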
static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_packet_status_hwaiso *packet_status;
	struct wa_xfer_packet_status_len_hwaiso *status_array;
	struct wa_xfer *xfer;
	unsigned long flags;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
	unsigned first_frame_index = 0, rpipe_ready = 0;
	int expected_size;

	/* validate the isoc packet status buffer before using it. */
	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
		urb->actual_length, urb->transfer_buffer);
	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
			packet_status->bPacketType);
		goto error_parse_buffer;
	}
	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
	if (xfer == NULL) {
		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
			wa->dti_isoc_xfer_in_progress);
		goto error_parse_buffer;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[wa->dti_isoc_xfer_seg];
	rpipe = xfer->ep->hcpriv;
	expected_size = sizeof(*packet_status) +
			(sizeof(packet_status->PacketStatus[0]) *
			seg->isoc_frame_count);
	if (urb->actual_length != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
			urb->actual_length, expected_size);
		goto error_bad_seg;
	}
	if (le16_to_cpu(packet_status->wLength) != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
			le16_to_cpu(packet_status->wLength));
		goto error_bad_seg;
	}
	/* write isoc packet status and lengths back to the xfer urb. */
	status_array = packet_status->PacketStatus;
	xfer->urb->start_frame =
		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
		struct usb_iso_packet_descriptor *iso_frame_desc =
			xfer->urb->iso_frame_desc;
		const int xfer_frame_index =
			seg->isoc_frame_offset + seg_index;

		iso_frame_desc[xfer_frame_index].status =
			wa_xfer_status_to_errno(
			le16_to_cpu(status_array[seg_index].PacketStatus));
		iso_frame_desc[xfer_frame_index].actual_length =
			le16_to_cpu(status_array[seg_index].PacketLength);
		/* track the number of frames that carried data. */
		if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
			/* save the starting frame index for buf_in_urb. */
			if (!data_frame_count)
				first_frame_index = seg_index;
			++data_frame_count;
		}
	}

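	/*
	 * If this is an inbound transfer with data, schedule buf_in URB
	 * reads for the frames that carried data. Up to WA_MAX_BUF_IN_URBS
	 * reads can be in flight at once; if more frames remain after
	 * that, the DTI endpoint is left busy and the reads continue from
	 * wa_buf_in_cb().
	 */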
	if (xfer->is_inbound && data_frame_count) {
		int result, total_frames_read = 0, urb_index = 0;
		struct urb *buf_in_urb;

		/* IN data phase: read the data into the buf_in URBs. */
		seg->status = WA_SEG_DTI_PENDING;

		/* start with the first frame with data. */
		seg->isoc_frame_index = first_frame_index;
		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */
		do {
			int urb_frame_index, urb_frame_count;
			struct usb_iso_packet_descriptor *iso_frame_desc;

			buf_in_urb = &(wa->buf_in_urbs[urb_index]);
			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
				buf_in_urb, xfer, seg);
			/* advance frame index to start of next read URB. */
			seg->isoc_frame_index += urb_frame_count;
			total_frames_read += urb_frame_count;

			++(wa->active_buf_in_urbs);
			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);

			/* skip 0-length frames to find the next with data. */
			urb_frame_index =
				seg->isoc_frame_offset + seg->isoc_frame_index;
			iso_frame_desc =
				&(xfer->urb->iso_frame_desc[urb_frame_index]);
			while ((seg->isoc_frame_index <
						seg->isoc_frame_count) &&
			       (iso_frame_desc->actual_length == 0)) {
				++(seg->isoc_frame_index);
				++iso_frame_desc;
			}
			++urb_index;

		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
				&& (seg->isoc_frame_index <
					seg->isoc_frame_count));

		if (result < 0) {
			--(wa->active_buf_in_urbs);
			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
				result);
			wa_reset_all(wa);
		} else if (data_frame_count > total_frames_read)
			/* more frames remain; keep the DTI endpoint busy. */
			dti_busy = 1;
	} else {
		/* OUT transfer or no data to read: complete the segment. */
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* tell the caller whether the DTI endpoint is still reading data. */
	if (dti_busy)
		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
	else
		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	wa_xfer_put(xfer);
	return dti_busy;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
error_parse_buffer:
	return dti_busy;
}
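/*
 * Callback for the IN data phase (buf_in) URB.
 *
 * On success, accumulate the read data into the segment result; for
 * isochronous transfers, keep submitting reads until all frames with
 * data have been fetched, then resubmit the delayed DTI URB. On
 * error, note the error, mark the segment done and try to complete
 * the transfer.
 *
 * If the URB was unlinked (-ECONNRESET/-ENOENT), the aborter owns the
 * completion and nothing is done here.
 */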
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
	unsigned long flags;
	int resubmit_dti = 0, active_buf_in_urbs;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	spin_lock_irqsave(&xfer->lock, flags);
	wa = xfer->wa;
	dev = &wa->usb_iface->dev;
	--(wa->active_buf_in_urbs);
	active_buf_in_urbs = wa->active_buf_in_urbs;

	if (usb_pipeisoc(xfer->urb->pipe)) {
		struct usb_iso_packet_descriptor *iso_frame_desc =
			xfer->urb->iso_frame_desc;
		int seg_index;

		/*
		 * Find the next isoc frame with data and count how many
		 * frames with data remain.
		 */
		seg_index = seg->isoc_frame_index;
		while (seg_index < seg->isoc_frame_count) {
			const int urb_frame_index =
				seg->isoc_frame_offset + seg_index;

			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
				/* save the index of the next frame with data */
				if (!isoc_data_frame_count)
					seg->isoc_frame_index = seg_index;
				++isoc_data_frame_count;
			}
			++seg_index;
		}
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);

		seg->result += urb->actual_length;
		if (isoc_data_frame_count > 0) {
			int result, urb_frame_count;

			/* submit a read URB for the next frame with data. */
			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
				xfer, seg);
			/* advance index to start of next read URB. */
			seg->isoc_frame_index += urb_frame_count;
			++(wa->active_buf_in_urbs);
			result = usb_submit_urb(urb, GFP_ATOMIC);
			if (result < 0) {
				--(wa->active_buf_in_urbs);
				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
					result);
				wa_reset_all(wa);
			}
			/*
			 * If we are in this callback and
			 * isoc_data_frame_count > 0, it means that the dti_urb
			 * submission was delayed in wa_dti_cb. Once we submit
			 * the last buf_in_urb, we can submit the delayed
			 * dti_urb.
			 */
			resubmit_dti = (isoc_data_frame_count ==
							urb_frame_count);
		} else if (active_buf_in_urbs == 0) {
			rpipe = xfer->ep->hcpriv;
			dev_dbg(dev,
				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
				xfer, wa_xfer_id(xfer), seg->index,
				seg->result);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
					WA_SEG_DONE);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* the unlinker did the completion */
		break;
	default:		/* other errors */
		/*
		 * Error on data buf read. Only resubmit the DTI if it hasn't
		 * already been done by previously hitting this error or by a
		 * successful completion of the previous buf_in_urb.
		 */
		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
		spin_lock_irqsave(&xfer->lock, flags);
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->result = urb->status;
		rpipe_ready = rpipe_avail_inc(rpipe);
		if (active_buf_in_urbs == 0)
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
					WA_SEG_ERROR);
		else
			__wa_xfer_abort(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}

	/* resubmit the delayed DTI URB once the last buf_in read is queued. */
	if (resubmit_dti) {
		int result;

		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;

		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
		if (result < 0) {
			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
				result);
			wa_reset_all(wa);
		}
	}
}
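/*
 * Callback for the DTI URB: handle an incoming transfer result buffer.
 *
 * The DTI endpoint runs a small state machine (wa->dti_state):
 *
 * - WA_DTI_TRANSFER_RESULT_PENDING: the buffer holds a wa_xfer_result;
 *   validate it and hand it to wa_xfer_result_chew(), which completes
 *   the transfer or schedules a buf_in read for the IN data phase.
 *
 * - WA_DTI_ISOC_PACKET_STATUS_PENDING: the buffer holds isoc packet
 *   status; process it with wa_process_iso_packet_status().
 *
 * The DTI URB is resubmitted at the end unless isoc IN reads are still
 * in flight, in which case wa_buf_in_cb() resubmits it once the last
 * buf_in URB completes.
 */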
static void wa_dti_cb(struct urb *urb)
{
	int result, dti_busy = 0;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	u32 xfer_id;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
			struct wa_xfer_result *xfer_result;
			struct wa_xfer *xfer;

			/* validate the transfer result buffer. */
			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
				urb->actual_length, urb->transfer_buffer);
			if (urb->actual_length != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
					urb->actual_length,
					sizeof(*xfer_result));
				break;
			}
			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
					xfer_result->hdr.bLength);
				break;
			}
			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
					xfer_result->hdr.bNotifyType);
				break;
			}
			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
			usb_status = xfer_result->bTransferStatus & 0x3f;
			if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
				/* taken care of already */
				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
					__func__, xfer_id,
					xfer_result->bTransferSegment & 0x7f);
				break;
			}
			xfer = wa_xfer_get_by_id(wa, xfer_id);
			if (xfer == NULL) {
				/* FIXME: transaction not found */
				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
					xfer_id, usb_status);
				break;
			}
			wa_xfer_result_chew(wa, xfer, xfer_result);
			wa_xfer_put(xfer);
		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
			dti_busy = wa_process_iso_packet_status(wa, urb);
		} else {
			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
				wa->dti_state);
		}
		break;
	case -ENOENT:
	case -ESHUTDOWN:
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* anything else is basically an error. */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}

	/* resubmit the DTI URB unless we are busy reading isoc IN data. */
	if (!dti_busy) {
		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
		if (result < 0) {
			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
				result);
			wa_reset_all(wa);
		}
	}
out:
	return;
}
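/*
 * Initialize the DTI URB for reading transfer result notifications
 * and the buf_in URBs used for the IN data phase, then submit the
 * DTI URB. Does nothing if the DTI URB has already been created.
 *
 * Returns 0 on success or a negative errno on allocation or submit
 * failure.
 */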
int wa_dti_start(struct wahc *wa)
{
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
	struct device *dev = &wa->usb_iface->dev;
	int result = -ENOMEM, index;

	/* don't create the DTI URB if it already exists. */
	if (wa->dti_urb != NULL)
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
		wa->dti_buf, wa->dti_buf_size,
		wa_dti_cb, wa);

	/* init the buf in URBs */
	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
		usb_fill_bulk_urb(
			&(wa->buf_in_urbs[index]), wa->usb_dev,
			usb_rcvbulkpipe(wa->usb_dev,
				0x80 | dti_epd->bEndpointAddress),
			NULL, 0, wa_buf_in_cb, wa);
	}
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
			result);
		goto error_dti_urb_submit;
	}
out:
	return 0;

error_dti_urb_submit:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_dti_start);
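/*
 * Transfer complete notification
 *
 * Called from the notification endpoint code when the HWA notifies us
 * that a transfer has completed: make sure the DTI endpoint is set up
 * and running so the transfer results can be read.
 *
 * On error (notification for an unexpected endpoint, or failure to
 * start the DTI), reset the whole device.
 */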
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}

	/* attempt to start the DTI ep processing. */
	if (wa_dti_start(wa) < 0)
		goto error;

	return;

error:
	wa_reset_all(wa);
}