/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * How transfers work: we get a transfer request in an URB and break
 * it up in segments (struct wa_seg) of at most the RPipe's segment
 * size. For each segment we send a transfer request header on the
 * DTO endpoint, followed (for outbound transfers) by a DTO URB with
 * the segment's payload. Transfer results come back on the DTI
 * endpoint; we match them to the pending transfer by ID, read the
 * data phase in for inbound transfers and, once all submitted
 * segments are accounted for, give the URB back to the USB stack.
 *
 * Entry points:
 *
 *   wa_urb_enqueue()         external; queue an URB for transfer
 *                            (deferred to a workqueue when called in
 *                            atomic context)
 *   wa_urb_dequeue()         external; cancel a queued URB
 *   wa_handle_notif_xfer()   a transfer result notification arrived;
 *                            set up and post the DTI and BUF-IN URBs
 *
 * Transfers are reference counted (struct wa_xfer); each segment
 * embeds the URB that carries its request header, so segments follow
 * the URB life cycle and are released through usb_free_urb().
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
	WA_SEGS_MAX = 255,
};

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb urb;
	struct urb *dto_urb;		/* for data output */
	struct list_head list_node;	/* for rpipe->seg_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
};

static inline void wa_seg_init(struct wa_seg *seg)
{
	usb_init_urb(&seg->urb);

	/* clear everything after the embedded URB, which usb_init_urb()
	 * has just set up. */
	memset(((void *)seg) + sizeof(seg->urb), 0,
		sizeof(*seg) - sizeof(seg->urb));
}

/*
 * One transfer: an URB broken up into segments.
 *
 * Protected by xfer->lock unless noted otherwise.
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			usb_free_urb(xfer->seg[cnt]->dto_urb);
			/* the segment's URB is its first member, so
			 * dropping the URB's last reference releases
			 * the whole wa_seg allocation. */
			usb_free_urb(&xfer->seg[cnt]->urb);
		}
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer off the WA's xfer list and give the URB back to the
 * USB stack, dropping the references taken at enqueue time.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);

	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				xfer->result = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
				urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

/*
 * Initialize a transfer's ID
 *
 * We need a sequential number: if we used the pointer or its hash, it
 * could repeat over consecutive transfers and confuse the HWA when
 * matching transfer results to transfer requests.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/*
 * Return the xfer's ID (in CPU byte order).
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/*
 * Search the WA's transfer list for a given transfer ID.
 *
 * @returns a referenced wa_xfer, or NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}

/*
 * Abort an ongoing transfer
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfree'd.
 *
 * We'll get an 'aborted transfer' xfer result on DTI, at which point
 * the remaining segments are cancelled.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = cpu_to_le32(wa_xfer_id(xfer));

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;
}

/*
 * Calculate the work we have to do to get this transfer going.
 *
 * @returns < 0 errno code on error, transfer header request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut the compiler up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1]). */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	return result;
}
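
/*
 * Worked example of the sizing math above, with illustrative numbers
 * (not taken from any particular device): bRPipeBlockSize = 10 makes
 * the block unit 1 << 9 = 512 bytes; wBlocks = 4 then gives seg_size
 * = 2048. With wMaxPacketSize = 512 the rounding keeps seg_size at
 * 2048, so a 5000-byte URB is split into DIV_ROUND_UP(5000, 2048) =
 * 3 segments.
 */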

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = cpu_to_le32(wa_xfer_id(xfer));
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
		/* fall through */
	default:
		BUG();
	}
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively mark the transfer as done.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback, so it might happen that we are already in another state.
 * As well, we don't set it if the transfer is inbound, as in that
 * case, wa_seg_dto_cb will do it when the OUT data phase successfully
 * completes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/* allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset
 * we are about to transfer. */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
	const unsigned int bytes_transferred,
	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
{
	struct scatterlist *out_sg;
	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
		nents;
	struct scatterlist *current_xfer_sg = in_sg;
	struct scatterlist *current_seg_sg, *last_seg_sg;

	/* skip entries in in_sg that contain data transferred by previous
	 * segments. */
	while ((current_xfer_sg) &&
			(bytes_processed < bytes_transferred)) {
		bytes_processed += current_xfer_sg->length;

		/* advance the sg if the current segment starts on or past
		 * the next page. */
		if (bytes_processed <= bytes_transferred)
			current_xfer_sg = sg_next(current_xfer_sg);
	}

	/* the data for the current segment starts in current_xfer_sg.
	 * calculate the offset. */
	if (bytes_processed > bytes_transferred) {
		offset_into_current_page_data = current_xfer_sg->length -
			(bytes_processed - bytes_transferred);
	}

	/* calculate the number of pages needed by this segment. */
	nents = DIV_ROUND_UP((bytes_to_transfer +
		offset_into_current_page_data +
		current_xfer_sg->offset),
		PAGE_SIZE);

	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
	if (out_sg) {
		sg_init_table(out_sg, nents);

		/* copy the portion of the incoming SG that correlates to the
		 * data to be transferred by this segment to the segment SG. */
		last_seg_sg = current_seg_sg = out_sg;
		bytes_processed = 0;

		/* reset nents and calculate the actual number of sg entries
		 * needed. */
		nents = 0;
		while ((bytes_processed < bytes_to_transfer) &&
				current_seg_sg && current_xfer_sg) {
			unsigned int page_len = min((current_xfer_sg->length -
				offset_into_current_page_data),
				(bytes_to_transfer - bytes_processed));

			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
				page_len,
				current_xfer_sg->offset +
				offset_into_current_page_data);

			bytes_processed += page_len;

			last_seg_sg = current_seg_sg;
			current_seg_sg = sg_next(current_seg_sg);
			current_xfer_sg = sg_next(current_xfer_sg);

			/* only the first page may require additional offset. */
			offset_into_current_page_data = 0;
			nents++;
		}

		/* mark the last entry as the end of the list, since the loop
		 * may have used fewer entries than originally calculated. */
		sg_mark_end(last_seg_sg);
		*out_num_sgs = nents;
	}

	return out_sg;
}
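
/*
 * Illustrative walk-through of the function above, with made-up
 * numbers: three 4096-byte in_sg entries (offset 0), bytes_transferred
 * = 6000 and bytes_to_transfer = 3000. The skip loop stops on the
 * second entry with bytes_processed = 8192, so
 * offset_into_current_page_data = 4096 - (8192 - 6000) = 1904 and
 * nents = DIV_ROUND_UP(3000 + 1904, 4096) = 2. The copy loop then
 * emits a 2192-byte entry at offset 1904 and an 808-byte entry at
 * offset 0, 3000 bytes in total.
 */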

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kmalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
				  usb_sndbulkpipe(usb_dev,
						  dto_epd->bEndpointAddress),
				  &seg->xfer_hdr, xfer_hdr_size,
				  wa_seg_cb, seg);
		buf_itr_size = min(buf_size, xfer->seg_size);
		if (xfer->is_inbound == 0 && buf_size > 0) {
			/* outbound data: allocate a DTO URB for the payload */
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
			if (xfer->is_dma) {
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
				seg->dto_urb->transfer_buffer = NULL;
				seg->dto_urb->sg = NULL;
				seg->dto_urb->num_sgs = 0;
			} else {
				/* do buffer or SG processing. */
				seg->dto_urb->transfer_flags &=
					~URB_NO_TRANSFER_DMA_MAP;
				/* this should always be 0 before a resubmit. */
				seg->dto_urb->num_mapped_sgs = 0;

				if (xfer->urb->transfer_buffer) {
					seg->dto_urb->transfer_buffer =
						xfer->urb->transfer_buffer +
						buf_itr;
					seg->dto_urb->sg = NULL;
					seg->dto_urb->num_sgs = 0;
				} else {
					/* allocate an SG list to store
					 * seg_size bytes and copy the subset
					 * of the xfer->urb->sg that matches
					 * the buffer subset we are about to
					 * transfer. */
					seg->dto_urb->sg =
						wa_xfer_create_subset_sg(
						xfer->urb->sg,
						buf_itr, buf_itr_size,
						&(seg->dto_urb->num_sgs));

					if (!(seg->dto_urb->sg)) {
						seg->dto_urb->num_sgs = 0;
						goto error_sg_alloc;
					}

					seg->dto_urb->transfer_buffer = NULL;
				}
			}
			seg->dto_urb->transfer_buffer_length = buf_itr_size;
		}
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;
	}
	return 0;

error_sg_alloc:
	usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
	kfree(xfer->seg[cnt]);
	cnt--;
error_seg_kmalloc:
	/* use the fact that cnt is left at where it failed */
	for (; cnt >= 0; cnt--) {
		if (xfer->seg[cnt]) {
			if (xfer->seg[cnt]->dto_urb) {
				kfree(xfer->seg[cnt]->dto_urb->sg);
				usb_free_urb(xfer->seg[cnt]->dto_urb);
			}
			kfree(xfer->seg[cnt]);
		}
	}
	/* don't let wa_xfer_destroy() walk the now-stale array */
	kfree(xfer->seg);
	xfer->seg = NULL;
error_segs_kzalloc:
	return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0;
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill the remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
		cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}

/*
 * Submit a segment's request and (if present) DTO URB.
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}

/*
 * Execute more queued request segments until the maximum concurrent
 * allowed.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not viceversa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	       && !list_empty(&rpipe->seg_list)) {
		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
				       list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}

/*
 * Submit as many segments of a transfer as the RPipe will take,
 * delaying the rest.
 *
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}

/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() (maybe through
 * wa_urb_enqueue_run()) and implements the rest of it.
 *
 * Gets an RPipe and a referenced WUSB device for the transfer, sets
 * the xfer up and submits its segments. On error before submission,
 * everything is undone and the URB is given back through
 * wa_xfer_giveback(); after (partial) submission, the submitted
 * segments are aborted and the normal completion path reports the
 * error.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* get a reference to the WUSB device for the transfer's duration */
	mutex_lock(&wusbhc->mutex);
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	return;

	/*
	 * This is basically wa_xfer_completion() broken up:
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and clean up all the allocated resources.
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	return;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and
 * then submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;
	LIST_HEAD(tmp_list);

	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
			wa->xfer_delayed_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * enqueue from the temp list without the list lock held, since
	 * wa_urb_enqueue_b() can sleep; if it fails, it calls
	 * wa_xfer_giveback() itself.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		list_del_init(&xfer->list_node);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */
	}
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Process the errored transfers on the Wire Adapter
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from the temp list without the
	 * list lock held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic context, we defer to a workqueue to do the actual
 * enqueue.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if ((urb->transfer_buffer == NULL)
	    && (urb->sg == NULL)
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_enqueue_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
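
/*
 * Minimal sketch of the expected caller, modelled on the HWA host
 * controller glue (hwa-hc.c); the names outside this file are
 * illustrative of the calling pattern, not verbatim:
 *
 *	static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd,
 *					struct urb *urb, gfp_t gfp)
 *	{
 *		struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
 *		struct hwahc *hwahc = container_of(wusbhc, struct hwahc,
 *						   wusbhc);
 *
 *		return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
 *	}
 */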

/*
 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that the enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/*
		 * Nothing setup yet: enqueue will see urb->status !=
		 * -EINPROGRESS (by hcd layer) and bail out with
		 * error, no need to do completion
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	if (rpipe == NULL) {
		pr_debug("%s: xfer id 0x%08X has no RPIPE. Probably already aborted.\n",
			__func__, wa_xfer_id(xfer));
		goto out_unlock;
	}
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached setup(); */
		goto out_unlock;	/* enqueue_b() completes it */
	/* Ok, the xfer is in flight already, abort it asynchronously */
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
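
/*
 * The matching unlink path in the host controller glue would, after
 * the usual usb_hcd_check_unlink_urb() checks, hand the URB back here
 * (again an illustrative sketch, not verbatim):
 *
 *	static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd,
 *					struct urb *urb, int status)
 *	{
 *		struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
 *		struct hwahc *hwahc = container_of(wusbhc, struct hwahc,
 *						   wusbhc);
 *
 *		return wa_urb_dequeue(&hwahc->wa, urb);
 *	}
 */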

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * values.
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: D0 is ignored, as whether it is a proceeding completion
 *          indication or not, the translation is the same.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Unknown WA transfer status 0x%02x\n",
			__func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Inconsistent WA status: 0x%02x\n",
			__func__, real_status);
		errno = -errno;
	}
	return errno;
}

/*
 * Process a transfer result message for a given transfer
 *
 * Inbound transfers: need to schedule a buf_in_urb read to fetch the
 * data from the DTI endpoint.
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* ops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		/* this should always be 0 before a resubmit. */
		wa->buf_in_urb->num_mapped_sgs = 0;

		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ (seg_idx * xfer->seg_size);
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
			wa->buf_in_urb->transfer_buffer = NULL;
			wa->buf_in_urb->sg = NULL;
			wa->buf_in_urb->num_sgs = 0;
		} else {
			/* do buffer or SG processing. */
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;

			if (xfer->urb->transfer_buffer) {
				wa->buf_in_urb->transfer_buffer =
					xfer->urb->transfer_buffer
					+ (seg_idx * xfer->seg_size);
				wa->buf_in_urb->sg = NULL;
				wa->buf_in_urb->num_sgs = 0;
			} else {
				/* allocate an SG list to store seg_size
				 * bytes and copy the subset of the
				 * xfer->urb->sg that matches the buffer
				 * subset we are about to read. */
				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
					xfer->urb->sg,
					seg_idx * xfer->seg_size,
					le32_to_cpu(
						xfer_result->dwTransferLength),
					&(wa->buf_in_urb->num_sgs));

				if (!(wa->buf_in_urb->sg)) {
					wa->buf_in_urb->num_sgs = 0;
					goto error_sg_alloc;
				}
				wa->buf_in_urb->transfer_buffer = NULL;
			}
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(wa->buf_in_urb->sg);
error_sg_alloc:
	__wa_xfer_abort(xfer);
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_is_done(xfer);
	/*
	 * queue work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
		usb_endpoint_xfer_control(&xfer->ep->desc) &&
		done) {

		dev_info(dev, "Control EP stall. Queue delayed work.\n");
		spin_lock_irq(&wa->xfer_list_lock);
		/* remove xfer from xfer_list. */
		list_del(&xfer->list_node);
		/* add xfer to xfer_errored_list. */
		list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock_irq(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}

	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}

/*
 * Callback for the IN data phase
 *
 * If successful transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The DTI URB is started on the first transfer result notification
 * [through wa_handle_notif_xfer()] and kept posted from then on:
 * every time a result arrives it is validated, matched to its
 * transfer by ID and accounted against the segment it refers to, and
 * the DTI URB is reposted.
 */
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have a xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = le32_to_cpu(xfer_result->dwTransferID);
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Other errors: tally them and reset the device if too many */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed up things, we always have a URB reading the DTI URB; we
 * don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}