1
2
3
4
5
6
7
8
9
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/dma-mapping.h>
19
20#include "musb_core.h"
21#include "musb_host.h"
22#include "musb_trace.h"
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/*
 * hcd_to_musb() - recover the driver-private struct musb from a usb_hcd.
 * hcd_priv holds a pointer to the struct musb pointer, hence the
 * double indirection here.
 */
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}
77
78
79static void musb_ep_program(struct musb *musb, u8 epnum,
80 struct urb *urb, int is_out,
81 u8 *buf, u32 offset, u32 len);
82
83
84
85
/*
 * Flush a host TX endpoint's FIFO, busy-waiting (up to ~1s) until the
 * controller reports FIFONOTEMPTY clear.  Gives up with a one-time
 * warning instead of looping forever on wedged hardware.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb *musb = ep->musb;
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		/*
		 * TXPKTRDY is written back along with FLUSHFIFO —
		 * presumably so a second, double-buffered packet gets
		 * flushed as well (TODO confirm against the MUSB
		 * programmer's guide).
		 */
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * Bail out once the retry budget is exhausted; the
		 * warning fires only once per boot (dev_WARN_ONCE).
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
120
/*
 * Flush endpoint 0's FIFO.  EP0 shares one FIFO for both directions, so
 * both TXPKTRDY and RXPKTRDY are checked.  Bounded at 5 attempts with a
 * short delay; finally clears CSR0 entirely.
 */
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset to prevent the chip from sending anything more */
	musb_writew(epio, MUSB_TXCSR, 0);
}
143
144
145
146
147
/*
 * Start transmitting the packet already loaded into the TX FIFO.
 * EP0 additionally flags the packet as a SETUP packet.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}

}
163
/*
 * Enable DMA on a (non-EP0) TX endpoint; CPPI additionally needs
 * DMAMODE set.  Caller should lock and select the endpoint.
 */
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
175
176static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
177{
178 if (is_in != 0 || ep->is_shared_fifo)
179 ep->in_qh = qh;
180 if (is_in == 0 || ep->is_shared_fifo)
181 ep->out_qh = qh;
182}
183
184static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
185{
186 return is_in ? ep->in_qh : ep->out_qh;
187}
188
189
190
191
192
193
194
/*
 * Start the URB at the head of @qh on its hardware endpoint.
 * Programs the endpoint for the first chunk of the transfer and, for
 * OUT transfers, kicks off transmission (PIO or DMA).
 * Caller must hold the controller lock.
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32 len;
	void __iomem *mbase = musb->mregs;
	struct urb *urb = next_urb(qh);
	void *buf = urb->transfer_buffer;
	u32 offset = 0;
	struct musb_hw_ep *hw_ep = qh->hw_ep;
	int epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP (an OUT) */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:
		/* bulk, interrupt: continue from where we left off */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/*
		 * NOTE(review): the else branch below is dead code —
		 * the `if (1)` always starts immediately; SOF-deferred
		 * start (urb->start_frame) is effectively disabled.
		 */
		if (1) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}
275
276
/*
 * Complete @urb back to the USB core.  The controller lock is dropped
 * around usb_hcd_giveback_urb() because the completion callback may
 * resubmit or otherwise re-enter this driver.
 */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
288
289
290
291
292
293
294
295
/*
 * Give back the current URB and, if the endpoint's URB list is then
 * empty, tear down the qh (releasing any DMA channel) and advance to
 * the next qh on a shared bulk/control ring.  Starts the next URB when
 * one is ready.  Caller must hold the controller lock.
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep *ep = qh->hw_ep;
	int ready = qh->is_ready;
	int status;
	u16 toggle;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		toggle = musb->io.get_toggle(qh, !is_in);
		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	/* mark not-ready so the completion callback can't resubmit here */
	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/*
	 * reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head *head;
		struct dma_controller *dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/*
			 * fifo policy for these lists: if this qh is
			 * multiplexed (mux == 1), advance to the next
			 * qh on the same ring
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

			/* fall through */
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/*
			 * this one's not multiplexed; nothing else can
			 * use this endpoint until the URBs come back
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
383
/*
 * Flush a host RX endpoint's FIFO and cancel any pending IN request.
 * Returns the RXCSR value read back after the flush.
 */
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/*
	 * RXPKTRDY is written back with FLUSHFIFO — presumably so a
	 * second, double-buffered packet also gets flushed (TODO
	 * confirm against the MUSB programmer's guide).
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
402
403
404
405
/*
 * PIO-read one received packet from the RX FIFO into the current IN
 * URB.  Handles both isochronous and bulk/interrupt bookkeeping,
 * flags overflows, and re-arms REQPKT when more data is expected.
 * Returns true when the URB is complete.
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16 rx_count;
	u8 *buf;
	u16 csr;
	bool done = false;
	u32 length;
	int do_flush = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	int pipe = urb->pipe;
	void *buffer = urb->transfer_buffer;

	/* musb_ep_select() was already done by the irq path (caller) */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			/* packet bigger than this frame's slot: truncate */
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done: full buffer, or a short packet */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* ack the read, and request another IN packet if needed */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
498
499
500
501
502
503
504
505
506
/*
 * Re-initialize a hardware endpoint for RX use with @qh: scrub any TX
 * state left on a shared FIFO, flush the RX FIFO, and program address,
 * type, interval and max-packet registers.  Clears ep->rx_reinit.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16 csr;

	/*
	 * A shared FIFO may have been used for TX; if the TX side is
	 * still in host-TX mode, flush it and force the data toggle so
	 * leftover TX state can't corrupt the RX setup.
	 */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear DMAMODE on its own first — presumably DMAENAB
		 * and DMAMODE cannot be cleared in a single write
		 * (TODO confirm against the MUSB programmer's guide).
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);

	/* high-bandwidth multiplier goes in bits 12..11 of RXMAXP */
	musb_writew(ep->regs, MUSB_RXMAXP,
			qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
565
/*
 * Configure TXCSR for a Mentor (Inventra/UX500) DMA transfer and pick
 * the DMA mode: mode 1 (with DMAMODE/AUTOSET) for multi-packet
 * transfers, mode 0 otherwise.  Clamps *length to the channel limit
 * and reports the chosen mode through *mode.
 */
static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
					struct musb_hw_ep *hw_ep, struct musb_qh *qh,
					struct urb *urb, u32 offset,
					u32 *length, u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 pkt_size = qh->maxpacket;
	u16 csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/*
		 * AUTOSET is only safe when the hardware itself splits
		 * packets: plain transfers (hb_mult == 1) or
		 * bulk-split-capable endpoints.
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					 can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB;
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}
604
605static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
606 struct musb_hw_ep *hw_ep,
607 struct musb_qh *qh,
608 struct urb *urb,
609 u32 offset,
610 u32 *length,
611 u8 *mode)
612{
613 struct dma_channel *channel = hw_ep->tx_channel;
614
615 channel->actual_len = 0;
616
617
618
619
620
621 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
622}
623
/*
 * Try to start a DMA TX transfer of @length bytes at @offset.
 * Configures the controller-specific DMA mode, then programs the
 * channel.  On programming failure the channel is released and TXCSR
 * DMA bits are cleared so the caller can fall back to PIO.
 * Returns true if DMA was successfully started.
 */
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	u16 pkt_size = qh->maxpacket;
	u8 mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data in memory is visible to the DMA engine
	 * before the channel is kicked off.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		/* roll back: release channel and disable DMA in TXCSR */
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
664
665
666
667
668
/*
 * Program an HDRC endpoint as per the given URB: set up CSR, target
 * address, type/interval and max-packet registers, then either arm a
 * DMA channel or PIO-load the first chunk (TX) / request the first
 * packet (RX).  Context: controller lock held, irqs blocked.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller *dma_controller;
	struct dma_channel *dma_channel;
	u8 dma_ok;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
	u16 packet_sz = qh->maxpacket;
	u8 use_dma = 1;
	u16 csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* zero-length OUT: no point in DMA, force PIO of the ZLP */
	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? allocate a channel lazily */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* OUT/transmit (incl. EP0) vs IN/receive */
	if (is_out) {
		u16 csr;
		u16 int_txe;
		u16 load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable this ep's TX interrupt while reprogramming */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/*
			 * Double-buffered endpoints are not flushed:
			 * the FIFO may still hold a previous packet.
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/* clear stale error/handshake/DMA state */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered)
				csr |= musb->io.set_toggle(qh, is_out, urb);

			musb_writew(epio, MUSB_TXCSR, csr);
			/*
			 * DMAMODE is cleared in a second, separate
			 * write — presumably it can't be cleared with
			 * DMAENAB in the same write (TODO confirm
			 * against the MUSB programmer's guide).
			 */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);

		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		/* if DMA starts successfully, skip the PIO load */
		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				/* no linear buffer: map first sg entry */
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg"
							"list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable this ep's TX interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr = 0;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);
			csr |= musb->io.set_toggle(qh, is_out, urb);

			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off: program RX DMA if available */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* write csr first so DMA sees a clean state */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * mode 0 aborts on short packets unless
			 * URB_SHORT_NOT_OK is clear
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
895
896
897
898
/*
 * Handle a NAK timeout on a multiplexed bulk endpoint: stop the current
 * transfer (aborting DMA if busy), save the data toggle, rotate the
 * NAKing qh to the tail of the shared bulk ring, and start the next qh
 * so one slow device can't starve the others.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
				  int is_in)
{
	struct dma_channel *dma;
	struct urb *urb;
	void __iomem *mbase = musb->mregs;
	void __iomem *epio = ep->regs;
	struct musb_qh *cur_qh, *next_qh;
	u16 rx_csr, tx_csr;
	u16 toggle;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * First clear REQPKT, then the DATAERROR (NAK-timeout)
		 * flag, each in its own write.
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear the NAK-timeout flag */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		toggle = musb->io.get_toggle(cur_qh, !is_in);
		usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}

		if (next_qh)
			musb_start_urb(musb, is_in, next_qh);
	}
}
973
974
975
976
977
/*
 * Continue an endpoint-0 transfer according to the current ep0 stage:
 * read or write the next chunk of the data stage, or decode the SETUP
 * packet to decide the next stage.  Returns true when another
 * data-stage packet should follow (false -> move to status stage).
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	struct musb_qh *qh = hw_ep->in_qh;
	struct usb_ctrlrequest *request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/*
			 * always terminate on short read; it's rarely
			 * reported as an error
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH — first OUT data packet is sent below */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
1047
1048
1049
1050
1051
1052
1053
/*
 * IRQ handler for endpoint 0 in host mode.  Decodes CSR0 error flags,
 * aborts on stall/error/NAK-timeout, otherwise advances the control
 * transfer state machine and programs the next stage.
 * Called with controller irqlock held.
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb *urb;
	u16 csr, len;
	int status = 0;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	bool complete = false;
	irqreturn_t retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/*
		 * NOTE: the NAK timeout is cleared by simply zeroing
		 * CSR0; the transfer is retried rather than aborted
		 * (no status is set here).
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
1176
1177
1178#ifdef CONFIG_USB_INVENTRA_DMA
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192#endif
1193
1194
/*
 * Service a TX-endpoint interrupt in host mode: handle stall / error /
 * NAK-timeout conditions, account completed DMA or PIO bytes, and
 * either complete the URB, continue via DMA, or PIO-load the next
 * packet into the FIFO.  Called with controller irqlock held.
 */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int pipe;
	bool done = false;
	u16 tx_csr;
	size_t length = 0;
	size_t offset = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->out_qh;
	struct urb *urb = next_urb(qh);
	u32 status = 0;
	void __iomem *mbase = musb->mregs;
	struct dma_channel *dma;
	bool transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/*
			 * NOTE: this code path would be a good place
			 * to PAUSE a transfer; re-arming TXPKTRDY just
			 * retries the packet.
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/*
		 * do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  In DMA mode 1 the hardware may
		 * still be shifting the last short packet out of the
		 * FIFO; disentangle DMAMODE/TXPKTRDY carefully before
		 * touching anything else.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * Clear DMAENAB (with TXPKTRDY masked) first,
			 * then DMAMODE in a separate write — the two
			 * apparently cannot be cleared together (TODO
			 * confirm against the MUSB programmer's guide).
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/* re-read to pick up current hardware state */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * If the FIFO still holds data, wait for the next irq
		 * instead of completing early.
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				/* advance to the next iso frame */
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/*
	 * urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO: load the next packet into the FIFO (bounded by
	 * maxpacket; the rest goes out on subsequent irqs).
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * Having a NULL transfer_buffer here implies an sg-backed URB;
	 * the sg iterator was started in musb_ep_program().
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
1470
1471#ifdef CONFIG_USB_TI_CPPI41_DMA
1472
/*
 * Program the CPPI 4.1 RX channel for the next isochronous frame.
 * Enables DMAENAB in RXCSR first, then hands the frame's DMA address
 * and length to the channel.
 *
 * NOTE(review): both `buf` and `urb->transfer_dma` are narrowed
 * through (u32) casts — this truncates on 64-bit dma_addr_t; verify
 * CPPI 4.1 platforms are strictly 32-bit before reusing this pattern.
 */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
	      (u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	return dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);
}
1497#else
/* Stub for builds without CPPI 4.1 DMA: never handles the frame. */
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
 struct musb_hw_ep *hw_ep,
 struct musb_qh *qh,
 struct urb *urb,
 size_t len)
{
 return false;
}
1506#endif
1507
1508#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1509 defined(CONFIG_USB_TI_CPPI41_DMA)
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
/*
 * Handle completion of an RX DMA transfer (Inventra / UX500 / CPPI 4.1).
 *
 * For isochronous pipes, record the frame result and advance to the next
 * frame; for other pipes, decide whether the whole urb is finished.
 * If the transfer is not done, re-arm REQPKT so the core requests the
 * next packet. Returns true when the urb can be completed.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
 struct musb_hw_ep *hw_ep,
 struct musb_qh *qh,
 struct urb *urb,
 size_t len)
{
 struct dma_channel *channel = hw_ep->rx_channel;
 void __iomem *epio = hw_ep->regs;
 u16 val;
 int pipe;
 bool done;

 pipe = urb->pipe;

 if (usb_pipeisoc(pipe)) {
 struct usb_iso_packet_descriptor *d;

 d = urb->iso_frame_desc + qh->iso_idx;
 d->actual_length = len;

 /* the DMA ran for the whole frame even on error; keep only
  * error statuses already latched by the caller
  */
 if (d->status != -EILSEQ && d->status != -EOVERFLOW)
 d->status = 0;

 if (++qh->iso_idx >= urb->number_of_packets) {
 done = true;
 } else {
 /* NOTE(review): the CPPI 4.1 return value is
  * immediately overwritten below — matches the
  * upstream "why ignore return value?" quirk;
  * the urb stays in progress either way.
  */
 if (musb_dma_cppi41(hw_ep->musb))
 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
 urb, len);
 done = false;
 }

 } else {
 /* done if the buffer is full or a short packet arrived */
 done = (urb->actual_length + len >=
 urb->transfer_buffer_length
 || channel->actual_len < qh->maxpacket
 || channel->rx_packet_done);
 }

 /* not done: ask the core to request the next IN packet */
 if (!done) {
 val = musb_readw(epio, MUSB_RXCSR);
 val |= MUSB_RXCSR_H_REQPKT;
 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
 }

 return done;
}
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
/*
 * Start an RX DMA transfer for a newly received packet
 * (Inventra / UX500 / CPPI 4.1).
 *
 * Computes the target DMA address and length (with iso overflow / error
 * accounting), programs RXCSR for the chosen DMA mode, then hands the
 * transfer to the DMA controller. On channel_program() failure the
 * channel is released and RXCSR DMA bits are cleared so the caller can
 * fall back to PIO. Returns the channel_program() result.
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
 struct musb_hw_ep *hw_ep,
 struct musb_qh *qh,
 struct urb *urb,
 size_t len,
 u8 iso_err)
{
 struct musb *musb = hw_ep->musb;
 void __iomem *epio = hw_ep->regs;
 struct dma_channel *channel = hw_ep->rx_channel;
 u16 rx_count, val;
 int length, pipe, done;
 dma_addr_t buf;

 rx_count = musb_readw(epio, MUSB_RXCOUNT);
 pipe = urb->pipe;

 if (usb_pipeisoc(pipe)) {
 int d_status = 0;
 struct usb_iso_packet_descriptor *d;

 d = urb->iso_frame_desc + qh->iso_idx;

 if (iso_err) {
 d_status = -EILSEQ;
 urb->error_count++;
 }
 /* clamp to the frame buffer; flag overflow once */
 if (rx_count > d->length) {
 if (d_status == 0) {
 d_status = -EOVERFLOW;
 urb->error_count++;
 }
 musb_dbg(musb, "** OVERFLOW %d into %d",
 rx_count, d->length);

 length = d->length;
 } else
 length = rx_count;
 d->status = d_status;
 buf = urb->transfer_dma + d->offset;
 } else {
 length = rx_count;
 buf = urb->transfer_dma + urb->actual_length;
 }

 /* mode 0: one packet per DMA request (default) */
 channel->desired_mode = 0;
#ifdef USE_MODE1
 /* mode 1: multi-packet DMA, only safe when a short read is an error */
 if ((urb->transfer_flags & URB_SHORT_NOT_OK)
 && (urb->transfer_buffer_length - urb->actual_length)
 > qh->maxpacket)
 channel->desired_mode = 1;
 if (rx_count < hw_ep->max_packet_sz_rx) {
 length = rx_count;
 channel->desired_mode = 0;
 } else {
 length = urb->transfer_buffer_length;
 }
#endif

 /* drop REQPKT; set AUTOREQ only for mode 1 */
 val = musb_readw(epio, MUSB_RXCSR);
 val &= ~MUSB_RXCSR_H_REQPKT;

 if (channel->desired_mode == 0)
 val &= ~MUSB_RXCSR_H_AUTOREQ;
 else
 val |= MUSB_RXCSR_H_AUTOREQ;
 val |= MUSB_RXCSR_DMAENAB;

 /* AUTOCLEAR only for non-high-bandwidth endpoints */
 if (qh->hb_mult == 1)
 val |= MUSB_RXCSR_AUTOCLEAR;

 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

 /* RXCSR must be written before the channel is programmed */
 done = dma->channel_program(channel, qh->maxpacket,
 channel->desired_mode,
 buf, length);

 if (!done) {
 /* DMA setup failed: release channel, undo CSR, use PIO */
 dma->channel_release(channel);
 hw_ep->rx_channel = NULL;
 channel = NULL;
 val = musb_readw(epio, MUSB_RXCSR);
 val &= ~(MUSB_RXCSR_DMAENAB
 | MUSB_RXCSR_H_AUTOREQ
 | MUSB_RXCSR_AUTOCLEAR);
 musb_writew(epio, MUSB_RXCSR, val);
 }

 return done;
}
1713#else
/* PIO-only build stubs: RX DMA is never available, callers fall back to PIO. */
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
 struct musb_hw_ep *hw_ep,
 struct musb_qh *qh,
 struct urb *urb,
 size_t len)
{
 return false;
}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
 struct musb_hw_ep *hw_ep,
 struct musb_qh *qh,
 struct urb *urb,
 size_t len,
 u8 iso_err)
{
 return false;
}
1732#endif
1733
1734
1735
1736
1737
/*
 * Service an RX endpoint interrupt: handle errors (stall, protocol
 * error, NAK timeout, incomplete iso), complete or continue a DMA
 * transfer, or drain the FIFO via PIO. Completed urbs are given back
 * through musb_advance_schedule().
 *
 * Called from the interrupt path with the controller lock held.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
 struct urb *urb;
 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
 struct dma_controller *c = musb->dma_controller;
 void __iomem *epio = hw_ep->regs;
 struct musb_qh *qh = hw_ep->in_qh;
 size_t xfer_len;
 void __iomem *mbase = musb->mregs;
 u16 rx_csr, val;
 bool iso_err = false;
 bool done = false;
 u32 status;
 struct dma_channel *dma;
 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

 musb_ep_select(mbase, epnum);

 urb = next_urb(qh);
 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
 status = 0;
 xfer_len = 0;

 rx_csr = musb_readw(epio, MUSB_RXCSR);
 val = rx_csr;

 if (unlikely(!urb)) {
 /* interrupt with nothing queued: log and flush the FIFO */
 musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
 epnum, val, musb_readw(epio, MUSB_RXCOUNT));
 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
 return;
 }

 trace_musb_urb_rx(musb, urb);

 /* classify error conditions first */
 if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
 musb_dbg(musb, "RX end %d STALL", epnum);

 /* endpoint halted by the device */
 status = -EPIPE;

 } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
 musb_dbg(musb, "end %d RX proto error", epnum);

 status = -EPROTO;
 musb_writeb(epio, MUSB_RXINTERVAL, 0);

 rx_csr &= ~MUSB_RXCSR_H_ERROR;
 musb_writew(epio, MUSB_RXCSR, rx_csr);

 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {

 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
 musb_dbg(musb, "RX end %d NAK timeout", epnum);

 /* a NAKing bulk device on the shared bulk endpoint
  * must not starve other queued bulk transfers
  */
 if (usb_pipebulk(urb->pipe)
 && qh->mux == 1
 && !list_is_singular(&musb->in_bulk)) {
 musb_bulk_nak_timeout(musb, hw_ep, 1);
 return;
 }
 musb_ep_select(mbase, epnum);
 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
 rx_csr &= ~MUSB_RXCSR_DATAERROR;
 musb_writew(epio, MUSB_RXCSR, rx_csr);

 goto finish;
 } else {
 musb_dbg(musb, "RX end %d ISO data error", epnum);

 /* iso: note the CRC/bitstuff error and keep going */
 iso_err = true;
 }
 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
 musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
 epnum);
 status = -EPROTO;
 }

 /* fatal error: abort any DMA, flush, and complete the urb */
 if (status) {
 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
 musb->dma_controller->channel_abort(dma);
 xfer_len = dma->actual_len;
 }
 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
 musb_writeb(epio, MUSB_RXINTERVAL, 0);
 done = true;
 goto finish;
 }

 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
 /* shouldn't happen; let the DMA completion path handle it */
 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
 goto finish;
 }

 /* non-Inventra/UX500 DMA with REQPKT still set: stop the request
  * before touching the FIFO (DMA must be aborted first)
  */
 if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
 (rx_csr & MUSB_RXCSR_H_REQPKT)) {
 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
 musb->dma_controller->channel_abort(dma);
 xfer_len = dma->actual_len;
 done = true;
 }

 musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
 xfer_len, dma ? ", dma" : "");
 rx_csr &= ~MUSB_RXCSR_H_REQPKT;

 musb_ep_select(mbase, epnum);
 musb_writew(epio, MUSB_RXCSR,
 MUSB_RXCSR_H_WZC_BITS | rx_csr);
 }

 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
 /* a DMA transfer just completed on this endpoint */
 xfer_len = dma->actual_len;

 val &= ~(MUSB_RXCSR_DMAENAB
 | MUSB_RXCSR_H_AUTOREQ
 | MUSB_RXCSR_AUTOCLEAR
 | MUSB_RXCSR_RXPKTRDY);
 musb_writew(hw_ep->regs, MUSB_RXCSR, val);

 if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
 musb_dma_cppi41(musb)) {
 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
 musb_dbg(hw_ep->musb,
 "ep %d dma %s, rxcsr %04x, rxcount %d",
 epnum, done ? "off" : "reset",
 musb_readw(epio, MUSB_RXCSR),
 musb_readw(epio, MUSB_RXCOUNT));
 } else {
 done = true;
 }

 } else if (urb->status == -EINPROGRESS) {
 /* no DMA completion: a packet should be waiting in the FIFO */
 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
 status = -EPROTO;
 ERR("Rx interrupt with no errors or packet!\n");

 musb_ep_select(mbase, epnum);
 val &= ~MUSB_RXCSR_H_REQPKT;
 musb_writew(epio, MUSB_RXCSR, val);
 goto finish;
 }

 /* try to start a new DMA transfer for this packet */
 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
 musb_dma_cppi41(musb)) && dma) {
 musb_dbg(hw_ep->musb,
 "RX%d count %d, buffer 0x%llx len %d/%d",
 epnum, musb_readw(epio, MUSB_RXCOUNT),
 (unsigned long long) urb->transfer_dma
 + urb->actual_length,
 qh->offset,
 urb->transfer_buffer_length);

 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
 xfer_len, iso_err))
 goto finish;
 else
 dev_err(musb->controller, "error: rx_dma failed\n");
 }

 if (!dma) {
 /* PIO path: CPU touches the buffer, so unmap it first */
 unsigned int received_len;

 usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

 /* no virtual address: walk the urb's scatterlist */
 if (!urb->transfer_buffer) {
 qh->use_sg = true;
 sg_miter_start(&qh->sg_miter, urb->sg, 1,
 sg_flags);
 }

 if (qh->use_sg) {
 if (!sg_miter_next(&qh->sg_miter)) {
 dev_err(musb->controller, "error: sg list empty\n");
 sg_miter_stop(&qh->sg_miter);
 status = -EINVAL;
 done = true;
 goto finish;
 }
 urb->transfer_buffer = qh->sg_miter.addr;
 received_len = urb->actual_length;
 qh->offset = 0x0;
 done = musb_host_packet_rx(musb, urb, epnum,
 iso_err);

 /* record how much of this sg entry was consumed */
 received_len = urb->actual_length -
 received_len;
 qh->sg_miter.consumed = received_len;
 sg_miter_stop(&qh->sg_miter);
 } else {
 done = musb_host_packet_rx(musb, urb,
 epnum, iso_err);
 }
 musb_dbg(musb, "read %spacket", done ? "last " : "");
 }
 }

finish:
 urb->actual_length += xfer_len;
 qh->offset += xfer_len;
 if (done) {
 if (qh->use_sg)
 qh->use_sg = false;

 if (urb->status == -EINPROGRESS)
 urb->status = status;
 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
 }
}
1988
1989
1990
1991
1992
1993
/*
 * Claim a hardware endpoint for this qh and, if the endpoint is idle,
 * start the urb immediately.
 *
 * Control transfers always use the dedicated control endpoint; other
 * transfers get the free endpoint whose FIFO size fits best. Bulk
 * transfers with no free endpoint fall back to the shared (multiplexed)
 * bulk endpoint. Returns 0 on success, -ENOSPC when no endpoint fits.
 * Caller holds the controller lock.
 */
static int musb_schedule(
 struct musb *musb,
 struct musb_qh *qh,
 int is_in)
{
 int idle = 0;
 int best_diff;
 int best_end, epnum;
 struct musb_hw_ep *hw_ep = NULL;
 struct list_head *head = NULL;
 u8 toggle;
 u8 txtype;
 struct urb *urb = next_urb(qh);

 /* control is always EP0, queued on the shared control list */
 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
 head = &musb->control;
 hw_ep = musb->control_ep;
 goto success;
 }

 /* best-fit search over the claimable endpoints */
 best_diff = 4096;
 best_end = -1;

 for (epnum = 1, hw_ep = musb->endpoints + 1;
 epnum < musb->nr_endpoints;
 epnum++, hw_ep++) {
 int diff;

 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
 continue;

 /* the shared bulk endpoint is only a fallback */
 if (hw_ep == musb->bulk_ep)
 continue;

 if (is_in)
 diff = hw_ep->max_packet_sz_rx;
 else
 diff = hw_ep->max_packet_sz_tx;
 diff -= (qh->maxpacket * qh->hb_mult);

 if (diff >= 0 && best_diff > diff) {
 /* hardware quirk: skip an endpoint whose TXTYPE
  * still says ISO when a bulk OUT with toggle set
  * would reuse it — the toggle would be corrupted
  */
 hw_ep = musb->endpoints + epnum;
 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
 >> 4) & 0x3;
 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
 continue;

 best_diff = diff;
 best_end = epnum;
 }
 }

 /* no dedicated endpoint: bulk can share the multiplexed one */
 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
 hw_ep = musb->bulk_ep;
 if (is_in)
 head = &musb->in_bulk;
 else
 head = &musb->out_bulk;

 /* set a NAK-timeout interval so one NAKing device
  * cannot starve the other queued bulk transfers
  */
 if (qh->dev)
 qh->intv_reg =
 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
 goto success;
 } else if (best_end < 0) {
 dev_err(musb->controller,
 "%s hwep alloc failed for %dx%d\n",
 musb_ep_xfertype_string(qh->type),
 qh->hb_mult, qh->maxpacket);
 return -ENOSPC;
 }

 /* dedicated endpoint claimed: it is idle by definition */
 idle = 1;
 qh->mux = 0;
 hw_ep = musb->endpoints + best_end;
 musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
 if (head) {
 /* shared endpoint: idle only if the queue was empty */
 idle = list_empty(head);
 list_add_tail(&qh->ring, head);
 qh->mux = 1;
 }
 qh->hw_ep = hw_ep;
 qh->hep->hcpriv = qh;
 if (idle)
 musb_start_urb(musb, is_in, qh);
 return 0;
}
2113
/*
 * hc_driver .urb_enqueue: queue an urb on its endpoint.
 *
 * If the endpoint already has a qh, the urb just joins its queue.
 * Otherwise a new qh is allocated, the hardware register values
 * (type, interval, hub address/port) are precomputed, and the qh is
 * scheduled onto a hardware endpoint.
 */
static int musb_urb_enqueue(
 struct usb_hcd *hcd,
 struct urb *urb,
 gfp_t mem_flags)
{
 unsigned long flags;
 struct musb *musb = hcd_to_musb(hcd);
 struct usb_host_endpoint *hep = urb->ep;
 struct musb_qh *qh;
 struct usb_endpoint_descriptor *epd = &hep->desc;
 int ret;
 unsigned type_reg;
 unsigned interval;

 /* host role must be active to accept transfers */
 if (!is_host_active(musb) || !musb->is_active)
 return -ENODEV;

 trace_musb_urb_enq(musb, urb);

 spin_lock_irqsave(&musb->lock, flags);
 ret = usb_hcd_link_urb_to_ep(hcd, urb);
 qh = ret ? NULL : hep->hcpriv;
 if (qh)
 urb->hcpriv = qh;
 spin_unlock_irqrestore(&musb->lock, flags);

 /* endpoint already has a qh (urb simply queued) or link failed */
 if (qh || ret)
 return ret;

 /* first urb on this endpoint: build a new qh */
 qh = kzalloc(sizeof *qh, mem_flags);
 if (!qh) {
 spin_lock_irqsave(&musb->lock, flags);
 usb_hcd_unlink_urb_from_ep(hcd, urb);
 spin_unlock_irqrestore(&musb->lock, flags);
 return -ENOMEM;
 }

 qh->hep = hep;
 qh->dev = urb->dev;
 INIT_LIST_HEAD(&qh->ring);
 qh->is_ready = 1;

 qh->maxpacket = usb_endpoint_maxp(epd);
 qh->type = usb_endpoint_type(epd);

 /* high-bandwidth transfers only supported for iso, and only when
  * the controller was configured for them in that direction
  */
 qh->hb_mult = usb_endpoint_maxp_mult(epd);
 if (qh->hb_mult > 1) {
 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

 if (ok)
 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
 if (!ok) {
 dev_err(musb->controller,
 "high bandwidth %s (%dx%d) not supported\n",
 musb_ep_xfertype_string(qh->type),
 qh->hb_mult, qh->maxpacket & 0x7ff);
 ret = -EMSGSIZE;
 goto done;
 }
 qh->maxpacket &= 0x7ff;
 }

 qh->epnum = usb_endpoint_num(epd);

 /* precomputed FADDR value */
 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

 /* precomputed TXTYPE/RXTYPE: transfer type, endpoint, speed bits */
 type_reg = (qh->type << 4) | qh->epnum;
 switch (urb->dev->speed) {
 case USB_SPEED_LOW:
 type_reg |= 0xc0;
 break;
 case USB_SPEED_FULL:
 type_reg |= 0x80;
 break;
 default:
 type_reg |= 0x40;
 }
 qh->type_reg = type_reg;

 /* precomputed TXINTERVAL/RXINTERVAL value */
 switch (qh->type) {
 case USB_ENDPOINT_XFER_INT:
 /* full/low-speed interrupt uses the raw frame count */
 if (urb->dev->speed <= USB_SPEED_FULL) {
 interval = max_t(u8, epd->bInterval, 1);
 break;
 }
 /* fall through - high-speed interrupt uses the iso encoding */
 case USB_ENDPOINT_XFER_ISOC:
 interval = min_t(u8, epd->bInterval, 16);
 break;
 default:
 /* bulk/control: no NAK-timeout interval by default */
 interval = 0;
 }
 qh->intv_reg = interval;

 /* multipoint cores address devices through their hub/TT */
 if (musb->is_multipoint) {
 struct usb_device *parent = urb->dev->parent;

 if (parent != hcd->self.root_hub) {
 qh->h_addr_reg = (u8) parent->devnum;

 /* speed-translating hub: address the TT instead */
 if (urb->dev->tt) {
 qh->h_port_reg = (u8) urb->dev->ttport;
 if (urb->dev->tt->hub)
 qh->h_addr_reg =
 (u8) urb->dev->tt->hub->devnum;
 if (urb->dev->tt->multi)
 qh->h_addr_reg |= 0x80;
 }
 }
 }

 /* re-check under the lock: another thread may have created the
  * qh, or the urb may have been unlinked while we were unlocked
  */
 spin_lock_irqsave(&musb->lock, flags);
 if (hep->hcpriv || !next_urb(qh)) {
 kfree(qh);
 qh = NULL;
 ret = 0;
 } else
 ret = musb_schedule(musb, qh,
 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

 if (ret == 0) {
 urb->hcpriv = qh;
 }
 spin_unlock_irqrestore(&musb->lock, flags);

done:
 if (ret != 0) {
 spin_lock_irqsave(&musb->lock, flags);
 usb_hcd_unlink_urb_from_ep(hcd, urb);
 spin_unlock_irqrestore(&musb->lock, flags);
 kfree(qh);
 }
 return ret;
}
2302
2303
2304
2305
2306
2307
2308
/*
 * Abort an urb that is currently active on the hardware: stop any DMA,
 * flush the endpoint FIFO, clear error/ready bits, and (unless the DMA
 * abort itself failed) advance the schedule so the urb is given back.
 *
 * Caller holds the controller lock. Returns the DMA abort status
 * (0 when there was no DMA or the abort succeeded).
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
 struct musb_hw_ep *ep = qh->hw_ep;
 struct musb *musb = ep->musb;
 void __iomem *epio = ep->regs;
 unsigned hw_end = ep->epnum;
 void __iomem *regs = ep->musb->mregs;
 int is_in = usb_pipein(urb->pipe);
 int status = 0;
 u16 csr;
 struct dma_channel *dma = NULL;

 musb_ep_select(regs, hw_end);

 if (is_dma_capable()) {
 dma = is_in ? ep->rx_channel : ep->tx_channel;
 if (dma) {
 status = ep->musb->dma_controller->channel_abort(dma);
 musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
 is_in ? 'R' : 'T', ep->epnum,
 urb, status);
 /* count whatever the DMA already transferred */
 urb->actual_length += dma->actual_len;
 }
 }

 if (ep->epnum && is_in) {
 /* RX: flush the FIFO; giveback happens via the scheduler */
 csr = musb_h_flush_rxfifo(ep, 0);

 /* clear a pending DMA-side RX interrupt, if the platform
  * needs it (no-op on platforms without the hook)
  */
 if (is_dma_capable() && dma)
 musb_platform_clear_ep_rxintr(musb, ep->epnum);
 } else if (ep->epnum) {
 /* TX: flush and clear all error/ready state */
 musb_h_tx_flush_fifo(ep);
 csr = musb_readw(epio, MUSB_TXCSR);
 csr &= ~(MUSB_TXCSR_AUTOSET
 | MUSB_TXCSR_DMAENAB
 | MUSB_TXCSR_H_RXSTALL
 | MUSB_TXCSR_H_NAKTIMEOUT
 | MUSB_TXCSR_H_ERROR
 | MUSB_TXCSR_TXPKTRDY);
 musb_writew(epio, MUSB_TXCSR, csr);

 /* deliberate second write — flushes the CPU write buffer */
 musb_writew(epio, MUSB_TXCSR, csr);

 csr = musb_readw(epio, MUSB_TXCSR);
 } else {
 /* endpoint 0 */
 musb_h_ep0_flush_fifo(ep);
 }
 if (status == 0)
 musb_advance_schedule(ep->musb, urb, ep, is_in);
 return status;
}
2363
/*
 * hc_driver .urb_dequeue: unlink an urb.
 *
 * An urb that is not at the head of an active hardware endpoint can be
 * given back directly; an active one needs hardware cleanup via
 * musb_cleanup_urb().
 */
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
 struct musb *musb = hcd_to_musb(hcd);
 struct musb_qh *qh;
 unsigned long flags;
 int is_in = usb_pipein(urb->pipe);
 int ret;

 trace_musb_urb_deq(musb, urb);

 spin_lock_irqsave(&musb->lock, flags);
 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
 if (ret)
 goto done;

 qh = urb->hcpriv;
 if (!qh)
 goto done;

 /* inactive cases: qh not ready, urb not at the head of its
  * endpoint queue, or qh not currently bound to the hardware
  */
 if (!qh->is_ready
 || urb->urb_list.prev != &qh->hep->urb_list
 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
 int ready = qh->is_ready;

 /* keep the scheduler from restarting this qh mid-giveback */
 qh->is_ready = 0;
 musb_giveback(musb, urb, 0);
 qh->is_ready = ready;

 /* last urb gone and qh was ready: free the now-unused qh */
 if (ready && list_empty(&qh->hep->urb_list)) {
 qh->hep->hcpriv = NULL;
 list_del(&qh->ring);
 kfree(qh);
 }
 } else
 ret = musb_cleanup_urb(urb, qh);
done:
 spin_unlock_irqrestore(&musb->lock, flags);
 return ret;
}
2418
2419
/*
 * hc_driver .endpoint_disable: shut down one endpoint direction,
 * completing every queued urb with -ESHUTDOWN and freeing the qh.
 */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
 unsigned long flags;
 struct musb *musb = hcd_to_musb(hcd);
 struct musb_qh *qh;
 struct urb *urb;

 spin_lock_irqsave(&musb->lock, flags);

 qh = hep->hcpriv;
 if (qh == NULL)
 goto exit;

 /* stop the scheduler from restarting this qh */
 qh->is_ready = 0;
 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
 /* qh is live on the hardware: clean up the head urb first */
 urb = next_urb(qh);

 if (!urb->unlinked)
 urb->status = -ESHUTDOWN;

 musb_cleanup_urb(urb, qh);

 /* then drain the rest of the queue; musb_advance_schedule()
  * frees the qh when its list empties
  */
 while (!list_empty(&hep->urb_list)) {
 urb = next_urb(qh);
 urb->status = -ESHUTDOWN;
 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
 }
 } else {
 /* qh only queued behind another endpoint user: give all
  * urbs back and free the qh here
  */
 while (!list_empty(&hep->urb_list))
 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

 hep->hcpriv = NULL;
 list_del(&qh->ring);
 kfree(qh);
 }
exit:
 spin_unlock_irqrestore(&musb->lock, flags);
}
2472
2473static int musb_h_get_frame_number(struct usb_hcd *hcd)
2474{
2475 struct musb *musb = hcd_to_musb(hcd);
2476
2477 return musb_readw(musb->mregs, MUSB_FRAME);
2478}
2479
2480static int musb_h_start(struct usb_hcd *hcd)
2481{
2482 struct musb *musb = hcd_to_musb(hcd);
2483
2484
2485
2486
2487 hcd->state = HC_STATE_RUNNING;
2488 musb->port1_status = 0;
2489 return 0;
2490}
2491
2492static void musb_h_stop(struct usb_hcd *hcd)
2493{
2494 musb_stop(hcd_to_musb(hcd));
2495 hcd->state = HC_STATE_HALT;
2496}
2497
/*
 * hc_driver .bus_suspend: suspend the root port and, depending on the
 * OTG state, allow or refuse the bus suspend. Returns -EBUSY if the
 * controller is still active, so usbcore retries later.
 */
static int musb_bus_suspend(struct usb_hcd *hcd)
{
 struct musb *musb = hcd_to_musb(hcd);
 u8 devctl;
 int ret;

 ret = musb_port_suspend(musb, true);
 if (ret)
 return ret;

 if (!is_host_active(musb))
 return 0;

 switch (musb->xceiv->otg->state) {
 case OTG_STATE_A_SUSPEND:
 /* already suspended */
 return 0;
 case OTG_STATE_A_WAIT_VRISE:
 /* if VBUS has finished rising, move on to waiting for
  * a connect; is_active stays set until then
  */
 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
 break;
 default:
 break;
 }

 if (musb->is_active) {
 WARNING("trying to suspend as %s while active\n",
 usb_otg_state_string(musb->xceiv->otg->state));
 return -EBUSY;
 } else
 return 0;
}
2534
2535static int musb_bus_resume(struct usb_hcd *hcd)
2536{
2537 struct musb *musb = hcd_to_musb(hcd);
2538
2539 if (musb->config &&
2540 musb->config->host_port_deassert_reset_at_resume)
2541 musb_port_reset(musb, false);
2542
2543 return 0;
2544}
2545
2546#ifndef CONFIG_MUSB_PIO_ONLY
2547
2548#define MUSB_USB_DMA_ALIGN 4
2549
2550struct musb_temp_buffer {
2551 void *kmalloc_ptr;
2552 void *old_xfer_buffer;
2553 u8 data[0];
2554};
2555
/*
 * Undo musb_alloc_temp_buffer(): for IN transfers copy the received data
 * back to the caller's buffer, restore urb->transfer_buffer, and free
 * the bounce allocation. No-op if no bounce buffer was installed.
 */
static void musb_free_temp_buffer(struct urb *urb)
{
 enum dma_data_direction dir;
 struct musb_temp_buffer *temp;
 size_t length;

 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
 return;

 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

 /* transfer_buffer points at temp->data inside the bounce struct */
 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
 data);

 if (dir == DMA_FROM_DEVICE) {
 /* iso urbs report per-frame lengths, so copy the whole
  * buffer; others copy only what was actually received
  */
 if (usb_pipeisoc(urb->pipe))
 length = urb->transfer_buffer_length;
 else
 length = urb->actual_length;

 memcpy(temp->old_xfer_buffer, temp->data, length);
 }
 urb->transfer_buffer = temp->old_xfer_buffer;
 kfree(temp->kmalloc_ptr);

 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
2583
/*
 * Install an aligned bounce buffer for an urb whose transfer buffer is
 * not MUSB_USB_DMA_ALIGN-aligned. Scatter-gather urbs and empty
 * transfers are left alone. Returns 0 on success or -ENOMEM.
 */
static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
 enum dma_data_direction dir;
 struct musb_temp_buffer *temp;
 void *kmalloc_ptr;
 size_t kmalloc_size;

 /* nothing to do for sg urbs, empty or already aligned buffers */
 if (urb->num_sgs || urb->sg ||
 urb->transfer_buffer_length == 0 ||
 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
 return 0;

 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

 /* room for the header plus worst-case alignment slack */
 kmalloc_size = urb->transfer_buffer_length +
 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
 if (!kmalloc_ptr)
 return -ENOMEM;

 /* align the struct so that temp->data is suitably aligned too */
 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);

 /* remember both the raw pointer (for kfree) and the old buffer */
 temp->kmalloc_ptr = kmalloc_ptr;
 temp->old_xfer_buffer = urb->transfer_buffer;
 if (dir == DMA_TO_DEVICE)
 memcpy(temp->data, urb->transfer_buffer,
 urb->transfer_buffer_length);
 urb->transfer_buffer = temp->data;

 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

 return 0;
}
2621
2622static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2623 gfp_t mem_flags)
2624{
2625 struct musb *musb = hcd_to_musb(hcd);
2626 int ret;
2627
2628
2629
2630
2631
2632
2633
2634 if (musb->hwvers < MUSB_HWVERS_1800)
2635 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2636
2637 ret = musb_alloc_temp_buffer(urb, mem_flags);
2638 if (ret)
2639 return ret;
2640
2641 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2642 if (ret)
2643 musb_free_temp_buffer(urb);
2644
2645 return ret;
2646}
2647
2648static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2649{
2650 struct musb *musb = hcd_to_musb(hcd);
2651
2652 usb_hcd_unmap_urb_for_dma(hcd, urb);
2653
2654
2655 if (musb->hwvers < MUSB_HWVERS_1800)
2656 return;
2657
2658 musb_free_temp_buffer(urb);
2659}
2660#endif
2661
/* Host-controller driver operations registered with usbcore. */
static const struct hc_driver musb_hc_driver = {
 .description = "musb-hcd",
 .product_desc = "MUSB HDRC host driver",
 /* hcd_priv holds a single pointer back to the struct musb */
 .hcd_priv_size = sizeof(struct musb *),
 .flags = HCD_USB2 | HCD_DMA | HCD_MEMORY,

 /* lifecycle */
 .start = musb_h_start,
 .stop = musb_h_stop,

 .get_frame_number = musb_h_get_frame_number,

 /* urb handling */
 .urb_enqueue = musb_urb_enqueue,
 .urb_dequeue = musb_urb_dequeue,
 .endpoint_disable = musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
 /* DMA-alignment bounce buffering (see musb_alloc_temp_buffer) */
 .map_urb_for_dma = musb_map_urb_for_dma,
 .unmap_urb_for_dma = musb_unmap_urb_for_dma,
#endif

 /* root hub */
 .hub_status_data = musb_hub_status_data,
 .hub_control = musb_hub_control,
 .bus_suspend = musb_bus_suspend,
 .bus_resume = musb_bus_resume,

};
2693
/*
 * Allocate and pre-configure the usb_hcd for this controller.
 * Returns 0 on success, -EINVAL if hcd creation fails.
 */
int musb_host_alloc(struct musb *musb)
{
 struct device *dev = musb->controller;

 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
 if (!musb->hcd)
 return -EINVAL;

 /* stash the musb pointer in hcd_priv; see hcd_to_musb() */
 *musb->hcd->hcd_priv = (unsigned long) musb;
 musb->hcd->self.uses_pio_for_control = 1;
 musb->hcd->uses_new_polling = 1;
 musb->hcd->has_tt = 1;

 return 0;
}
2710
2711void musb_host_cleanup(struct musb *musb)
2712{
2713 if (musb->port_mode == MUSB_PERIPHERAL)
2714 return;
2715 usb_remove_hcd(musb->hcd);
2716}
2717
/* Drop the reference taken by usb_create_hcd() in musb_host_alloc(). */
void musb_host_free(struct musb *musb)
{
 usb_put_hcd(musb->hcd);
}
2722
/*
 * Register the hcd with usbcore and wire it to the OTG transceiver.
 *
 * @power_budget: root-hub power budget in units of 2 mA; 0 selects the
 * 500 mA default. Returns 0 on success or the usb_add_hcd() error.
 */
int musb_host_setup(struct musb *musb, int power_budget)
{
 int ret;
 struct usb_hcd *hcd = musb->hcd;

 if (musb->port_mode == MUSB_HOST) {
 MUSB_HST_MODE(musb);
 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
 }
 otg_set_host(musb->xceiv->otg, &hcd->self);

 /* OTG protocols are not supported on this port */
 hcd->self.otg_port = 0;
 musb->xceiv->otg->host = &hcd->self;
 /* budget is given in 2 mA units; default to 500 mA */
 hcd->power_budget = 2 * (power_budget ? : 250);
 hcd->skip_phy_initialization = 1;

 ret = usb_add_hcd(hcd, 0, 0);
 if (ret < 0)
 return ret;

 device_wakeup_enable(hcd->self.controller);
 return 0;
}
2746
/* Kick usbcore to resume our root hub (e.g. on remote wakeup). */
void musb_host_resume_root_hub(struct musb *musb)
{
 usb_hcd_resume_root_hub(musb->hcd);
}
2751
2752void musb_host_poke_root_hub(struct musb *musb)
2753{
2754 MUSB_HST_MODE(musb);
2755 if (musb->hcd->status_urb)
2756 usb_hcd_poll_rh_status(musb->hcd);
2757 else
2758 usb_hcd_resume_root_hub(musb->hcd);
2759}
2760