/*
 * MUSB OTG driver host support
 */

#define __UBOOT__
#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#else
#include <common.h>
#include <usb.h>
#include "linux-compat.h"
#include "usb-compat.h"
#endif

#include "musb_core.h"
#include "musb_host.h"

static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

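/*
 * Flush the TX FIFO of a host endpoint: keep setting FLUSHFIFO until
 * FIFONOTEMPTY clears, giving up after 1000 iterations.
 */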
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

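/*
 * Flush endpoint 0's FIFO (TX or RX packet pending) and reset CSR0 for the
 * next transaction.
 */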
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transaction */
	musb_writew(epio, MUSB_TXCSR, 0);
}

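/*
 * Start transmit on a host endpoint: set TXPKTRDY (data endpoints) or
 * SETUPPKT | TXPKTRDY (endpoint 0).  The caller holds musb->lock and has
 * already selected the endpoint.
 */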
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

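/*
 * Start the URB at the head of an endpoint's queue: program the hardware
 * endpoint and, for OUT transfers, kick off the first packet (PIO or DMA).
 */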
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather the right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
#endif
	default:		/* bulk, interrupt */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
			case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
#ifndef __UBOOT__
			case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
#endif
			default: s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* configure the hardware endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* IN transfers are started by the endpoint; only OUT needs a kick */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
#endif
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
#ifndef __UBOOT__
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			qh->frame = 0;
			goto start;
		} else {
#endif
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
#ifndef __UBOOT__
		}
#endif
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

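/*
 * Give an URB back to the HCD core.  Caller holds musb->lock; it is dropped
 * across the completion callback and re-acquired afterwards.
 */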
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* for bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/* record the data toggle currently latched by the hardware */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

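/*
 * Advance this hardware endpoint's schedule after the current URB finishes:
 * give the URB back, free an idle qh, and start the next queued URB if any.
 */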
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save the data toggle before the URB is given back */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
#endif
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/*
	 * The queue for this endpoint is empty: release DMA channels and
	 * detach the qh so the hardware endpoint can be reused.
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* shared (muxed) list: free this qh and take the
			 * next one from the ring
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* FALLTHROUGH */
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* periodic (or un-muxed) qh: just free it */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

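/*
 * Flush a host-side RX FIFO and clear REQPKT/AUTOREQ/AUTOCLEAR; returns the
 * resulting RXCSR value.
 */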
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* stop requesting packets and flush whatever is in the FIFO */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* read back to flush the write and return the current CSR */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

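/*
 * PIO-unload one received packet from the endpoint FIFO into the URB buffer.
 * Returns true when this URB's transfer is complete.
 */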
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* caller has already selected the endpoint */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
#ifndef __UBOOT__
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
#endif
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
#ifndef __UBOOT__
	}
#endif

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* ack this packet and, unless done, request the next one */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

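/*
 * Re-initialize an RX endpoint before (re)use: flush stale FIFO state, set
 * the target address registers and RXTYPE/RXINTERVAL/RXMAXP.
 */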
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/*
	 * A shared FIFO may still be configured for TX from a previous
	 * transfer; flush that state before switching to RX.
	 */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/* DMAMODE must not be cleared before DMAENAB: drop
		 * everything else first, then clear DMAMODE
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);

	/* RXMAXP: max packet size plus high-bandwidth multiplier (bits 11..12) */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
			    qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

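/*
 * Set up a TX DMA transfer for the given URB segment.  Returns false (and
 * leaves the caller to fall back to PIO) if the channel could not be
 * programmed.
 */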
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB;
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/* the zero-length-final-packet case needs explicit help */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches real DRAM before the DMA controller
	 * starts fetching it.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

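/*
 * Program the hardware endpoint (FIFO, DMA, address and type registers) for
 * the given URB and, for OUT transfers, preload the first packet.
 */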
708static void musb_ep_program(struct musb *musb, u8 epnum,
709 struct urb *urb, int is_out,
710 u8 *buf, u32 offset, u32 len)
711{
712 struct dma_controller *dma_controller;
713 struct dma_channel *dma_channel;
714 u8 dma_ok;
715 void __iomem *mbase = musb->mregs;
716 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
717 void __iomem *epio = hw_ep->regs;
718 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
719 u16 packet_sz = qh->maxpacket;
720
721 dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
722 "h_addr%02x h_port%02x bytes %d\n",
723 is_out ? "-->" : "<--",
724 epnum, urb, urb->dev->speed,
725 qh->addr_reg, qh->epnum, is_out ? "out" : "in",
726 qh->h_addr_reg, qh->h_port_reg,
727 len);
728
729 musb_ep_select(mbase, epnum);
730
731
732 dma_controller = musb->dma_controller;
733 if (is_dma_capable() && epnum && dma_controller) {
734 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
735 if (!dma_channel) {
736 dma_channel = dma_controller->channel_alloc(
737 dma_controller, hw_ep, is_out);
738 if (is_out)
739 hw_ep->tx_channel = dma_channel;
740 else
741 hw_ep->rx_channel = dma_channel;
742 }
743 } else
744 dma_channel = NULL;
745
746
747
748
749 if (is_out) {
750 u16 csr;
751 u16 int_txe;
752 u16 load_count;
753
754 csr = musb_readw(epio, MUSB_TXCSR);
755
756
757 int_txe = musb_readw(mbase, MUSB_INTRTXE);
758 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
759
760
761 if (epnum) {
762
763 musb_h_tx_flush_fifo(hw_ep);
764
765
766
767
768
769
770 csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
771 | MUSB_TXCSR_AUTOSET
772 | MUSB_TXCSR_DMAENAB
773 | MUSB_TXCSR_FRCDATATOG
774 | MUSB_TXCSR_H_RXSTALL
775 | MUSB_TXCSR_H_ERROR
776 | MUSB_TXCSR_TXPKTRDY
777 );
778 csr |= MUSB_TXCSR_MODE;
779
780 if (usb_gettoggle(urb->dev, qh->epnum, 1))
781 csr |= MUSB_TXCSR_H_WR_DATATOGGLE
782 | MUSB_TXCSR_H_DATATOGGLE;
783 else
784 csr |= MUSB_TXCSR_CLRDATATOG;
785
786 musb_writew(epio, MUSB_TXCSR, csr);
787
788 csr &= ~MUSB_TXCSR_DMAMODE;
789 musb_writew(epio, MUSB_TXCSR, csr);
790 csr = musb_readw(epio, MUSB_TXCSR);
791 } else {
792
793 musb_h_ep0_flush_fifo(hw_ep);
794 }
795
796
797 if (musb->is_multipoint) {
798 musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
799 musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
800 musb_write_txhubport(mbase, epnum, qh->h_port_reg);
801
802 } else
803 musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
804
805
806 if (epnum) {
807 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
808 if (musb->double_buffer_not_ok)
809 musb_writew(epio, MUSB_TXMAXP,
810 hw_ep->max_packet_sz_tx);
811 else if (can_bulk_split(musb, qh->type))
812 musb_writew(epio, MUSB_TXMAXP, packet_sz
813 | ((hw_ep->max_packet_sz_tx /
814 packet_sz) - 1) << 11);
815 else
816 musb_writew(epio, MUSB_TXMAXP,
817 qh->maxpacket |
818 ((qh->hb_mult - 1) << 11));
819 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
820 } else {
821 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
822 if (musb->is_multipoint)
823 musb_writeb(epio, MUSB_TYPE0,
824 qh->type_reg);
825 }
826
827 if (can_bulk_split(musb, qh->type))
828 load_count = min((u32) hw_ep->max_packet_sz_tx,
829 len);
830 else
831 load_count = min((u32) packet_sz, len);
832
833 if (dma_channel && musb_tx_dma_program(dma_controller,
834 hw_ep, qh, urb, offset, len))
835 load_count = 0;
836
837 if (load_count) {
838
839 qh->segsize = load_count;
840 musb_write_fifo(hw_ep, load_count, buf);
841 }
842
843
844 musb_writew(mbase, MUSB_INTRTXE, int_txe);
845
846
847 } else {
848 u16 csr;
849
850 if (hw_ep->rx_reinit) {
851 musb_rx_reinit(musb, qh, hw_ep);
852
853
854 if (usb_gettoggle(urb->dev, qh->epnum, 0))
855 csr = MUSB_RXCSR_H_WR_DATATOGGLE
856 | MUSB_RXCSR_H_DATATOGGLE;
857 else
858 csr = 0;
859 if (qh->type == USB_ENDPOINT_XFER_INT)
860 csr |= MUSB_RXCSR_DISNYET;
861
862 } else {
863 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
864
865 if (csr & (MUSB_RXCSR_RXPKTRDY
866 | MUSB_RXCSR_DMAENAB
867 | MUSB_RXCSR_H_REQPKT))
868 ERR("broken !rx_reinit, ep%d csr %04x\n",
869 hw_ep->epnum, csr);
870
871
872 csr &= MUSB_RXCSR_DISNYET;
873 }
874
875
876
877 if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
878
879 dma_channel->actual_len = 0L;
880 qh->segsize = len;
881
882
883 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
884 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
885
886
887
888
889
890 dma_ok = dma_controller->channel_program(dma_channel,
891 packet_sz, !(urb->transfer_flags &
892 URB_SHORT_NOT_OK),
893 urb->transfer_dma + offset,
894 qh->segsize);
895 if (!dma_ok) {
896 dma_controller->channel_release(dma_channel);
897 hw_ep->rx_channel = dma_channel = NULL;
898 } else
899 csr |= MUSB_RXCSR_DMAENAB;
900 }
901
902 csr |= MUSB_RXCSR_H_REQPKT;
903 dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
904 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
905 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
906 }
907}
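/*
 * Continue an endpoint-0 transfer: unload or load the ep0 FIFO for the
 * current control stage.  Returns true if another packet is needed.
 */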
914static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
915{
916 bool more = false;
917 u8 *fifo_dest = NULL;
918 u16 fifo_count = 0;
919 struct musb_hw_ep *hw_ep = musb->control_ep;
920 struct musb_qh *qh = hw_ep->in_qh;
921 struct usb_ctrlrequest *request;
922
923 switch (musb->ep0_stage) {
924 case MUSB_EP0_IN:
925 fifo_dest = urb->transfer_buffer + urb->actual_length;
926 fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
927 urb->actual_length);
928 if (fifo_count < len)
929 urb->status = -EOVERFLOW;
930
931 musb_read_fifo(hw_ep, fifo_count, fifo_dest);
932
933 urb->actual_length += fifo_count;
934 if (len < qh->maxpacket) {
935
936
937
938 } else if (urb->actual_length <
939 urb->transfer_buffer_length)
940 more = true;
941 break;
942 case MUSB_EP0_START:
943 request = (struct usb_ctrlrequest *) urb->setup_packet;
944
945 if (!request->wLength) {
946 dev_dbg(musb->controller, "start no-DATA\n");
947 break;
948 } else if (request->bRequestType & USB_DIR_IN) {
949 dev_dbg(musb->controller, "start IN-DATA\n");
950 musb->ep0_stage = MUSB_EP0_IN;
951 more = true;
952 break;
953 } else {
954 dev_dbg(musb->controller, "start OUT-DATA\n");
955 musb->ep0_stage = MUSB_EP0_OUT;
956 more = true;
957 }
		/* FALLTHROUGH */
959 case MUSB_EP0_OUT:
960 fifo_count = min_t(size_t, qh->maxpacket,
961 urb->transfer_buffer_length -
962 urb->actual_length);
963 if (fifo_count) {
964 fifo_dest = (u8 *) (urb->transfer_buffer
965 + urb->actual_length);
966 dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
967 fifo_count,
968 (fifo_count == 1) ? "" : "s",
969 fifo_dest);
970 musb_write_fifo(hw_ep, fifo_count, fifo_dest);
971
972 urb->actual_length += fifo_count;
973 more = true;
974 }
975 break;
976 default:
977 ERR("bogus ep0 stage %d\n", musb->ep0_stage);
978 break;
979 }
980
981 return more;
982}
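/*
 * Handle the default endpoint interrupt as host.  Called in IRQ context
 * with the controller lock held.
 */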
990irqreturn_t musb_h_ep0_irq(struct musb *musb)
991{
992 struct urb *urb;
993 u16 csr, len;
994 int status = 0;
995 void __iomem *mbase = musb->mregs;
996 struct musb_hw_ep *hw_ep = musb->control_ep;
997 void __iomem *epio = hw_ep->regs;
998 struct musb_qh *qh = hw_ep->in_qh;
999 bool complete = false;
1000 irqreturn_t retval = IRQ_NONE;
1001
1002
1003 urb = next_urb(qh);
1004
1005 musb_ep_select(mbase, 0);
1006 csr = musb_readw(epio, MUSB_CSR0);
1007 len = (csr & MUSB_CSR0_RXPKTRDY)
1008 ? musb_readb(epio, MUSB_COUNT0)
1009 : 0;
1010
1011 dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
1012 csr, qh, len, urb, musb->ep0_stage);
1013
1014
1015 if (MUSB_EP0_STATUS == musb->ep0_stage) {
1016 retval = IRQ_HANDLED;
1017 complete = true;
1018 }
1019
1020
1021 if (csr & MUSB_CSR0_H_RXSTALL) {
1022 dev_dbg(musb->controller, "STALLING ENDPOINT\n");
1023 status = -EPIPE;
1024
1025 } else if (csr & MUSB_CSR0_H_ERROR) {
1026 dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
1027 status = -EPROTO;
1028
1029 } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1030 dev_dbg(musb->controller, "control NAK timeout\n");
1040 musb_writew(epio, MUSB_CSR0, 0);
1041 retval = IRQ_HANDLED;
1042 }
1043
1044 if (status) {
1045 dev_dbg(musb->controller, "aborting\n");
1046 retval = IRQ_HANDLED;
1047 if (urb)
1048 urb->status = status;
1049 complete = true;
1050
1051
1052 if (csr & MUSB_CSR0_H_REQPKT) {
1053 csr &= ~MUSB_CSR0_H_REQPKT;
1054 musb_writew(epio, MUSB_CSR0, csr);
1055 csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1056 musb_writew(epio, MUSB_CSR0, csr);
1057 } else {
1058 musb_h_ep0_flush_fifo(hw_ep);
1059 }
1060
1061 musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1062
1063
1064 musb_writew(epio, MUSB_CSR0, 0);
1065 }
1066
1067 if (unlikely(!urb)) {
1068
1069
1070 ERR("no URB for end 0\n");
1071
1072 musb_h_ep0_flush_fifo(hw_ep);
1073 goto done;
1074 }
1075
1076 if (!complete) {
1077
1078 if (musb_h_ep0_continue(musb, len, urb)) {
1079
1080 csr = (MUSB_EP0_IN == musb->ep0_stage)
1081 ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1082 } else {
1083
1084 if (usb_pipeout(urb->pipe)
1085 || !urb->transfer_buffer_length)
1086 csr = MUSB_CSR0_H_STATUSPKT
1087 | MUSB_CSR0_H_REQPKT;
1088 else
1089 csr = MUSB_CSR0_H_STATUSPKT
1090 | MUSB_CSR0_TXPKTRDY;
1091
1092
1093 musb->ep0_stage = MUSB_EP0_STATUS;
1094
1095 dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
1096
1097 }
1098 musb_writew(epio, MUSB_CSR0, csr);
1099 retval = IRQ_HANDLED;
1100 } else
1101 musb->ep0_stage = MUSB_EP0_IDLE;
1102
1103
1104 if (complete)
1105 musb_advance_schedule(musb, urb, hw_ep, 1);
1106done:
1107 return retval;
1108}
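/*
 * Service a TX-ready or DMA-completion interrupt for the given OUT endpoint:
 * handle errors, account for completed data, and start the next packet
 * (PIO or DMA) or complete the URB.
 */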
1128void musb_host_tx(struct musb *musb, u8 epnum)
1129{
1130 int pipe;
1131 bool done = false;
1132 u16 tx_csr;
1133 size_t length = 0;
1134 size_t offset = 0;
1135 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1136 void __iomem *epio = hw_ep->regs;
1137 struct musb_qh *qh = hw_ep->out_qh;
1138 struct urb *urb = next_urb(qh);
1139 u32 status = 0;
1140 void __iomem *mbase = musb->mregs;
1141 struct dma_channel *dma;
1142 bool transfer_pending = false;
1143
1144 musb_ep_select(mbase, epnum);
1145 tx_csr = musb_readw(epio, MUSB_TXCSR);
1146
1147
1148 if (!urb) {
1149 dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1150 return;
1151 }
1152
1153 pipe = urb->pipe;
1154 dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1155 dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1156 dma ? ", dma" : "");
1157
1158
1159 if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1160
1161 dev_dbg(musb->controller, "TX end %d stall\n", epnum);
1162
1163
1164 status = -EPIPE;
1165
1166 } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1167
1168 dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
1169
1170 status = -ETIMEDOUT;
1171
1172 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1173 dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
1183 musb_ep_select(mbase, epnum);
1184 musb_writew(epio, MUSB_TXCSR,
1185 MUSB_TXCSR_H_WZC_BITS
1186 | MUSB_TXCSR_TXPKTRDY);
1187 return;
1188 }
1189
1190 if (status) {
1191 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1192 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1193 (void) musb->dma_controller->channel_abort(dma);
1194 }
1195
1196
1197
1198
1199 musb_h_tx_flush_fifo(hw_ep);
1200 tx_csr &= ~(MUSB_TXCSR_AUTOSET
1201 | MUSB_TXCSR_DMAENAB
1202 | MUSB_TXCSR_H_ERROR
1203 | MUSB_TXCSR_H_RXSTALL
1204 | MUSB_TXCSR_H_NAKTIMEOUT
1205 );
1206
1207 musb_ep_select(mbase, epnum);
1208 musb_writew(epio, MUSB_TXCSR, tx_csr);
1209
1210 musb_writew(epio, MUSB_TXCSR, tx_csr);
1211 musb_writeb(epio, MUSB_TXINTERVAL, 0);
1212
1213 done = true;
1214 }
1215
1216
1217 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1218 dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1219 return;
1220 }
1221
1222 if (is_dma_capable() && dma && !status) {
1231 if (tx_csr & MUSB_TXCSR_DMAMODE) {
1248 tx_csr &= musb_readw(epio, MUSB_TXCSR);
1249 if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1250 tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1251 MUSB_TXCSR_TXPKTRDY);
1252 musb_writew(epio, MUSB_TXCSR,
1253 tx_csr | MUSB_TXCSR_H_WZC_BITS);
1254 }
1255 tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1256 MUSB_TXCSR_TXPKTRDY);
1257 musb_writew(epio, MUSB_TXCSR,
1258 tx_csr | MUSB_TXCSR_H_WZC_BITS);
1259
1260
1261
1262
1263
1264
1265
1266 tx_csr = musb_readw(epio, MUSB_TXCSR);
1267 }
1276 if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1277 dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
1278 "CSR %04x\n", tx_csr);
1279 return;
1280 }
1281 }
1282
1283 if (!status || dma || usb_pipeisoc(pipe)) {
1284 if (dma)
1285 length = dma->actual_len;
1286 else
1287 length = qh->segsize;
1288 qh->offset += length;
1289
1290 if (usb_pipeisoc(pipe)) {
1291#ifndef __UBOOT__
1292 struct usb_iso_packet_descriptor *d;
1293
1294 d = urb->iso_frame_desc + qh->iso_idx;
1295 d->actual_length = length;
1296 d->status = status;
1297 if (++qh->iso_idx >= urb->number_of_packets) {
1298 done = true;
1299 } else {
1300 d++;
1301 offset = d->offset;
1302 length = d->length;
1303 }
1304#endif
1305 } else if (dma && urb->transfer_buffer_length == qh->offset) {
1306 done = true;
1307 } else {
1308
1309 if (qh->segsize < qh->maxpacket)
1310 done = true;
1311 else if (qh->offset == urb->transfer_buffer_length
1312 && !(urb->transfer_flags
1313 & URB_ZERO_PACKET))
1314 done = true;
1315 if (!done) {
1316 offset = qh->offset;
1317 length = urb->transfer_buffer_length - offset;
1318 transfer_pending = true;
1319 }
1320 }
1321 }
1322
1323
1324
1325
1326 if (urb->status != -EINPROGRESS) {
1327 done = true;
1328 if (status == 0)
1329 status = urb->status;
1330 }
1331
1332 if (done) {
1333
1334 urb->status = status;
1335 urb->actual_length = qh->offset;
1336 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1337 return;
1338 } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1339 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1340 offset, length)) {
1341 if (is_cppi_enabled() || tusb_dma_omap())
1342 musb_h_tx_dma_start(hw_ep);
1343 return;
1344 }
1345 } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
1346 dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
1347 return;
1348 }
1349
1350
1351
1352
1353
1354
1355
1356
1357 if (length > qh->maxpacket)
1358 length = qh->maxpacket;
1359
1360 usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
1361 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1362 qh->segsize = length;
1363
1364 musb_ep_select(mbase, epnum);
1365 musb_writew(epio, MUSB_TXCSR,
1366 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1367}
1368
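/*
 * Bulk IN NAK timeout with multiplexed bulk qhs: rotate the current qh to
 * the end of musb->in_bulk and start the next one, so a NAKing device
 * cannot monopolize the shared bulk endpoint.
 */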
1412static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
1413{
1414 struct dma_channel *dma;
1415 struct urb *urb;
1416 void __iomem *mbase = musb->mregs;
1417 void __iomem *epio = ep->regs;
1418 struct musb_qh *cur_qh, *next_qh;
1419 u16 rx_csr;
1420
1421 musb_ep_select(mbase, ep->epnum);
1422 dma = is_dma_capable() ? ep->rx_channel : NULL;
1423
1424
1425 rx_csr = musb_readw(epio, MUSB_RXCSR);
1426 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1427 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1428 musb_writew(epio, MUSB_RXCSR, rx_csr);
1429
1430 cur_qh = first_qh(&musb->in_bulk);
1431 if (cur_qh) {
1432 urb = next_urb(cur_qh);
1433 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1434 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1435 musb->dma_controller->channel_abort(dma);
1436 urb->actual_length += dma->actual_len;
1437 dma->actual_len = 0L;
1438 }
1439 musb_save_toggle(cur_qh, 1, urb);
1440
1441
1442 list_move_tail(&cur_qh->ring, &musb->in_bulk);
1443
1444
1445 next_qh = first_qh(&musb->in_bulk);
1446
1447
1448 ep->rx_reinit = 1;
1449 musb_start_urb(musb, 1, next_qh);
1450 }
1451}
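/*
 * Service an interrupt for the given IN endpoint: handle errors and NAK
 * timeouts, collect DMA results, PIO-unload packets, and complete the URB
 * when the transfer is done.
 */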
1457void musb_host_rx(struct musb *musb, u8 epnum)
1458{
1459 struct urb *urb;
1460 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1461 void __iomem *epio = hw_ep->regs;
1462 struct musb_qh *qh = hw_ep->in_qh;
1463 size_t xfer_len;
1464 void __iomem *mbase = musb->mregs;
1465 int pipe;
1466 u16 rx_csr, val;
1467 bool iso_err = false;
1468 bool done = false;
1469 u32 status;
1470 struct dma_channel *dma;
1471
1472 musb_ep_select(mbase, epnum);
1473
1474 urb = next_urb(qh);
1475 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1476 status = 0;
1477 xfer_len = 0;
1478
1479 rx_csr = musb_readw(epio, MUSB_RXCSR);
1480 val = rx_csr;
1481
1482 if (unlikely(!urb)) {
1483
1484
1485
1486
1487 dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1488 musb_readw(epio, MUSB_RXCOUNT));
1489 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1490 return;
1491 }
1492
1493 pipe = urb->pipe;
1494
1495 dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1496 epnum, rx_csr, urb->actual_length,
1497 dma ? dma->actual_len : 0);
1498
1499
1500
1501 if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1502 dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
1503
1504
1505 status = -EPIPE;
1506
1507 } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1508 dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
1509
1510 status = -EPROTO;
1511 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1512
1513 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1514
1515 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1516 dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
1526 if (usb_pipebulk(urb->pipe)
1527 && qh->mux == 1
1528 && !list_is_singular(&musb->in_bulk)) {
1529 musb_bulk_rx_nak_timeout(musb, hw_ep);
1530 return;
1531 }
1532 musb_ep_select(mbase, epnum);
1533 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1534 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1535 musb_writew(epio, MUSB_RXCSR, rx_csr);
1536
1537 goto finish;
1538 } else {
1539 dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
1540
1541 iso_err = true;
1542 }
1543 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1544 dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
1545 epnum);
1546 status = -EPROTO;
1547 }
1548
1549
1550 if (status) {
1551
1552 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1553 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1554 (void) musb->dma_controller->channel_abort(dma);
1555 xfer_len = dma->actual_len;
1556 }
1557 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1558 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1559 done = true;
1560 goto finish;
1561 }
1562
1563 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1564
1565 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1566 goto finish;
1567 }
1568
1576#ifndef CONFIG_USB_INVENTRA_DMA
1577 if (rx_csr & MUSB_RXCSR_H_REQPKT) {
1578
1579
1580
1581
1582
1583 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1584 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1585 (void) musb->dma_controller->channel_abort(dma);
1586 xfer_len = dma->actual_len;
1587 done = true;
1588 }
1589
1590 dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1591 xfer_len, dma ? ", dma" : "");
1592 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1593
1594 musb_ep_select(mbase, epnum);
1595 musb_writew(epio, MUSB_RXCSR,
1596 MUSB_RXCSR_H_WZC_BITS | rx_csr);
1597 }
1598#endif
1599 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1600 xfer_len = dma->actual_len;
1601
1602 val &= ~(MUSB_RXCSR_DMAENAB
1603 | MUSB_RXCSR_H_AUTOREQ
1604 | MUSB_RXCSR_AUTOCLEAR
1605 | MUSB_RXCSR_RXPKTRDY);
1606 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1607
1608#ifdef CONFIG_USB_INVENTRA_DMA
1609 if (usb_pipeisoc(pipe)) {
1610 struct usb_iso_packet_descriptor *d;
1611
1612 d = urb->iso_frame_desc + qh->iso_idx;
1613 d->actual_length = xfer_len;
1614
1615
1616
1617
1618 if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1619 d->status = 0;
1620
1621 if (++qh->iso_idx >= urb->number_of_packets)
1622 done = true;
1623 else
1624 done = false;
1625
1626 } else {
1627
1628 done = (urb->actual_length + xfer_len >=
1629 urb->transfer_buffer_length
1630 || dma->actual_len < qh->maxpacket);
1631 }
1632
1633
1634 if (!done) {
1635 val |= MUSB_RXCSR_H_REQPKT;
1636 musb_writew(epio, MUSB_RXCSR,
1637 MUSB_RXCSR_H_WZC_BITS | val);
1638 }
1639
1640 dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
1641 done ? "off" : "reset",
1642 musb_readw(epio, MUSB_RXCSR),
1643 musb_readw(epio, MUSB_RXCOUNT));
1644#else
1645 done = true;
1646#endif
1647 } else if (urb->status == -EINPROGRESS) {
1648
1649 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1650 status = -EPROTO;
1651 ERR("Rx interrupt with no errors or packet!\n");
1652
1653
1654
1655
1656
1657 musb_ep_select(mbase, epnum);
1658 val &= ~MUSB_RXCSR_H_REQPKT;
1659 musb_writew(epio, MUSB_RXCSR, val);
1660 goto finish;
1661 }
1662
1663
1664#ifdef CONFIG_USB_INVENTRA_DMA
1665 if (dma) {
1666 struct dma_controller *c;
1667 u16 rx_count;
1668 int ret, length;
1669 dma_addr_t buf;
1670
1671 rx_count = musb_readw(epio, MUSB_RXCOUNT);
1672
1673 dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
1674 epnum, rx_count,
1675 urb->transfer_dma
1676 + urb->actual_length,
1677 qh->offset,
1678 urb->transfer_buffer_length);
1679
1680 c = musb->dma_controller;
1681
1682 if (usb_pipeisoc(pipe)) {
1683 int d_status = 0;
1684 struct usb_iso_packet_descriptor *d;
1685
1686 d = urb->iso_frame_desc + qh->iso_idx;
1687
1688 if (iso_err) {
1689 d_status = -EILSEQ;
1690 urb->error_count++;
1691 }
1692 if (rx_count > d->length) {
1693 if (d_status == 0) {
1694 d_status = -EOVERFLOW;
1695 urb->error_count++;
1696 }
1697 dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
1698 rx_count, d->length);
1699
1700 length = d->length;
1701 } else
1702 length = rx_count;
1703 d->status = d_status;
1704 buf = urb->transfer_dma + d->offset;
1705 } else {
1706 length = rx_count;
1707 buf = urb->transfer_dma +
1708 urb->actual_length;
1709 }
1710
1711 dma->desired_mode = 0;
1712#ifdef USE_MODE1
1713
1714
1715
1716 if ((urb->transfer_flags &
1717 URB_SHORT_NOT_OK)
1718 && (urb->transfer_buffer_length -
1719 urb->actual_length)
1720 > qh->maxpacket)
1721 dma->desired_mode = 1;
1722 if (rx_count < hw_ep->max_packet_sz_rx) {
1723 length = rx_count;
1724 dma->desired_mode = 0;
1725 } else {
1726 length = urb->transfer_buffer_length;
1727 }
1728#endif
1747 val = musb_readw(epio, MUSB_RXCSR);
1748 val &= ~MUSB_RXCSR_H_REQPKT;
1749
1750 if (dma->desired_mode == 0)
1751 val &= ~MUSB_RXCSR_H_AUTOREQ;
1752 else
1753 val |= MUSB_RXCSR_H_AUTOREQ;
1754 val |= MUSB_RXCSR_DMAENAB;
1755
1756
1757 if (qh->hb_mult == 1)
1758 val |= MUSB_RXCSR_AUTOCLEAR;
1759
1760 musb_writew(epio, MUSB_RXCSR,
1761 MUSB_RXCSR_H_WZC_BITS | val);
1762
1763
1764
1765
1766
1767 ret = c->channel_program(
1768 dma, qh->maxpacket,
1769 dma->desired_mode, buf, length);
1770
1771 if (!ret) {
1772 c->channel_release(dma);
1773 hw_ep->rx_channel = NULL;
1774 dma = NULL;
1775 val = musb_readw(epio, MUSB_RXCSR);
1776 val &= ~(MUSB_RXCSR_DMAENAB
1777 | MUSB_RXCSR_H_AUTOREQ
1778 | MUSB_RXCSR_AUTOCLEAR);
1779 musb_writew(epio, MUSB_RXCSR, val);
1780 }
1781 }
1782#endif
1783
1784 if (!dma) {
1785
1786 usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
1787 done = musb_host_packet_rx(musb, urb,
1788 epnum, iso_err);
1789 dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
1790 }
1791 }
1792
1793finish:
1794 urb->actual_length += xfer_len;
1795 qh->offset += xfer_len;
1796 if (done) {
1797 if (urb->status == -EINPROGRESS)
1798 urb->status = status;
1799 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1800 }
1801}
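/*
 * Pick a hardware endpoint for this qh and queue it.  Control transfers
 * (and, when nothing better is free, bulk transfers) share fixed endpoints;
 * other qhs get the free endpoint whose FIFO best fits their packet size.
 */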
1808static int musb_schedule(
1809 struct musb *musb,
1810 struct musb_qh *qh,
1811 int is_in)
1812{
1813 int idle;
1814 int best_diff;
1815 int best_end, epnum;
1816 struct musb_hw_ep *hw_ep = NULL;
1817 struct list_head *head = NULL;
1818 u8 toggle;
1819 u8 txtype;
1820 struct urb *urb = next_urb(qh);
1821
1822
1823 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1824 head = &musb->control;
1825 hw_ep = musb->control_ep;
1826 goto success;
1827 }
1838 best_diff = 4096;
1839 best_end = -1;
1840
1841 for (epnum = 1, hw_ep = musb->endpoints + 1;
1842 epnum < musb->nr_endpoints;
1843 epnum++, hw_ep++) {
1844 int diff;
1845
1846 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
1847 continue;
1848
1849 if (hw_ep == musb->bulk_ep)
1850 continue;
1851
1852 if (is_in)
1853 diff = hw_ep->max_packet_sz_rx;
1854 else
1855 diff = hw_ep->max_packet_sz_tx;
1856 diff -= (qh->maxpacket * qh->hb_mult);
1857
1858 if (diff >= 0 && best_diff > diff) {
1872 hw_ep = musb->endpoints + epnum;
1873 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
1874 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
1875 >> 4) & 0x3;
1876 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
1877 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
1878 continue;
1879
1880 best_diff = diff;
1881 best_end = epnum;
1882 }
1883 }
1884
1885 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
1886 hw_ep = musb->bulk_ep;
1887 if (is_in)
1888 head = &musb->in_bulk;
1889 else
1890 head = &musb->out_bulk;
1899 if (is_in && qh->dev)
1900 qh->intv_reg =
1901 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
1902 goto success;
1903 } else if (best_end < 0) {
1904 return -ENOSPC;
1905 }
1906
1907 idle = 1;
1908 qh->mux = 0;
1909 hw_ep = musb->endpoints + best_end;
1910 dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
1911success:
1912 if (head) {
1913 idle = list_empty(head);
1914 list_add_tail(&qh->ring, head);
1915 qh->mux = 1;
1916 }
1917 qh->hw_ep = hw_ep;
1918 qh->hep->hcpriv = qh;
1919 if (idle)
1920 musb_start_urb(musb, is_in, qh);
1921 return 0;
1922}
1923
1924#ifdef __UBOOT__
1925
1926static int tt_needed(struct musb *musb, struct usb_device *dev)
1927{
1928 if ((musb_readb(musb->mregs, MUSB_POWER) & MUSB_POWER_HSMODE) &&
1929 (dev->speed < USB_SPEED_HIGH))
1930 return 1;
1931 return 0;
1932}
1933#endif
1934
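/*
 * Enqueue an URB: link it to its host endpoint, allocate and initialize a
 * qh on first use (precomputing the type/interval/address register values),
 * and schedule the qh onto a hardware endpoint.
 */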
1935#ifndef __UBOOT__
1936static int musb_urb_enqueue(
1937#else
1938int musb_urb_enqueue(
1939#endif
1940 struct usb_hcd *hcd,
1941 struct urb *urb,
1942 gfp_t mem_flags)
1943{
1944 unsigned long flags;
1945 struct musb *musb = hcd_to_musb(hcd);
1946 struct usb_host_endpoint *hep = urb->ep;
1947 struct musb_qh *qh;
1948 struct usb_endpoint_descriptor *epd = &hep->desc;
1949 int ret;
1950 unsigned type_reg;
1951 unsigned interval;
1952
1953
1954 if (!is_host_active(musb) || !musb->is_active)
1955 return -ENODEV;
1956
1957 spin_lock_irqsave(&musb->lock, flags);
1958 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1959 qh = ret ? NULL : hep->hcpriv;
1960 if (qh)
1961 urb->hcpriv = qh;
1962 spin_unlock_irqrestore(&musb->lock, flags);
1963
1972 if (qh || ret)
1973 return ret;
1981 qh = kzalloc(sizeof *qh, mem_flags);
1982 if (!qh) {
1983 spin_lock_irqsave(&musb->lock, flags);
1984 usb_hcd_unlink_urb_from_ep(hcd, urb);
1985 spin_unlock_irqrestore(&musb->lock, flags);
1986 return -ENOMEM;
1987 }
1988
1989 qh->hep = hep;
1990 qh->dev = urb->dev;
1991 INIT_LIST_HEAD(&qh->ring);
1992 qh->is_ready = 1;
1993
1994 qh->maxpacket = usb_endpoint_maxp(epd);
1995 qh->type = usb_endpoint_type(epd);
1996
1997
1998
1999
2000
2001 qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
2002 if (qh->hb_mult > 1) {
2003 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2004
2005 if (ok)
2006 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2007 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2008 if (!ok) {
2009 ret = -EMSGSIZE;
2010 goto done;
2011 }
2012 qh->maxpacket &= 0x7ff;
2013 }
2014
2015 qh->epnum = usb_endpoint_num(epd);
2016
2017
2018 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2019
2020
2021 type_reg = (qh->type << 4) | qh->epnum;
2022 switch (urb->dev->speed) {
2023 case USB_SPEED_LOW:
2024 type_reg |= 0xc0;
2025 break;
2026 case USB_SPEED_FULL:
2027 type_reg |= 0x80;
2028 break;
2029 default:
2030 type_reg |= 0x40;
2031 }
2032 qh->type_reg = type_reg;
2033
2034
2035 switch (qh->type) {
2036 case USB_ENDPOINT_XFER_INT:
2037
2038
2039
2040
2041 if (urb->dev->speed <= USB_SPEED_FULL) {
2042 interval = max_t(u8, epd->bInterval, 1);
2043 break;
2044 }
		/* FALLTHROUGH */
2046 case USB_ENDPOINT_XFER_ISOC:
2047
2048 interval = min_t(u8, epd->bInterval, 16);
2049 break;
2050 default:
2065 interval = 0;
2066 }
2067 qh->intv_reg = interval;
2068
2069
2070 if (musb->is_multipoint) {
2071 struct usb_device *parent = urb->dev->parent;
2072
2073#ifndef __UBOOT__
2074 if (parent != hcd->self.root_hub) {
2075#else
2076 if (parent) {
2077#endif
2078 qh->h_addr_reg = (u8) parent->devnum;
2079
2080#ifndef __UBOOT__
2081
2082 if (urb->dev->tt) {
2083 qh->h_port_reg = (u8) urb->dev->ttport;
2084 if (urb->dev->tt->hub)
2085 qh->h_addr_reg =
2086 (u8) urb->dev->tt->hub->devnum;
2087 if (urb->dev->tt->multi)
2088 qh->h_addr_reg |= 0x80;
2089 }
2090#else
2091 if (tt_needed(musb, urb->dev)) {
2092 u16 hub_port = find_tt(urb->dev);
2093 qh->h_addr_reg = (u8) (hub_port >> 8);
2094 qh->h_port_reg = (u8) (hub_port & 0xff);
2095 }
2096#endif
2097 }
2098 }
2099
2100
2101
2102
2103
2104 spin_lock_irqsave(&musb->lock, flags);
2105 if (hep->hcpriv) {
2106
2107
2108
2109 kfree(qh);
2110 qh = NULL;
2111 ret = 0;
2112 } else
2113 ret = musb_schedule(musb, qh,
2114 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2115
2116 if (ret == 0) {
2117 urb->hcpriv = qh;
2118
2119
2120
2121 }
2122 spin_unlock_irqrestore(&musb->lock, flags);
2123
2124done:
2125 if (ret != 0) {
2126 spin_lock_irqsave(&musb->lock, flags);
2127 usb_hcd_unlink_urb_from_ep(hcd, urb);
2128 spin_unlock_irqrestore(&musb->lock, flags);
2129 kfree(qh);
2130 }
2131 return ret;
2132}
2133
2134
2135#ifndef __UBOOT__
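/*
 * Abort the URB at the head of this endpoint's hardware queue: stop DMA,
 * flush the FIFO, and advance the schedule.
 */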
2141static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2142{
2143 struct musb_hw_ep *ep = qh->hw_ep;
2144 struct musb *musb = ep->musb;
2145 void __iomem *epio = ep->regs;
2146 unsigned hw_end = ep->epnum;
2147 void __iomem *regs = ep->musb->mregs;
2148 int is_in = usb_pipein(urb->pipe);
2149 int status = 0;
2150 u16 csr;
2151
2152 musb_ep_select(regs, hw_end);
2153
2154 if (is_dma_capable()) {
2155 struct dma_channel *dma;
2156
2157 dma = is_in ? ep->rx_channel : ep->tx_channel;
2158 if (dma) {
2159 status = ep->musb->dma_controller->channel_abort(dma);
2160 dev_dbg(musb->controller,
2161 "abort %cX%d DMA for urb %p --> %d\n",
2162 is_in ? 'R' : 'T', ep->epnum,
2163 urb, status);
2164 urb->actual_length += dma->actual_len;
2165 }
2166 }
2167
2168
2169 if (ep->epnum && is_in) {
2170
2171 csr = musb_h_flush_rxfifo(ep, 0);
2172
2173
2174
2175
2176
2177 } else if (ep->epnum) {
2178 musb_h_tx_flush_fifo(ep);
2179 csr = musb_readw(epio, MUSB_TXCSR);
2180 csr &= ~(MUSB_TXCSR_AUTOSET
2181 | MUSB_TXCSR_DMAENAB
2182 | MUSB_TXCSR_H_RXSTALL
2183 | MUSB_TXCSR_H_NAKTIMEOUT
2184 | MUSB_TXCSR_H_ERROR
2185 | MUSB_TXCSR_TXPKTRDY);
2186 musb_writew(epio, MUSB_TXCSR, csr);
2187
2188 musb_writew(epio, MUSB_TXCSR, csr);
2189
2190 csr = musb_readw(epio, MUSB_TXCSR);
2191 } else {
2192 musb_h_ep0_flush_fifo(ep);
2193 }
2194 if (status == 0)
2195 musb_advance_schedule(ep->musb, urb, ep, is_in);
2196 return status;
2197}
2198
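/*
 * Unlink an URB.  URBs not currently active in hardware are simply given
 * back; the active one is aborted via musb_cleanup_urb().
 */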
2199static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2200{
2201 struct musb *musb = hcd_to_musb(hcd);
2202 struct musb_qh *qh;
2203 unsigned long flags;
2204 int is_in = usb_pipein(urb->pipe);
2205 int ret;
2206
2207 dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
2208 usb_pipedevice(urb->pipe),
2209 usb_pipeendpoint(urb->pipe),
2210 is_in ? "in" : "out");
2211
2212 spin_lock_irqsave(&musb->lock, flags);
2213 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2214 if (ret)
2215 goto done;
2216
2217 qh = urb->hcpriv;
2218 if (!qh)
2219 goto done;
2233 if (!qh->is_ready
2234 || urb->urb_list.prev != &qh->hep->urb_list
2235 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2236 int ready = qh->is_ready;
2237
2238 qh->is_ready = 0;
2239 musb_giveback(musb, urb, 0);
2240 qh->is_ready = ready;
2241
2242
2243
2244
2245 if (ready && list_empty(&qh->hep->urb_list)) {
2246 qh->hep->hcpriv = NULL;
2247 list_del(&qh->ring);
2248 kfree(qh);
2249 }
2250 } else
2251 ret = musb_cleanup_urb(urb, qh);
2252done:
2253 spin_unlock_irqrestore(&musb->lock, flags);
2254 return ret;
2255}
2256
2257
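/*
 * Disable an endpoint: abort the active URB if any, then give back every
 * queued URB with -ESHUTDOWN and free the qh.
 */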
2258static void
2259musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2260{
2261 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2262 unsigned long flags;
2263 struct musb *musb = hcd_to_musb(hcd);
2264 struct musb_qh *qh;
2265 struct urb *urb;
2266
2267 spin_lock_irqsave(&musb->lock, flags);
2268
2269 qh = hep->hcpriv;
2270 if (qh == NULL)
2271 goto exit;
2272
2273
2274
2275
2276 qh->is_ready = 0;
2277 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2278 urb = next_urb(qh);
2279
2280
2281 if (!urb->unlinked)
2282 urb->status = -ESHUTDOWN;
2283
2284
2285 musb_cleanup_urb(urb, qh);
2286
2287
2288
2289
2290 while (!list_empty(&hep->urb_list)) {
2291 urb = next_urb(qh);
2292 urb->status = -ESHUTDOWN;
2293 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2294 }
2295 } else {
2296
2297
2298
2299
2300 while (!list_empty(&hep->urb_list))
2301 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2302
2303 hep->hcpriv = NULL;
2304 list_del(&qh->ring);
2305 kfree(qh);
2306 }
2307exit:
2308 spin_unlock_irqrestore(&musb->lock, flags);
2309}
2310
2311static int musb_h_get_frame_number(struct usb_hcd *hcd)
2312{
2313 struct musb *musb = hcd_to_musb(hcd);
2314
2315 return musb_readw(musb->mregs, MUSB_FRAME);
2316}
2317
2318static int musb_h_start(struct usb_hcd *hcd)
2319{
2320 struct musb *musb = hcd_to_musb(hcd);
2321
2322
2323
2324
2325 hcd->state = HC_STATE_RUNNING;
2326 musb->port1_status = 0;
2327 return 0;
2328}
2329
2330static void musb_h_stop(struct usb_hcd *hcd)
2331{
2332 musb_stop(hcd_to_musb(hcd));
2333 hcd->state = HC_STATE_HALT;
2334}
2335
2336static int musb_bus_suspend(struct usb_hcd *hcd)
2337{
2338 struct musb *musb = hcd_to_musb(hcd);
2339 u8 devctl;
2340
2341 if (!is_host_active(musb))
2342 return 0;
2343
2344 switch (musb->xceiv->state) {
2345 case OTG_STATE_A_SUSPEND:
2346 return 0;
2347 case OTG_STATE_A_WAIT_VRISE:
2348
2349
2350
2351
2352 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2353 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2354 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2355 break;
2356 default:
2357 break;
2358 }
2359
2360 if (musb->is_active) {
2361 WARNING("trying to suspend as %s while active\n",
2362 otg_state_string(musb->xceiv->state));
2363 return -EBUSY;
2364 } else
2365 return 0;
2366}
2367
2368static int musb_bus_resume(struct usb_hcd *hcd)
2369{
2370
2371 return 0;
2372}
2373
2374const struct hc_driver musb_hc_driver = {
2375 .description = "musb-hcd",
2376 .product_desc = "MUSB HDRC host driver",
2377 .hcd_priv_size = sizeof(struct musb),
2378 .flags = HCD_USB2 | HCD_MEMORY,
2379
2380
2381
2382
2383
2384 .start = musb_h_start,
2385 .stop = musb_h_stop,
2386
2387 .get_frame_number = musb_h_get_frame_number,
2388
2389 .urb_enqueue = musb_urb_enqueue,
2390 .urb_dequeue = musb_urb_dequeue,
2391 .endpoint_disable = musb_h_disable,
2392
2393 .hub_status_data = musb_hub_status_data,
2394 .hub_control = musb_hub_control,
2395 .bus_suspend = musb_bus_suspend,
2396 .bus_resume = musb_bus_resume,
2397
2398
2399};
2400#endif
2401