1
2
3
4
5
6
7
8
9
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/dma-mapping.h>
19
20#include "musb_core.h"
21#include "musb_host.h"
22#include "musb_trace.h"
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73struct musb *hcd_to_musb(struct usb_hcd *hcd)
74{
75 return *(struct musb **) hcd->hcd_priv;
76}
77
78
79static void musb_ep_program(struct musb *musb, u8 epnum,
80 struct urb *urb, int is_out,
81 u8 *buf, u32 offset, u32 len);
82
83
84
85
/*
 * Flush a non-EP0 host TX FIFO until the controller reports it empty.
 * Spins up to ~1000 ms (1 ms per retry); warns once and gives up if the
 * FIFO never drains.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb *musb = ep->musb;
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		/*
		 * Request a flush; TXPKTRDY is set alongside FLUSHFIFO so
		 * a packet already loaded in the FIFO is discarded too.
		 */
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/* Bail out (once per boot, via WARN_ONCE) if the hardware
		 * never reports the FIFO empty.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
120
/*
 * Flush the endpoint-0 FIFO. EP0 shares one FIFO for both directions,
 * so both TXPKTRDY and RXPKTRDY must clear. Retries briefly, warns if
 * the flush never completes, then zeroes CSR0 to leave EP0 quiescent.
 * Note: EP0's CSR0 register sits at the MUSB_TXCSR offset.
 */
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 5;

	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* Clear all CSR0 state so the next control transfer starts clean. */
	musb_writew(epio, MUSB_TXCSR, 0);
}
143
144
145
146
147
/*
 * Start a PIO transmit on @ep: set TXPKTRDY so the controller sends the
 * packet already loaded into the FIFO. EP0 uses CSR0 and additionally
 * flags the packet as a SETUP token.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	if (ep->epnum) {
		/* H_WZC_BITS: write-1-to-clear bits must be set on writes
		 * so a read-modify-write doesn't clear pending statuses.
		 */
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		/* EP0: transmit the 8-byte SETUP packet. */
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}

}
163
/*
 * Enable DMA for a host TX endpoint. CPPI controllers additionally need
 * DMAMODE set; Mentor DMA configures that bit elsewhere.
 */
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
175
176static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
177{
178 if (is_in != 0 || ep->is_shared_fifo)
179 ep->in_qh = qh;
180 if (is_in == 0 || ep->is_shared_fifo)
181 ep->out_qh = qh;
182}
183
184static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
185{
186 return is_in ? ep->in_qh : ep->out_qh;
187}
188
189
190
191
192
193
194
/*
 * Begin the next URB queued on @qh: program the hardware endpoint and,
 * for OUT transfers, kick off the first packet. Control transfers always
 * start with the OUT-direction SETUP stage regardless of @is_in.
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32 len;
	void __iomem *mbase = musb->mregs;
	struct urb *urb = next_urb(qh);
	void *buf = urb->transfer_buffer;
	u32 offset = 0;
	struct musb_hw_ep *hw_ep = qh->hw_ep;
	int epnum = hw_ep->epnum;

	/* Reset per-URB bookkeeping. */
	qh->offset = 0;
	qh->segsize = 0;

	/* Pick the initial buffer/length per transfer type. */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Control always starts by sending the 8-byte SETUP packet. */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:
		/* Bulk/interrupt: resume from whatever was already done. */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Bind qh to the hw endpoint, then program FIFO/CSR/DMA state. */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* IN transfers were started by musb_ep_program (REQPKT); done. */
	if (is_in)
		return;

	/* OUT: decide when to actually transmit. */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/*
		 * NOTE: the SOF-deferred start path below is intentionally
		 * dead (if (1)); periodic TX always starts immediately.
		 */
		if (1) {
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* Would enable SOF interrupt so HCD can start the
			 * transfer in the right frame -- unreachable today.
			 */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			/* PIO: FIFO already loaded; set TXPKTRDY. */
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			/* CPPI/TUSB DMA needs an explicit DMA kick here. */
			musb_h_tx_dma_start(hw_ep);
	}
}
275
276
/*
 * Complete @urb back to the USB core with @status. Caller holds
 * musb->lock; it is dropped around usb_hcd_giveback_urb() because the
 * URB's completion handler may resubmit and re-enter this driver.
 */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
288
289
/*
 * Capture the hardware's current data-toggle bit for @qh's endpoint and
 * store it in usbcore's per-device toggle state, so the toggle survives
 * the endpoint being rescheduled onto a different hw_ep later.
 */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				struct urb *urb)
{
	void __iomem *epio = qh->hw_ep->regs;
	u16 csr;

	/* The toggle lives in RXCSR for IN, TXCSR for OUT. */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
308
309
310
311
312
313
314
315
/*
 * Finish the current URB on @hw_ep, tear down per-endpoint state when
 * its qh runs dry, and start the next qh (if any) on this endpoint.
 * Called with musb->lock held (dropped transiently by musb_giveback()).
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep *ep = qh->hw_ep;
	int ready = qh->is_ready;
	int status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* Preserve toggle (bulk/int); report per-frame errors (iso). */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	/* Block re-starts from the completion callback while we give back. */
	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/*
	 * No more URBs queued on this qh: release the DMA channel, unbind
	 * the qh from the endpoint, and free or recycle the qh.
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head *head;
		struct dma_controller *dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/*
			 * If this endpoint was multiplexed (shared bulk/control
			 * ring), move on to the next qh sharing the ring.
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* else: fall through -- dedicated endpoint, just free */
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* Periodic/dedicated qh: no successor on this hw_ep. */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	/* Kick the next URB on whichever qh now owns the endpoint. */
	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
400
/*
 * Flush the RX FIFO of @hw_ep and stop any in-progress request
 * (REQPKT/AUTOREQ/AUTOCLEAR cleared). Returns the resulting RXCSR.
 * @csr is the caller's base value to write (e.g. toggle-clear bits).
 */
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* FLUSHFIFO only acts when RXPKTRDY is set, so force it on. */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* Write twice: double-buffered FIFOs may hold two packets. */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* Hand the post-flush CSR back to the caller. */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
419
420
421
422
/*
 * PIO-drain one received packet from endpoint @epnum's FIFO into @urb.
 * Handles both isochronous (per-frame descriptors) and bulk/interrupt
 * transfers; flushes instead of copying on overflow. Returns true when
 * the URB is finished.
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16 rx_count;
	u8 *buf;
	u16 csr;
	bool done = false;
	u32 length;
	int do_flush = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	int pipe = urb->pipe;
	void *buffer = urb->transfer_buffer;

	/* Bytes the controller actually received for this packet. */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			/* Frame too big for its slot: record and flush. */
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* Done once every iso frame slot has been consumed. */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* Bulk/interrupt: append at the current qh offset. */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* Finished on: full buffer, short packet, or prior error. */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	/* Copy (or discard, on overflow) the packet out of the FIFO. */
	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* Ack the packet; request the next one if more expected. */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
515
516
517
518
519
520
521
522
523
/*
 * Re-program hardware endpoint @epnum for RX use by @qh: clear any stale
 * TX state on shared FIFOs, flush the RX FIFO with CLRDATATOG, then load
 * the target address/hub routing, protocol type, interval and max packet
 * size. Clears ep->rx_reinit when done.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16 csr;

	/*
	 * Shared-FIFO endpoints may still be configured for TX from a
	 * previous transfer; drain and neutralize that state first.
	 */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * DMAMODE must be cleared in a separate write from the
		 * other bits (hardware requirement), hence two writes.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		/* Unexpected stale packet: log it, then flush below. */
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	/* Flush RX and reset the data toggle for the new qh. */
	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* Target address: per-endpoint in multipoint mode, else FADDR. */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* Protocol/endpoint type and NAK/polling interval. */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);

	/* Max packet size with high-bandwidth multiplier in bits 12:11. */
	musb_writew(ep->regs, MUSB_RXMAXP,
			qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
582
/*
 * Configure TXCSR for a Mentor (Inventra/UX500) DMA transfer and pick
 * the DMA mode: mode 1 (with DMAMODE, multi-packet) for transfers larger
 * than one packet, else mode 0. Clamps *length to the channel maximum
 * and reports the chosen mode through *mode / channel->desired_mode.
 */
static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
				struct musb_hw_ep *hw_ep, struct musb_qh *qh,
				struct urb *urb, u32 offset,
				u32 *length, u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 pkt_size = qh->maxpacket;
	u16 csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/*
		 * AUTOSET is only safe when the hardware splits packets
		 * itself: single-packet bursts, or bulk endpoints that
		 * support bulk splitting. Otherwise leave it off.
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		/* Single packet: mode 0, software sets TXPKTRDY. */
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB;
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}
621
/*
 * Prepare a CPPI or TUSB OMAP DMA TX transfer. These engines manage
 * TXCSR themselves, so only the channel byte count is reset here; the
 * "mode" flag tells the engine whether to append a zero-length packet
 * (URB_ZERO_PACKET). *length is left unchanged.
 */
static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/* Mode 1 => terminate with a ZLP when the URB asks for one. */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}
640
/*
 * Program @length bytes of @urb (starting at @offset) for DMA TX on
 * @hw_ep. Returns true if the DMA engine accepted the transfer; on
 * failure the channel is released and TXCSR reverted so the caller can
 * fall back to PIO.
 */
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	u16 pkt_size = qh->maxpacket;
	u8 mode;

	/* Per-controller CSR/mode setup; may clamp length (Mentor). */
	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the CSR writes above are posted before the DMA engine
	 * starts moving data.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		/* DMA refused: release the channel and undo DMA bits. */
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
681
682
683
684
685
/*
 * Program hardware endpoint @epnum for one transfer segment of @urb:
 * select the endpoint, (re)configure CSRs, addressing and packet size,
 * set up DMA if available, and either preload the TX FIFO (PIO OUT) or
 * issue REQPKT (IN). @buf may be NULL for OUT URBs using scatterlists.
 * Caller holds musb->lock and has already chosen direction via @is_out.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller *dma_controller;
	struct dma_channel *dma_channel;
	u8 dma_ok;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
	u16 packet_sz = qh->maxpacket;
	u8 use_dma = 1;
	u16 csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* Zero-length OUT: DMA is pointless, force PIO. */
	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* Grab (or reuse) a DMA channel for this direction, if any. */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	if (is_out) {
		u16 csr;
		u16 int_txe;
		u16 load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* Mask this endpoint's TX interrupt while reprogramming. */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		if (epnum) {
			/*
			 * Double-buffered FIFOs may legitimately still hold
			 * a packet; only flush single-buffered endpoints.
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/* Clear stale status/DMA bits; force host TX mode. */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			/* Restore the saved data toggle (or clear it). */
			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* DMAMODE must be cleared in a second, separate write. */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* EP0: just flush; CSR0 semantics differ. */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* Target device/hub addressing. */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* Protocol, max packet size and interval/NAK limit. */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type)) {
				/* Bulk splitting: pack several USB packets
				 * per FIFO load; hb_mult tracks how many.
				 */
				qh->hb_mult = hw_ep->max_packet_sz_tx
					/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		/* How much to preload into the FIFO for PIO. */
		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		/* If DMA takes the whole transfer, skip the PIO preload. */
		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO path: copy the first segment into the FIFO. */
			qh->segsize = load_count;
			if (!buf) {
				/* Scatterlist URB: map the first sg chunk. */
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg"
							"list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* Re-enable this endpoint's TX interrupt. */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive path. */
	} else {
		u16 csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);

			/* Restore the saved RX data toggle. */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/* Endpoint should be idle if rx_reinit is clear. */
			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* Keep only DISNYET; everything else starts clean. */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* CPPI/TUSB DMA must be armed before REQPKT is issued. */
		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA RX. */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a busy-wait window; write CSR now. */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/* Short reads are OK unless URB_SHORT_NOT_OK. */
			dma_ok = dma_controller->channel_program(dma_channel,
				packet_sz, !(urb->transfer_flags &
					     URB_SHORT_NOT_OK),
				urb->transfer_dma + offset,
				qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		/* Ask the device for the first packet. */
		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
922
923
924
925
/*
 * Handle a NAK timeout on a multiplexed bulk endpoint: stop the current
 * transfer (and abort its DMA), rotate the NAKing qh to the back of the
 * shared bulk ring so other devices get bus time, and start the next qh.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
				  int is_in)
{
	struct dma_channel *dma;
	struct urb *urb;
	void __iomem *mbase = musb->mregs;
	void __iomem *epio = ep->regs;
	struct musb_qh *cur_qh, *next_qh;
	u16 rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Stop polling: clear REQPKT first, then (in a separate
		 * write) the DATAERROR/NAK-timeout status bit.
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* Clear the TX NAK-timeout status. */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			/* Abort in-flight DMA, crediting bytes done so far. */
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* Demote the NAKing qh to the ring's tail. */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			next_qh = first_qh(&musb->in_bulk);

			/* Endpoint must be fully reprogrammed for next qh. */
			ep->rx_reinit = 1;
		} else {
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			next_qh = first_qh(&musb->out_bulk);

			ep->tx_reinit = 1;
		}

		if (next_qh)
			musb_start_urb(musb, is_in, next_qh);
	}
}
998
999
1000
1001
1002
/*
 * Drive one step of the EP0 control-transfer state machine via PIO.
 * @len is the byte count just received (IN) or 0. Returns true when
 * another DATA-stage packet remains; false means the caller should
 * move on to the STATUS stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	struct musb_qh *qh = hw_ep->in_qh;
	struct usb_ctrlrequest *request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		/* Drain the just-received packet into the URB buffer. */
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* Short packet: DATA stage is over; fall out with
			 * more == false so STATUS follows.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		/* SETUP just went out; pick the next stage from bmRequestType. */
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH: OUT-DATA loads the first packet right away. */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
1072
1073
1074
1075
1076
1077
1078
/*
 * Endpoint-0 interrupt handler: services stall/error/NAK-timeout
 * conditions, steps the control state machine via musb_h_ep0_continue(),
 * and completes the URB when the STATUS stage finishes. Returns
 * IRQ_HANDLED if the interrupt was ours.
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb *urb;
	u16 csr, len;
	int status = 0;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	bool complete = false;
	irqreturn_t retval = IRQ_NONE;

	/* EP0 always uses the IN-side qh. */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* An IRQ while in STATUS stage means the transfer completed. */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* Error conditions reported by the controller. */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/*
		 * NAK timeout is not fatal on EP0: clear the condition and
		 * let the hardware keep retrying (status stays 0).
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* Stop any pending IN request, or flush whatever is queued. */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* Clear remaining error/status bits. */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* Spurious EP0 interrupt with nothing queued. */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* Advance the DATA stage, or kick off the STATUS stage. */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* More DATA packets: request IN or send the loaded OUT. */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* DATA done: STATUS direction opposes the data flow. */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* Disable PING for the status phase. */
			csr |= MUSB_CSR0_H_DIS_PING;

			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* Completed (or errored): give the URB back and schedule the next. */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
1201
1202
1203#ifdef CONFIG_USB_INVENTRA_DMA
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217#endif
1218
1219
/*
 * Service a TX-complete (or TX-error) interrupt on endpoint @epnum:
 * handle stall/error/NAK-timeout, account DMA or PIO progress, complete
 * the URB when done, or load and transmit the next packet. The "done:"
 * label is also a backward-goto target from the sg error path below.
 * Caller holds musb->lock.
 */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int pipe;
	bool done = false;
	u16 tx_csr;
	size_t length = 0;
	size_t offset = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->out_qh;
	struct urb *urb = next_urb(qh);
	u32 status = 0;
	void __iomem *mbase = musb->mregs;
	struct dma_channel *dma;
	bool transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* Spurious interrupt: nothing queued on this endpoint. */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* Error conditions first. */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* Endpoint stalled by the device. */
		musb_dbg(musb, "TX end %d stall", epnum);

		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* Three failed transmission attempts. */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			/* Shared bulk ring: rotate to the next device. */
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/*
			 * Keep retrying: writing TXPKTRDY with the
			 * write-1-to-clear bits re-arms the transmission.
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		/* Abort any in-flight DMA for the failed transfer. */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* Drain the FIFO and clear error/DMA bits (written twice;
		 * presumably a controller quirk -- NOTE(review): confirm).
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* DMA still running: wait for its own completion interrupt. */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA-mode-1 completion: the controller may still have
		 * TXPKTRDY set for the final short packet. Clear DMAENAB
		 * and TXPKTRDY first, then DMAMODE in a separate write
		 * (hardware ordering requirement), and re-read CSR.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/* FIFO still draining: another interrupt will follow. */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

	/* Account progress and decide whether the URB is finished. */
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			/* Record this frame; advance to the next one. */
			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* Short packet, or full length without ZLP => done. */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* An URB already unlinked/errored elsewhere also finishes here. */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* Complete the URB and start the next one on this hw_ep. */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else	if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		/* Queue the next segment via DMA if possible. */
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO fallback: load at most one max-size packet into the FIFO.
	 * The URB must be unmapped first since the CPU touches the buffer.
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;

	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/* No linear buffer => walk the URB's scatterlist instead. */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg walk failure funnels back into the error path above. */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		/* Last chunk written: revert to linear-buffer mode. */
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	/* Transmit the freshly loaded packet. */
	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
1495
1496#ifdef CONFIG_USB_TI_CPPI41_DMA
1497
/*
 * Queue the next isochronous IN frame on a CPPI 4.1 DMA channel:
 * compute the frame's DMA address from transfer_dma plus the iso frame
 * offset, enable DMAENAB in RXCSR, and program the channel. Returns the
 * channel_program() result (nonzero on success).
 *
 * NOTE(review): the (u32) casts on transfer_dma and buf truncate
 * dma_addr_t on 64-bit/LPAE configs -- presumably CPPI 4.1 platforms
 * only use 32-bit DMA addresses; confirm before reusing elsewhere.
 */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
		(u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	return dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);
}
1522#else
/* Build without CPPI 4.1 DMA: ISO RX DMA is never programmed, so report
 * "not done" and let the caller handle the transfer another way. */
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
1531#endif
1532
1533#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1534 defined(CONFIG_USB_TI_CPPI41_DMA)
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1570 struct musb_hw_ep *hw_ep,
1571 struct musb_qh *qh,
1572 struct urb *urb,
1573 size_t len)
1574{
1575 struct dma_channel *channel = hw_ep->rx_channel;
1576 void __iomem *epio = hw_ep->regs;
1577 u16 val;
1578 int pipe;
1579 bool done;
1580
1581 pipe = urb->pipe;
1582
1583 if (usb_pipeisoc(pipe)) {
1584 struct usb_iso_packet_descriptor *d;
1585
1586 d = urb->iso_frame_desc + qh->iso_idx;
1587 d->actual_length = len;
1588
1589
1590
1591
1592 if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1593 d->status = 0;
1594
1595 if (++qh->iso_idx >= urb->number_of_packets) {
1596 done = true;
1597 } else {
1598
1599 if (musb_dma_cppi41(hw_ep->musb))
1600 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1601 urb, len);
1602 done = false;
1603 }
1604
1605 } else {
1606
1607 done = (urb->actual_length + len >=
1608 urb->transfer_buffer_length
1609 || channel->actual_len < qh->maxpacket
1610 || channel->rx_packet_done);
1611 }
1612
1613
1614 if (!done) {
1615 val = musb_readw(epio, MUSB_RXCSR);
1616 val |= MUSB_RXCSR_H_REQPKT;
1617 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1618 }
1619
1620 return done;
1621}
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
/*
 * Start an RX DMA for the packet currently sitting in the FIFO
 * (Inventra / UX500 / CPPI 4.1 controllers).
 *
 * Reads RXCOUNT to size the transfer, records per-frame ISO status
 * (protocol error from @iso_err, overflow if the packet exceeds the
 * frame descriptor), programs RXCSR (DMAENAB, AUTOREQ only in mode 1,
 * AUTOCLEAR only when not high-bandwidth), and hands the buffer to the
 * DMA controller.
 *
 * Returns nonzero if the channel was programmed.  On failure the channel
 * is released and the DMA-related RXCSR bits are cleared so the caller
 * can fall back to PIO.
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		/* latch a protocol error reported by the caller */
		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		/* clamp to the frame descriptor, flag the overflow */
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	/* default: DMA mode 0, one packet at a time */
	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* mode 1 moves multiple packets, but only behaves correctly when
	 * a short packet cannot terminate the transfer early
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	    > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* stop requesting packets while DMA drains the FIFO */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	/* AUTOREQ only makes sense in (multi-packet) mode 1 */
	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT if when actual_length != 0,
	 * transfer_buffer_length needs to be
	 * adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				   channel->desired_mode,
				   buf, length);

	/* on failure, give the channel back and undo the CSR setup */
	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
1738#else
/* Build without Inventra/UX500/CPPI 4.1 DMA: nothing to service, the
 * transfer is never completed by this path. */
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}
1747
/* Build without Inventra/UX500/CPPI 4.1 DMA: RX DMA is never programmed,
 * so the caller always falls back to PIO. */
static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
1757#endif
1758
1759
1760
1761
1762
/*
 * Service an RX interrupt for the given endpoint: handle errors (stall,
 * protocol error, NAK timeout, incomplete high-bandwidth ISO), complete
 * or continue a DMA transfer, or unload the FIFO by PIO.  On completion
 * the URB status is finalized and the schedule advanced.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb *urb;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	struct dma_controller *c = musb->dma_controller;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	size_t xfer_len;
	void __iomem *mbase = musb->mregs;
	u16 rx_csr, val;
	bool iso_err = false;
	bool done = false;
	u32 status;
	struct dma_channel *dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* no URB queued: drain the FIFO and clear the data toggle
		 * so stale hardware state can't confuse a later transfer
		 */
		musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
			epnum, val, musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	trace_musb_urb_rx(musb, urb);

	/* check for errors; concurrent stall & unlink is not really
	 * handled yet
	 */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		musb_dbg(musb, "RX end %d STALL", epnum);

		/* endpoint stalled; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		musb_dbg(musb, "end %d RX proto error", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

		rx_csr &= ~MUSB_RXCSR_H_ERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			musb_dbg(musb, "RX end %d NAK timeout", epnum);

			/* NAK timeout on bulk: when the shared bulk
			 * endpoint is multiplexed among several qhs,
			 * rotate to the next one instead of retrying
			 * this device forever
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			musb_dbg(musb, "RX end %d ISO data error", epnum);
			/* for ISO this is a per-frame error, reported
			 * later in the frame descriptor
			 */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN; the channel should have been
		 * serviced or aborted before we get another RX irq
		 */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* Non-Inventra/UX500 controllers with REQPKT still set: stop
	 * requesting packets and abort any DMA that is still in flight
	 * before touching the FIFO.
	 */
	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {

		/* REVISIT: this duplicates the DMA cleanup above and has
		 * been seen on short reads; needs investigation
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}

	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		/* a DMA segment just completed: credit its bytes and
		 * clear the DMA-related CSR bits before deciding whether
		 * the URB is finished
		 */
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) {
			done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
			musb_dbg(hw_ep->musb,
				"ep %d dma %s, rxcsr %04x, rxcount %d",
				epnum, done ? "off" : "reset",
				musb_readw(epio, MUSB_RXCSR),
				musb_readw(epio, MUSB_RXCOUNT));
		} else {
			done = true;
		}

	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* SHOULD NEVER HAPPEN; abort the transfer by
			 * dropping the request for further packets
			 */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets: try to start DMA first */
		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) && dma) {
			musb_dbg(hw_ep->musb,
				"RX%d count %d, buffer 0x%llx len %d/%d",
				epnum, musb_readw(epio, MUSB_RXCOUNT),
				(unsigned long long) urb->transfer_dma
				+ urb->actual_length,
				qh->offset,
				urb->transfer_buffer_length);

			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
							   xfer_len, iso_err))
				goto finish;
			else
				dev_err(musb->controller, "error: rx_dma failed\n");
		}

		if (!dma) {
			unsigned int received_len;

			/* unmap the buffer so the CPU can read the FIFO
			 * data into it
			 */
			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

			/* fall back to the scatterlist when there is no
			 * linear transfer_buffer
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
					       sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* calculate how many bytes this sg entry
				 * actually consumed
				 */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			musb_dbg(musb, "read %spacket", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg)
			qh->use_sg = false;

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
2013
2014
2015
2016
2017
2018
/*
 * Assign a hardware endpoint to @qh and, if its queue was idle, start the
 * first URB.  Control transfers use the dedicated control endpoint; other
 * types get a best-fit free endpoint; bulk falls back to the shared,
 * multiplexed bulk endpoint.  Caller must hold musb->lock.
 *
 * Returns 0 on success, -ENOSPC when no suitable endpoint is free.
 */
static int musb_schedule(
	struct musb *musb,
	struct musb_qh *qh,
	int is_in)
{
	int idle = 0;
	int best_diff;
	int best_end, epnum;
	struct musb_hw_ep *hw_ep = NULL;
	struct list_head *head = NULL;
	u8 toggle;
	u8 txtype;
	struct urb *urb = next_urb(qh);

	/* use fixed hardware for control transfers */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* Best-fit search over the claimable endpoints: pick the free one
	 * whose FIFO size exceeds the (possibly high-bandwidth) maxpacket
	 * by the smallest margin.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int diff;

		/* skip endpoints already owned by another qh */
		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		/* the shared bulk endpoint is only a fallback */
		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller quirk: a bulk TX transfer on an
			 * endpoint that previously handled ISOC must start
			 * on a zero data toggle; the hardware starts on 0
			 * regardless of the toggle bits programmed into
			 * TXCSR.  Skip such an endpoint when the transfer
			 * would need to start on toggle 1.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}

	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable the bulk NAK-timeout scheme when bulk requests
		 * are multiplexed: NAK interval 8 for high speed, 4 for
		 * full speed.  NOTE(review): this scheme does not help a
		 * full-speed device behind a high-speed hub, since NAK
		 * interrupts don't arrive in that case — confirm.
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		dev_err(musb->controller,
				"%s hwep alloc failed for %dx%d\n",
				musb_ep_xfertype_string(qh->type),
				qh->hb_mult, qh->maxpacket);
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
	/* multiplexed endpoints queue the qh on a ring; only start it
	 * now if the ring was empty
	 */
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
2138
/*
 * hc_driver.urb_enqueue: link the URB onto its endpoint, and on the
 * first URB for an endpoint allocate a qh, precompute the hardware
 * register values (type, interval, hub/tt addressing), and schedule it
 * onto a hardware endpoint.
 */
static int musb_urb_enqueue(
	struct usb_hcd *hcd,
	struct urb *urb,
	gfp_t mem_flags)
{
	unsigned long flags;
	struct musb *musb = hcd_to_musb(hcd);
	struct usb_host_endpoint *hep = urb->ep;
	struct musb_qh *qh;
	struct usb_endpoint_descriptor *epd = &hep->desc;
	int ret;
	unsigned type_reg;
	unsigned interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	trace_musb_urb_enq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* The URB is now on hep->urb_list; if the endpoint already has a
	 * live qh (or linking failed) there is nothing more to set up.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* High-bandwidth multiplier: only ISO is supported, and only when
	 * the core advertises hb_iso_rx/hb_iso_tx for that direction.
	 */
	qh->hb_mult = usb_endpoint_maxp_mult(epd);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			dev_err(musb->controller,
				"high bandwidth %s (%dx%d) not supported\n",
				musb_ep_xfertype_string(qh->type),
				qh->hb_mult, qh->maxpacket & 0x7ff);
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register: transfer type, endpoint
	 * number, and the speed code in the top bits
	 */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* precompute rxinterval/txinterval register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/* full/low-speed interrupt uses bInterval directly
		 * (clamped to at least 1); high speed falls through to
		 * the ISO (logarithmic) encoding below
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  So partial NAK-limit
		 * support for bulk RX is handled elsewhere; here we leave
		 * the NAK limit off for simpler scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device *parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled;
	 * we only have work to do in the former case
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* some concurrent activity submitted another urb to hep,
		 * or the urb was dequeued meanwhile; odd but legal
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
2327
2328
2329
2330
2331
2332
2333
/*
 * Abort the transfer currently programmed on qh's hardware endpoint:
 * stop any DMA in flight (crediting the bytes it already moved), flush
 * the FIFO for the URB's direction, clear the error/ready CSR bits, and
 * — if the DMA abort reported success — give the URB back and advance
 * the schedule.  Caller must hold musb->lock.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep *ep = qh->hw_ep;
	struct musb *musb = ep->musb;
	void __iomem *epio = ep->regs;
	unsigned hw_end = ep->epnum;
	void __iomem *regs = ep->musb->mregs;
	int is_in = usb_pipein(urb->pipe);
	int status = 0;
	u16 csr;
	struct dma_channel *dma = NULL;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* flush RX fifo without clearing the data toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* clear the endpoint's irq status here to avoid bogus irqs */
		if (is_dma_capable() && dma)
			musb_platform_clear_ep_rxintr(musb, ep->epnum);
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT: the second write may be needed to clear
		 * FLUSHFIFO state — confirm against the core docs
		 */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
2388
/*
 * hc_driver.urb_dequeue: unlink a submitted URB.  URBs not actively
 * programmed into hardware are given back directly; the one at the head
 * of an active endpoint queue is aborted via musb_cleanup_urb().
 */
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb *musb = hcd_to_musb(hcd);
	struct musb_qh *qh;
	unsigned long flags;
	int is_in = usb_pipein(urb->pipe);
	int ret;

	trace_musb_urb_deq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/* The URB is known to hardware only if its qh is ready, it sits at
	 * the head of the endpoint's URB list, AND that qh is the one the
	 * hardware endpoint is currently servicing; anything else can be
	 * given back immediately.
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int ready = qh->is_ready;

		/* temporarily block the qh so giveback can't restart it */
		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else is using the qh and its URB list has
		 * emptied, recycle it.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
2443
2444
/* hc_driver.endpoint_disable: shut down a host endpoint, aborting the
 * active transfer (if this qh owns the hardware) and giving back every
 * remaining URB with -ESHUTDOWN. */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long flags;
	struct musb *musb = hcd_to_musb(hcd);
	struct musb_qh *qh;
	struct urb *urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* block the qh so nothing restarts it while we tear down */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* abort the transfer programmed into the hardware */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the queue on
		 * hw_ep (e.g. the shared bulk ring) as we go.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* The hardware is busy with another qh; since !qh->is_ready,
		 * advancing the schedule cannot activate any of these, so
		 * just empty the queue and free the qh.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
2497
2498static int musb_h_get_frame_number(struct usb_hcd *hcd)
2499{
2500 struct musb *musb = hcd_to_musb(hcd);
2501
2502 return musb_readw(musb->mregs, MUSB_FRAME);
2503}
2504
2505static int musb_h_start(struct usb_hcd *hcd)
2506{
2507 struct musb *musb = hcd_to_musb(hcd);
2508
2509
2510
2511
2512 hcd->state = HC_STATE_RUNNING;
2513 musb->port1_status = 0;
2514 return 0;
2515}
2516
2517static void musb_h_stop(struct usb_hcd *hcd)
2518{
2519 musb_stop(hcd_to_musb(hcd));
2520 hcd->state = HC_STATE_HALT;
2521}
2522
/*
 * hc_driver.bus_suspend: suspend the root port, then sanity-check the
 * OTG state.  Returns -EBUSY if the controller is still active so
 * usbcore retries the suspend later.
 */
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);
	u8 devctl;
	int ret;

	ret = musb_port_suspend(musb, true);
	if (ret)
		return ret;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_SUSPEND:
		/* already suspended */
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* VBUS may already be at session level even though the
		 * state machine hasn't advanced; if so, move on to
		 * A_WAIT_BCON before judging is_active below
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				usb_otg_state_string(musb->xceiv->otg->state));
		return -EBUSY;
	} else
		return 0;
}
2559
2560static int musb_bus_resume(struct usb_hcd *hcd)
2561{
2562 struct musb *musb = hcd_to_musb(hcd);
2563
2564 if (musb->config &&
2565 musb->config->host_port_deassert_reset_at_resume)
2566 musb_port_reset(musb, false);
2567
2568 return 0;
2569}
2570
2571#ifndef CONFIG_MUSB_PIO_ONLY
2572
2573#define MUSB_USB_DMA_ALIGN 4
2574
2575struct musb_temp_buffer {
2576 void *kmalloc_ptr;
2577 void *old_xfer_buffer;
2578 u8 data[0];
2579};
2580
2581static void musb_free_temp_buffer(struct urb *urb)
2582{
2583 enum dma_data_direction dir;
2584 struct musb_temp_buffer *temp;
2585 size_t length;
2586
2587 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2588 return;
2589
2590 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2591
2592 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2593 data);
2594
2595 if (dir == DMA_FROM_DEVICE) {
2596 if (usb_pipeisoc(urb->pipe))
2597 length = urb->transfer_buffer_length;
2598 else
2599 length = urb->actual_length;
2600
2601 memcpy(temp->old_xfer_buffer, temp->data, length);
2602 }
2603 urb->transfer_buffer = temp->old_xfer_buffer;
2604 kfree(temp->kmalloc_ptr);
2605
2606 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2607}
2608
2609static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2610{
2611 enum dma_data_direction dir;
2612 struct musb_temp_buffer *temp;
2613 void *kmalloc_ptr;
2614 size_t kmalloc_size;
2615
2616 if (urb->num_sgs || urb->sg ||
2617 urb->transfer_buffer_length == 0 ||
2618 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2619 return 0;
2620
2621 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2622
2623
2624 kmalloc_size = urb->transfer_buffer_length +
2625 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2626
2627 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2628 if (!kmalloc_ptr)
2629 return -ENOMEM;
2630
2631
2632 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2633
2634
2635 temp->kmalloc_ptr = kmalloc_ptr;
2636 temp->old_xfer_buffer = urb->transfer_buffer;
2637 if (dir == DMA_TO_DEVICE)
2638 memcpy(temp->data, urb->transfer_buffer,
2639 urb->transfer_buffer_length);
2640 urb->transfer_buffer = temp->data;
2641
2642 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2643
2644 return 0;
2645}
2646
2647static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2648 gfp_t mem_flags)
2649{
2650 struct musb *musb = hcd_to_musb(hcd);
2651 int ret;
2652
2653
2654
2655
2656
2657
2658
2659 if (musb->hwvers < MUSB_HWVERS_1800)
2660 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2661
2662 ret = musb_alloc_temp_buffer(urb, mem_flags);
2663 if (ret)
2664 return ret;
2665
2666 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2667 if (ret)
2668 musb_free_temp_buffer(urb);
2669
2670 return ret;
2671}
2672
2673static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2674{
2675 struct musb *musb = hcd_to_musb(hcd);
2676
2677 usb_hcd_unmap_urb_for_dma(hcd, urb);
2678
2679
2680 if (musb->hwvers < MUSB_HWVERS_1800)
2681 return;
2682
2683 musb_free_temp_buffer(urb);
2684}
2685#endif
2686
/*
 * hc_driver operations table handed to usb_create_hcd(); usbcore drives
 * the MUSB host side exclusively through these callbacks.
 */
static const struct hc_driver musb_hc_driver = {
	.description = "musb-hcd",
	.product_desc = "MUSB HDRC host driver",
	/* hcd_priv holds a pointer back to the struct musb instance;
	 * see hcd_to_musb() / musb_host_alloc()
	 */
	.hcd_priv_size = sizeof(struct musb *),
	.flags = HCD_USB2 | HCD_MEMORY,

	/* no .irq or .reset hooks here: interrupt handling and reset are
	 * handled by the shared musb core code (OTG configurations share
	 * them with the peripheral side)
	 */

	.start = musb_h_start,
	.stop = musb_h_stop,

	.get_frame_number = musb_h_get_frame_number,

	.urb_enqueue = musb_urb_enqueue,
	.urb_dequeue = musb_urb_dequeue,
	.endpoint_disable = musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	/* DMA-alignment bounce-buffer hooks, only needed when DMA is in */
	.map_urb_for_dma = musb_map_urb_for_dma,
	.unmap_urb_for_dma = musb_unmap_urb_for_dma,
#endif

	.hub_status_data = musb_hub_status_data,
	.hub_control = musb_hub_control,
	.bus_suspend = musb_bus_suspend,
	.bus_resume = musb_bus_resume,
	/* .start_port_reset = NULL, */
	/* .hub_irq_enable = NULL, */
};
2718
2719int musb_host_alloc(struct musb *musb)
2720{
2721 struct device *dev = musb->controller;
2722
2723
2724 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2725 if (!musb->hcd)
2726 return -EINVAL;
2727
2728 *musb->hcd->hcd_priv = (unsigned long) musb;
2729 musb->hcd->self.uses_pio_for_control = 1;
2730 musb->hcd->uses_new_polling = 1;
2731 musb->hcd->has_tt = 1;
2732
2733 return 0;
2734}
2735
2736void musb_host_cleanup(struct musb *musb)
2737{
2738 if (musb->port_mode == MUSB_PERIPHERAL)
2739 return;
2740 usb_remove_hcd(musb->hcd);
2741}
2742
/* Drop the reference on the HCD created by musb_host_alloc(). */
void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}
2747
/*
 * Register this MUSB instance as a USB host controller: force host mode
 * when configured host-only, wire the HCD into the OTG transceiver, set
 * the power budget, and add the HCD to usbcore.
 *
 * Returns 0 on success or the error from usb_add_hcd().
 */
int musb_host_setup(struct musb *musb, int power_budget)
{
	int ret;
	struct usb_hcd *hcd = musb->hcd;

	if (musb->port_mode == MUSB_HOST) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	}
	otg_set_host(musb->xceiv->otg, &hcd->self);
	/* don't support otg protocols */
	hcd->self.otg_port = 0;
	musb->xceiv->otg->host = &hcd->self;
	/* doubled default of 250 gives 500; presumably mA — confirm
	 * against usbcore's power_budget semantics
	 */
	hcd->power_budget = 2 * (power_budget ? : 250);
	/* PHY bring-up is handled by the musb core, not usbcore */
	hcd->skip_phy_initialization = 1;

	ret = usb_add_hcd(hcd, 0, 0);
	if (ret < 0)
		return ret;

	device_wakeup_enable(hcd->self.controller);
	return 0;
}
2771
/* Ask usbcore to resume this controller's root hub. */
void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}
2776
/*
 * Nudge usbcore to re-examine root-hub status: switch bookkeeping to
 * host mode, then poll the pending status URB if one is queued,
 * otherwise resume the root hub.
 */
void musb_host_poke_root_hub(struct musb *musb)
{
	MUSB_HST_MODE(musb);
	if (musb->hcd->status_urb)
		usb_hcd_poll_rh_status(musb->hcd);
	else
		usb_hcd_resume_root_hub(musb->hcd);
}
2785