1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/delay.h>
39#include <linux/sched.h>
40#include <linux/slab.h>
41#include <linux/errno.h>
42#include <linux/init.h>
43#include <linux/list.h>
44#include <linux/dma-mapping.h>
45
46#include "musb_core.h"
47#include "musb_host.h"
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98struct musb *hcd_to_musb(struct usb_hcd *hcd)
99{
100 return *(struct musb **) hcd->hcd_priv;
101}
102
103
104static void musb_ep_program(struct musb *musb, u8 epnum,
105 struct urb *urb, int is_out,
106 u8 *buf, u32 offset, u32 len);
107
108
109
110
/*
 * Flush a non-EP0 host TX FIFO.  Repeatedly sets FLUSHFIFO until the
 * FIFONOTEMPTY bit clears, with a bounded retry count so a wedged
 * controller cannot hang the CPU.  Caller must have selected the endpoint.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		/* only log when the CSR value actually changes, to avoid spam */
		if (csr != lastcsr)
			dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		/* give up (with a warning) rather than spin forever */
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
134
/*
 * Flush the (shared) endpoint-zero FIFO in either direction and reset
 * CSR0 for the next transfer.  EP0 uses the CSR0 bit layout even though
 * it is accessed through the MUSB_TXCSR register offset.
 */
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo, in either direction */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
157
158
159
160
161
/*
 * Start a PIO transmit on the given hardware endpoint by setting TXPKTRDY.
 * Endpoint zero uses CSR0 and additionally flags the packet as a SETUP.
 * NOTE: no locking here; caller must hold the lock and select the endpoint.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	if (ep->epnum) {
		/* W1C status bits are written back as "write-zero-clears" safe */
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		/* EP0: the first packet of a control transfer is SETUP */
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
177
/*
 * Enable DMA for a host TX endpoint.  CPPI additionally wants DMAMODE set.
 * NOTE: no locking here; caller must hold the lock and select the endpoint.
 */
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
189
190static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
191{
192 if (is_in != 0 || ep->is_shared_fifo)
193 ep->in_qh = qh;
194 if (is_in == 0 || ep->is_shared_fifo)
195 ep->out_qh = qh;
196}
197
198static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
199{
200 return is_in ? ep->in_qh : ep->out_qh;
201}
202
203
204
205
206
207
208
/*
 * Start the I/O for the URB at the head of @qh: program the endpoint and,
 * for OUT/transmit, kick off the first packet (PIO or DMA).  For control
 * transfers the first packet is always the 8-byte SETUP, sent OUT.
 * Caller must hold musb->lock.
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero when resuming a transfer */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			} s; }),
			epnum, buf + offset, len);

	/* configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME(review): the frame counter is read but never used;
		 * the "else" arm below is dead because of the if (1).
		 */
		if (1) {
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
302
303
/*
 * Complete @urb back to the USB core with @status.  Must be called with
 * musb->lock held; the lock is dropped around usb_hcd_giveback_urb()
 * because the completion callback may resubmit and re-enter the driver.
 */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
322
323
/*
 * Save the hardware's current data-toggle state into the usbcore toggle
 * bitmap, so the next URB queued to this endpoint resumes with the right
 * DATA0/DATA1 sequence.
 */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem	*epio = qh->hw_ep->regs;
	u16		csr;

	/* toggle lives in RXCSR for IN endpoints, TXCSR for OUT */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
342
343
344
345
346
347
348
349
/*
 * Finish the current URB on @hw_ep and advance to the next transfer:
 * give the URB back, tear down the endpoint binding (and DMA channel)
 * when its qh has no more URBs, possibly free the qh, and start the
 * next ready URB if one exists.  Caller must hold musb->lock.
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	/* block re-entry from the completion callback while giving back */
	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* clean up after the URB-giveback */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness)
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* FALLTHROUGH: non-muxed control/bulk is freed like
			 * periodic endpoints below
			 */
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
434
/*
 * Flush a host RX FIFO and return the resulting RXCSR value.
 * @csr is merged with the flush bits; REQPKT/AUTOREQ/AUTOCLEAR are
 * cleared so the FIFO cannot immediately refill.
 */
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want the FIFO to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write twice in case of double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer by reading back */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
453
454
455
456
/*
 * PIO RX for one packet: copy RXCOUNT bytes from the endpoint FIFO into
 * the URB buffer (iso or non-iso bookkeeping), then either flush on
 * overflow or ack RXPKTRDY and request the next packet.
 * Returns true when the URB is complete.
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum) was done by caller (IRQ path) */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int			status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			/* packet bigger than the descriptor slot: truncate
			 * the copy and discard the FIFO remainder
			 */
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done: buffer full, or short packet */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
549
550
551
552
553
554
555
556
557
/*
 * Re-initialize a host RX endpoint for a (possibly new) target device:
 * scrub stale FIFO/CSR state, then program the target address registers,
 * protocol/type, polling interval, and max packet size from @qh.
 * Clears ep->rx_reinit on success.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/* clear mode (and everything else) to enable Rx;
		 * DMAMODE must be cleared in a separate write first
		 * (hardware quirk: clearing DMAENAB and DMAMODE together
		 * is unreliable) -- presumably why the two-step below
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);

	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
620
/*
 * Program a DMA transmit of @length bytes at @offset into the URB's DMA
 * buffer.  Returns true on success; on failure the channel is released
 * and TX falls back to PIO (hw_ep->tx_channel cleared).
 *
 * The Inventra/UX500 path picks DMA mode 1 (packet chaining, AUTOSET)
 * for multi-packet transfers, mode 0 otherwise; the CPPI/TUSB-OMAP path
 * leaves CSR programming to the DMA controller.
 */
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high-bandwidth transfer,
		 * except for bulk where multi-packet splits are allowed
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/* TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/* Ensure the DMA engine sees the updated register/memory state
	 * before the channel is programmed.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		/* DMA setup failed: release the channel and undo the CSR
		 * DMA bits so the caller can fall back to PIO
		 */
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
692
693
694
695
696
/*
 * Program a hardware endpoint for one transfer segment and, for OUT,
 * preload the FIFO (or hand the segment to DMA).  For IN, arm REQPKT
 * (and optionally RX DMA) so the next packet is requested.
 * Caller must hold musb->lock; possibly called from interrupt context.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* zero-length OUT needs no DMA; force PIO and drop the channel */
	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
					/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				/* unmapped highmem URB: walk its sg list */
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg"
							"list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/* Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
936
937
938
939
/*
 * Handle a NAK timeout on a (muxed) bulk endpoint: ack/clear the error
 * bit, abort any in-flight DMA, save the data toggle, rotate the
 * NAKing qh to the end of the bulk ring for fairness, and restart the
 * next qh.  Caller must hold musb->lock.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/* clear nak timeout bit */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			/* credit the partial DMA progress to the URB */
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}
1004
1005
1006
1007
1008
/*
 * Advance the endpoint-zero state machine by one step, doing any PIO
 * FIFO work for the current stage.  @len is the RX byte count when a
 * packet was received.  Returns true while the control transfer still
 * has DATA-stage work; false once it is ready for the STATUS stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		/* device sent more than the buffer can hold */
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH: OUT data stage begins immediately */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
1078
1079
1080
1081
1082
1083
1084
/*
 * Interrupt handler for endpoint zero: decode CSR0 status (stall, error,
 * NAK timeout), abort on failure, otherwise drive the control state
 * machine via musb_h_ep0_continue() and advance the schedule when the
 * transfer completes.  Called with controller irqlock held, returns
 * IRQ_HANDLED if the interrupt was serviced.
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN!
		 */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase? */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
1204
1205
1206#ifdef CONFIG_USB_INVENTRA_DMA
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220#endif
1221
1222
/*
 * Service a TX interrupt on non-EP0 endpoint @epnum: handle stall /
 * protocol-error / NAK-timeout status, reconcile DMA completion with
 * the controller's FIFO state, account the bytes transferred, and
 * either complete the URB or load/start the next packet (PIO or DMA).
 * Called with controller irqlock held.
 */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			dev_dbg(musb->controller,
				"NAK timeout on TX%d ep\n", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			dev_dbg(musb->controller,
				"TX end=%d device not responding\n", epnum);
			/* NOTE: this code path would be a good place to PAUSE
			 * a transfer, if there's some other (nonperiodic)
			 * transfer waiting that could use this fifo.
			 *
			 * We don't do that; take the IRQ-time hit of retrying
			 * immediately instead.
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * knowing the last short packet left the FIFO.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been set yet, we
			 * clear them in the wrong order.
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by the
			 * controller).  Re-read TXCSR to get an up-to-date
			 * value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In DMA mode 1, if the packet is still in the FIFO we must
		 * wait for the TXPKTRDY interrupt before completing.
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
1498
1499
1500#ifdef CONFIG_USB_INVENTRA_DMA
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537#endif
1538
1539
1540
1541
1542
/*
 * Service an RX interrupt for the given hardware endpoint: handle error
 * flags, collect any DMA results, and either unload the FIFO by PIO
 * (musb_host_packet_rx) or (re)program the DMA engine for the next packet.
 * Called from the controller interrupt path with the controller locked.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb *urb;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	size_t xfer_len;
	void __iomem *mbase = musb->mregs;
	int pipe;
	u16 rx_csr, val;
	bool iso_err = false;
	bool done = false;
	u32 status;
	struct dma_channel *dma;
	/* RX path copies FIFO data *to* the scatterlist */
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* No URB queued for this endpoint: nothing to deliver the
		 * data to, so drop the packet and flush the FIFO.  This
		 * shouldn't normally happen (NOTE(review): presumably seen
		 * during unlink races — confirm against usbtest logs).
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
				musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
			epnum, rx_csr, urb->actual_length,
			dma ? dma->actual_len : 0);

	/* Check error flags first; each maps to a distinct URB status. */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* endpoint stalled; report -EPIPE to the URB */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		/* protocol error; also stop the NAK-limit timer */
		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			/* For non-ISO endpoints DATAERROR means a NAK
			 * timeout.  NAKing is not an error per se; keep
			 * trying — but if other bulk qhs are multiplexed
			 * on the shared bulk endpoint, yield to them so a
			 * permanently-NAKing device can't starve the rest.
			 */
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			/* clear the DATAERROR flag and keep polling */
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* for ISO the error is reported per-packet later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* Faults abort the transfer: stop DMA (keeping its byte count),
	 * flush the FIFO and clear the NAK-limit timer.
	 */
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* No error flag set but the channel is still busy:
		 * shouldn't happen; just log and bail out.
		 */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* Non-Mentor DMA: if REQPKT is still set here (e.g. after a short
	 * read), abort any busy channel and drop the outstanding IN token.
	 */
#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		/* A DMA transfer just completed: collect its length and
		 * turn off the DMA-related CSR bits before deciding
		 * whether the URB is finished.
		 */
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* don't overwrite an error status already recorded
			 * for this ISO packet
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done when the buffer is full or a short packet
			 * was received
			 */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* not done: send the IN token for the next packet,
		 * without AUTOREQ
		 */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* no errors: a packet should be ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* abort: stop requesting further IN packets */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* Mentor/UX500 DMA: program the channel for the packet
		 * now sitting in the FIFO (and possibly more, in mode 1).
		 */
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		if (dma) {
			struct dma_controller *c;
			u16 rx_count;
			int ret, length;
			dma_addr_t buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n",
					epnum, rx_count,
					(unsigned long long) urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					/* received more than this ISO slot
					 * can hold; truncate to d->length
					 */
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
							rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			/* default: DMA mode 0 (one packet per request) */
			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* mode 1 (multi-packet) only when the remainder is
			 * larger than one packet and short reads are errors
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

			/* Set up RXCSR for the chosen mode: AUTOREQ only
			 * makes sense in mode 1, and AUTOCLEAR must not be
			 * used for high-bandwidth (hb_mult > 1) endpoints.
			 */
			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* program the channel; on failure fall back to PIO
			 * by releasing the channel and clearing DMA bits
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor/UX500 DMA */

		if (!dma) {
			unsigned int received_len;

			/* PIO path: unmap so the CPU may touch the buffer */
			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

			/* a NULL transfer_buffer means the data lives in a
			 * scatterlist; iterate it one mapping at a time
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* account only for the bytes unloaded into
				 * this sg entry
				 */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg)
			qh->use_sg = false;

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
1925
1926
1927
1928
1929
1930
/*
 * Pick a hardware endpoint for this qh and, when it turns out to be idle,
 * start the first queued URB on it.  Control transfers use the dedicated
 * control endpoint; bulk may fall back to the shared (multiplexed) bulk
 * endpoint when no other endpoint fits.  Returns 0 or -ENOSPC.
 * Called with the controller lock held.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* control transfers always use the dedicated control endpoint */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* Otherwise scan for the free endpoint whose FIFO size wastes the
	 * least space over this qh's (possibly high-bandwidth) maxpacket.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		/* skip endpoints already owned by another qh */
		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		/* the shared bulk endpoint is only a fallback (below) */
		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/* Workaround: don't place a bulk TX transfer that
			 * must start on toggle 1 onto an endpoint whose
			 * TXTYPE still says ISOC — the controller would
			 * start it on toggle 0 regardless of the toggle
			 * programmed in TXCSR, and the transfer would fail.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}

	/* no free endpoint: bulk can multiplex on the reserved bulk ep */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable the bulk NAK-timeout scheme for multiplexed
		 * requests: interval 8 for high-speed devices, 4 for
		 * full/low speed (RXINTERVAL/TXINTERVAL log encoding).
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	/* claim the chosen endpoint exclusively (not multiplexed) */
	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		/* multiplexed: idle only if we're first on the ring */
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
2046
/*
 * hc_driver.urb_enqueue: link the URB onto its endpoint, and on the first
 * URB for an endpoint allocate + precompute a musb_qh and schedule it onto
 * a hardware endpoint.  Returns 0 or a negative errno.
 */
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* The URB is on hep->urb_list now; if the endpoint already has a
	 * live qh (or linking failed) there is nothing more to do here.
	 */
	if (qh || ret)
		return ret;

	/* First URB for this endpoint: allocate and initialize a qh,
	 * precomputing register values so later (re)programming of the
	 * hardware endpoint is cheap and can run with irqs blocked.
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11..12 of wMaxPacketSize encode the high-bandwidth
	 * multiplier; only ISO may use it, and only when this core
	 * supports high-bandwidth ISO in the relevant direction.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* device address register value (from the pipe, not dev->devnum) */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute the RXTYPE/TXTYPE/TYPE0 register value:
	 * transfer type, target endpoint number, and speed code
	 */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* precompute the RXINTERVAL/TXINTERVAL register value */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/* full/low-speed interrupt uses the linear (frame count)
		 * encoding; high speed uses the logarithmic encoding and
		 * deliberately falls through to the ISO case below
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* Bulk/control: no NAK limit here.  A nonzero value would
		 * let the scheduler abandon a NAKing endpoint sooner, but
		 * interval 0 keeps transfer scheduling simple; partial
		 * NAK-limit support for multiplexed bulk lives in
		 * musb_schedule()/musb_bulk_nak_timeout() instead.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for hub / transaction-translator ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up TT info when talking through a hub's
			 * transaction translator
			 */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is NULL, or it is the qh that is already
	 * scheduled; only in the former case is there work left to do
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* some concurrent activity raced us and installed another
		 * qh (or the URB got unlinked); drop ours
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* NOTE: urb->start_frame is not set here for iso/intr */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		/* undo the link and free the qh we allocated */
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
2229
2230
2231
2232
2233
2234
2235
/*
 * Abort a transfer that is at the head of a hardware endpoint queue:
 * stop any DMA in flight (crediting its byte count to the URB), flush
 * the FIFO, and — unless the DMA abort failed — advance the schedule.
 * Called with the controller locked and irqs blocked.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			/* count whatever the channel moved before abort */
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling */
	if (ep->epnum && is_in) {
		/* flush without clearing the data toggle */
		csr = musb_h_flush_rxfifo(ep, 0);
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* NOTE(review): the second identical write is deliberate in
		 * this driver — presumably to make sure the cleared bits
		 * stick; confirm against the core's errata before removing.
		 */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* read back to flush the write */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
2293
/*
 * hc_driver.urb_dequeue: unlink an URB.  An URB that is not actively
 * programmed into the hardware is given back immediately; one at the
 * head of an active endpoint queue is aborted via musb_cleanup_urb().
 */
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/* Any URB not actively programmed into endpoint hardware can be
	 * given back right away: any URB that is not at the head of its
	 * endpoint queue, or whose qh is not the one currently owning the
	 * hardware endpoint, or whose qh is not ready.  Otherwise abort
	 * the transfer in hardware (musb_cleanup_urb also advances the
	 * schedule on success).
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		/* temporarily mark not-ready so giveback won't restart it */
		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else is using this qh and its URB list has
		 * emptied, recycle it.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
2351
2352
/*
 * hc_driver.endpoint_disable: shut down one endpoint direction, aborting
 * any transfer the hardware is running and giving back all queued URBs
 * with -ESHUTDOWN.
 */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* prevent the scheduler from (re)starting this qh */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		/* this qh owns the hardware endpoint: kick the head URB
		 * off the hardware first
		 */
		urb = next_urb(qh);

		/* NOTE(review): this path assumes at least one URB is still
		 * queued (next_urb() != NULL) whenever the qh owns the
		 * hardware endpoint — TODO confirm that invariant.
		 */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* abort it in hardware (also advances the schedule) */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others; musb_advance_schedule() gives
		 * each one back and moves the queue along, freeing the qh
		 * when the list empties.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* The hardware is busy with some other qh: just empty this
		 * queue; since !qh->is_ready nothing will activate these
		 * URBs as the schedule advances.  Then recycle the qh.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
2405
2406static int musb_h_get_frame_number(struct usb_hcd *hcd)
2407{
2408 struct musb *musb = hcd_to_musb(hcd);
2409
2410 return musb_readw(musb->mregs, MUSB_FRAME);
2411}
2412
2413static int musb_h_start(struct usb_hcd *hcd)
2414{
2415 struct musb *musb = hcd_to_musb(hcd);
2416
2417
2418
2419
2420 hcd->state = HC_STATE_RUNNING;
2421 musb->port1_status = 0;
2422 return 0;
2423}
2424
2425static void musb_h_stop(struct usb_hcd *hcd)
2426{
2427 musb_stop(hcd_to_musb(hcd));
2428 hcd->state = HC_STATE_HALT;
2429}
2430
2431static int musb_bus_suspend(struct usb_hcd *hcd)
2432{
2433 struct musb *musb = hcd_to_musb(hcd);
2434 u8 devctl;
2435
2436 if (!is_host_active(musb))
2437 return 0;
2438
2439 switch (musb->xceiv->state) {
2440 case OTG_STATE_A_SUSPEND:
2441 return 0;
2442 case OTG_STATE_A_WAIT_VRISE:
2443
2444
2445
2446
2447 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2448 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2449 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2450 break;
2451 default:
2452 break;
2453 }
2454
2455 if (musb->is_active) {
2456 WARNING("trying to suspend as %s while active\n",
2457 usb_otg_state_string(musb->xceiv->state));
2458 return -EBUSY;
2459 } else
2460 return 0;
2461}
2462
/* hc_driver.bus_resume: nothing to do here; resuming the child port
 * does the actual work.
 */
static int musb_bus_resume(struct usb_hcd *hcd)
{
	return 0;
}
2468
2469#ifndef CONFIG_MUSB_PIO_ONLY
2470
2471#define MUSB_USB_DMA_ALIGN 4
2472
2473struct musb_temp_buffer {
2474 void *kmalloc_ptr;
2475 void *old_xfer_buffer;
2476 u8 data[0];
2477};
2478
2479static void musb_free_temp_buffer(struct urb *urb)
2480{
2481 enum dma_data_direction dir;
2482 struct musb_temp_buffer *temp;
2483
2484 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2485 return;
2486
2487 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2488
2489 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2490 data);
2491
2492 if (dir == DMA_FROM_DEVICE) {
2493 memcpy(temp->old_xfer_buffer, temp->data,
2494 urb->transfer_buffer_length);
2495 }
2496 urb->transfer_buffer = temp->old_xfer_buffer;
2497 kfree(temp->kmalloc_ptr);
2498
2499 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2500}
2501
2502static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2503{
2504 enum dma_data_direction dir;
2505 struct musb_temp_buffer *temp;
2506 void *kmalloc_ptr;
2507 size_t kmalloc_size;
2508
2509 if (urb->num_sgs || urb->sg ||
2510 urb->transfer_buffer_length == 0 ||
2511 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2512 return 0;
2513
2514 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2515
2516
2517 kmalloc_size = urb->transfer_buffer_length +
2518 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2519
2520 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2521 if (!kmalloc_ptr)
2522 return -ENOMEM;
2523
2524
2525 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2526
2527
2528 temp->kmalloc_ptr = kmalloc_ptr;
2529 temp->old_xfer_buffer = urb->transfer_buffer;
2530 if (dir == DMA_TO_DEVICE)
2531 memcpy(temp->data, urb->transfer_buffer,
2532 urb->transfer_buffer_length);
2533 urb->transfer_buffer = temp->data;
2534
2535 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2536
2537 return 0;
2538}
2539
2540static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2541 gfp_t mem_flags)
2542{
2543 struct musb *musb = hcd_to_musb(hcd);
2544 int ret;
2545
2546
2547
2548
2549
2550
2551
2552 if (musb->hwvers < MUSB_HWVERS_1800)
2553 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2554
2555 ret = musb_alloc_temp_buffer(urb, mem_flags);
2556 if (ret)
2557 return ret;
2558
2559 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2560 if (ret)
2561 musb_free_temp_buffer(urb);
2562
2563 return ret;
2564}
2565
2566static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2567{
2568 struct musb *musb = hcd_to_musb(hcd);
2569
2570 usb_hcd_unmap_urb_for_dma(hcd, urb);
2571
2572
2573 if (musb->hwvers < MUSB_HWVERS_1800)
2574 return;
2575
2576 musb_free_temp_buffer(urb);
2577}
2578#endif
2579
/* Host controller driver ops handed to usbcore via usb_create_hcd(). */
static const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	/* hcd_priv holds a back-pointer to struct musb; see hcd_to_musb() */
	.hcd_priv_size		= sizeof(struct musb *),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* no .irq or .reset hooks here: interrupt handling is shared with
	 * the peripheral side of this dual-role controller
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	/* DMA-alignment bounce-buffer wrappers (hwvers >= 1.8 only) */
	.map_urb_for_dma	= musb_map_urb_for_dma,
	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
#endif

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
};
2611
2612int musb_host_alloc(struct musb *musb)
2613{
2614 struct device *dev = musb->controller;
2615
2616
2617 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2618 if (!musb->hcd)
2619 return -EINVAL;
2620
2621 *musb->hcd->hcd_priv = (unsigned long) musb;
2622 musb->hcd->self.uses_pio_for_control = 1;
2623 musb->hcd->uses_new_polling = 1;
2624 musb->hcd->has_tt = 1;
2625
2626 return 0;
2627}
2628
2629void musb_host_cleanup(struct musb *musb)
2630{
2631 if (musb->port_mode == MUSB_PORT_MODE_GADGET)
2632 return;
2633 usb_remove_hcd(musb->hcd);
2634 musb->hcd = NULL;
2635}
2636
/* Drop the reference taken by usb_create_hcd() in musb_host_alloc(). */
void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}
2641
2642int musb_host_setup(struct musb *musb, int power_budget)
2643{
2644 int ret;
2645 struct usb_hcd *hcd = musb->hcd;
2646
2647 MUSB_HST_MODE(musb);
2648 musb->xceiv->otg->default_a = 1;
2649 musb->xceiv->state = OTG_STATE_A_IDLE;
2650
2651 otg_set_host(musb->xceiv->otg, &hcd->self);
2652 hcd->self.otg_port = 1;
2653 musb->xceiv->otg->host = &hcd->self;
2654 hcd->power_budget = 2 * (power_budget ? : 250);
2655
2656 ret = usb_add_hcd(hcd, 0, 0);
2657 if (ret < 0)
2658 return ret;
2659
2660 return 0;
2661}
2662
/* Forward a root-hub resume request to usbcore. */
void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}
2667
2668void musb_host_poke_root_hub(struct musb *musb)
2669{
2670 MUSB_HST_MODE(musb);
2671 if (musb->hcd->status_urb)
2672 usb_hcd_poll_rh_status(musb->hcd);
2673 else
2674 usb_hcd_resume_root_hub(musb->hcd);
2675}
2676