/*
 * MUSB OTG driver host support
 */
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/delay.h>
39#include <linux/sched.h>
40#include <linux/slab.h>
41#include <linux/errno.h>
42#include <linux/list.h>
43#include <linux/dma-mapping.h>
44
45#include "musb_core.h"
46#include "musb_host.h"
47
/*
 * MUSB host-side (USB "A" role) support.
 *
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK transfers that cannot
 * claim a dedicated endpoint share the controller's bulk endpoint and
 * are multiplexed on it round-robin (see musb->in_bulk / out_bulk).
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled onto the remaining
 * endpoints.  That scheduling is simple and optimistic: a hardware
 * endpoint stays claimed by a device endpoint until its software queue
 * empties; there is no multiplexing of periodic transfers.
 */
97struct musb *hcd_to_musb(struct usb_hcd *hcd)
98{
99 return *(struct musb **) hcd->hcd_priv;
100}
101
102
103static void musb_ep_program(struct musb *musb, u8 epnum,
104 struct urb *urb, int is_out,
105 u8 *buf, u32 offset, u32 len);

/*
 * Clear the host TX FIFO.  Needed to avoid BABBLE errors.
 */
110static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
111{
112 struct musb *musb = ep->musb;
113 void __iomem *epio = ep->regs;
114 u16 csr;
115 int retries = 1000;
116
117 csr = musb_readw(epio, MUSB_TXCSR);
118 while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
119 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
120 musb_writew(epio, MUSB_TXCSR, csr);
121 csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * The FIFO may never drain here (for example if the device
		 * was disconnected mid-transfer), so bound the retries and
		 * warn rather than spinning forever with interrupts off.
		 */
137 if (dev_WARN_ONCE(musb->controller, retries-- < 1,
138 "Could not flush host TX%d fifo: csr: %04x\n",
139 ep->epnum, csr))
140 return;
141 }
142}
143
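/*
 * Scrub whatever is left in the endpoint-0 FIFO (TX or RX side) and
 * reset CSR0 for the next transfer; gives up after a few attempts.
 */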
144static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
145{
146 void __iomem *epio = ep->regs;
147 u16 csr;
148 int retries = 5;
149
150
151 do {
152 csr = musb_readw(epio, MUSB_TXCSR);
153 if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
154 break;
155 musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
156 csr = musb_readw(epio, MUSB_TXCSR);
157 udelay(10);
158 } while (--retries);
159
160 WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
161 ep->epnum, csr);
162
163
164 musb_writew(epio, MUSB_TXCSR, 0);
165}
166
/*
 * Start transmit.  Caller is responsible for locking shared resources;
 * the controller lock must be held.
 */
171static inline void musb_h_tx_start(struct musb_hw_ep *ep)
172{
173 u16 txcsr;
174
175
176 if (ep->epnum) {
177 txcsr = musb_readw(ep->regs, MUSB_TXCSR);
178 txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
179 musb_writew(ep->regs, MUSB_TXCSR, txcsr);
180 } else {
181 txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
182 musb_writew(ep->regs, MUSB_CSR0, txcsr);
183 }
184
185}
186
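/*
 * Enable the TX DMA request for this endpoint; the CPPI engine also
 * wants DMAMODE set before it will move data.
 */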
187static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
188{
189 u16 txcsr;
190
191
192 txcsr = musb_readw(ep->regs, MUSB_TXCSR);
193 txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
194 if (is_cppi_enabled(ep->musb))
195 txcsr |= MUSB_TXCSR_DMAMODE;
196 musb_writew(ep->regs, MUSB_TXCSR, txcsr);
197}
198
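/*
 * Track which qh currently owns each direction of a hardware endpoint.
 * Endpoints with a shared FIFO record the same qh for both directions.
 */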
199static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
200{
201 if (is_in != 0 || ep->is_shared_fifo)
202 ep->in_qh = qh;
203 if (is_in == 0 || ep->is_shared_fifo)
204 ep->out_qh = qh;
205}
206
207static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
208{
209 return is_in ? ep->in_qh : ep->out_qh;
210}
211
/*
 * Start the URB at the front of an endpoint's queue: program the
 * hardware endpoint and, for OUT transfers, kick off the first packet.
 *
 * Context: controller locked, irqs blocked.
 */
218static void
219musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
220{
221 u16 frame;
222 u32 len;
223 void __iomem *mbase = musb->mregs;
224 struct urb *urb = next_urb(qh);
225 void *buf = urb->transfer_buffer;
226 u32 offset = 0;
227 struct musb_hw_ep *hw_ep = qh->hw_ep;
228 unsigned pipe = urb->pipe;
229 u8 address = usb_pipedevice(pipe);
230 int epnum = hw_ep->epnum;
231
232
233 qh->offset = 0;
234 qh->segsize = 0;
235
236
237 switch (qh->type) {
238 case USB_ENDPOINT_XFER_CONTROL:
239
240 is_in = 0;
241 musb->ep0_stage = MUSB_EP0_START;
242 buf = urb->setup_packet;
243 len = 8;
244 break;
245 case USB_ENDPOINT_XFER_ISOC:
246 qh->iso_idx = 0;
247 qh->frame = 0;
248 offset = urb->iso_frame_desc[0].offset;
249 len = urb->iso_frame_desc[0].length;
250 break;
251 default:
252
253 buf = urb->transfer_buffer + urb->actual_length;
254 len = urb->transfer_buffer_length - urb->actual_length;
255 }
256
257 dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
258 qh, urb, address, qh->epnum,
259 is_in ? "in" : "out",
260 ({char *s; switch (qh->type) {
261 case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
262 case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
263 case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
264 default: s = "-intr"; break;
265 } s; }),
266 epnum, buf + offset, len);
267
268
269 musb_ep_set_qh(hw_ep, is_in, qh);
270 musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
271
272
273 if (is_in)
274 return;
275
276
277 switch (qh->type) {
278 case USB_ENDPOINT_XFER_ISOC:
279 case USB_ENDPOINT_XFER_INT:
280 dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
281 frame = musb_readw(mbase, MUSB_FRAME);
		/*
		 * FIXME: frame-based scheduling of periodic TX is not
		 * really implemented; the branch below always behaves as
		 * if URB_ISO_ASAP had been requested.
		 */
285 if (1) {
286
287
288
289 qh->frame = 0;
290 goto start;
291 } else {
292 qh->frame = urb->start_frame;
293
294 dev_dbg(musb->controller, "SOF for %d\n", epnum);
295#if 1
296 musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
297#endif
298 }
299 break;
300 default:
301start:
302 dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
303 hw_ep->tx_channel ? "dma" : "pio");
304
305 if (!hw_ep->tx_channel)
306 musb_h_tx_start(hw_ep);
307 else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
308 musb_h_tx_dma_start(hw_ep);
309 }
310}
311
312
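/* Hand a completed URB back to usbcore, dropping the lock around the callback. */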
313static void musb_giveback(struct musb *musb, struct urb *urb, int status)
314__releases(musb->lock)
315__acquires(musb->lock)
316{
317 dev_dbg(musb->controller,
318 "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
319 urb, urb->complete, status,
320 usb_pipedevice(urb->pipe),
321 usb_pipeendpoint(urb->pipe),
322 usb_pipein(urb->pipe) ? "in" : "out",
323 urb->actual_length, urb->transfer_buffer_length
324 );
325
326 usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
327 spin_unlock(&musb->lock);
328 usb_hcd_giveback_urb(musb->hcd, urb, status);
329 spin_lock(&musb->lock);
330}
331
332
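/*
 * Save the hardware's current data toggle for this endpoint so the next
 * URB queued to the same device endpoint resumes with the right value.
 */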
333static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
334 struct urb *urb)
335{
336 void __iomem *epio = qh->hw_ep->regs;
337 u16 csr;
338
339
340
341
342
343
344 if (is_in)
345 csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
346 else
347 csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
348
349 usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
350}
351
/*
 * Advance this hardware endpoint's queue: complete the given URB, free
 * the qh (and release DMA channels) if its endpoint queue is now empty,
 * and start the next ready URB, if any.
 *
 * Context: caller owns controller lock, IRQs are blocked.
 */
359static void musb_advance_schedule(struct musb *musb, struct urb *urb,
360 struct musb_hw_ep *hw_ep, int is_in)
361{
362 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
363 struct musb_hw_ep *ep = qh->hw_ep;
364 int ready = qh->is_ready;
365 int status;
366
367 status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
368
369
370 switch (qh->type) {
371 case USB_ENDPOINT_XFER_BULK:
372 case USB_ENDPOINT_XFER_INT:
373 musb_save_toggle(qh, is_in, urb);
374 break;
375 case USB_ENDPOINT_XFER_ISOC:
376 if (status == 0 && urb->error_count)
377 status = -EXDEV;
378 break;
379 }
380
381 qh->is_ready = 0;
382 musb_giveback(musb, urb, status);
383 qh->is_ready = ready;
384
385
386
387
388 if (list_empty(&qh->hep->urb_list)) {
389 struct list_head *head;
390 struct dma_controller *dma = musb->dma_controller;
391
392 if (is_in) {
393 ep->rx_reinit = 1;
394 if (ep->rx_channel) {
395 dma->channel_release(ep->rx_channel);
396 ep->rx_channel = NULL;
397 }
398 } else {
399 ep->tx_reinit = 1;
400 if (ep->tx_channel) {
401 dma->channel_release(ep->tx_channel);
402 ep->tx_channel = NULL;
403 }
404 }
405
406
407 musb_ep_set_qh(ep, is_in, NULL);
408 qh->hep->hcpriv = NULL;
409
410 switch (qh->type) {
411
412 case USB_ENDPOINT_XFER_CONTROL:
413 case USB_ENDPOINT_XFER_BULK:
414
415
416
417 if (qh->mux == 1) {
418 head = qh->ring.prev;
419 list_del(&qh->ring);
420 kfree(qh);
421 qh = first_qh(head);
422 break;
423 }
424
425 case USB_ENDPOINT_XFER_ISOC:
426 case USB_ENDPOINT_XFER_INT:
427
428
429
430
431 kfree(qh);
432 qh = NULL;
433 break;
434 }
435 }
436
437 if (qh != NULL && qh->is_ready) {
438 dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
439 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
440 musb_start_urb(musb, is_in, qh);
441 }
442}
443
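/* Flush a host RX endpoint's FIFO and return the resulting RXCSR value. */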
444static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
445{
	/*
	 * Don't let the FIFO refill while we drain it: clear REQPKT,
	 * AUTOREQ and AUTOCLEAR, and leave the data toggle alone (it may
	 * not have been saved yet).
	 */
450 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
451 csr &= ~(MUSB_RXCSR_H_REQPKT
452 | MUSB_RXCSR_H_AUTOREQ
453 | MUSB_RXCSR_AUTOCLEAR);
454
	/* write twice, in case the endpoint is double buffered */
456 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
457 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
458
459
460 return musb_readw(hw_ep->regs, MUSB_RXCSR);
461}
462
/*
 * PIO RX for a packet (or part of it).
 */
466static bool
467musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
468{
469 u16 rx_count;
470 u8 *buf;
471 u16 csr;
472 bool done = false;
473 u32 length;
474 int do_flush = 0;
475 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
476 void __iomem *epio = hw_ep->regs;
477 struct musb_qh *qh = hw_ep->in_qh;
478 int pipe = urb->pipe;
479 void *buffer = urb->transfer_buffer;
480
481
482 rx_count = musb_readw(epio, MUSB_RXCOUNT);
483 dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
484 urb->transfer_buffer, qh->offset,
485 urb->transfer_buffer_length);
486
487
488 if (usb_pipeisoc(pipe)) {
489 int status = 0;
490 struct usb_iso_packet_descriptor *d;
491
492 if (iso_err) {
493 status = -EILSEQ;
494 urb->error_count++;
495 }
496
497 d = urb->iso_frame_desc + qh->iso_idx;
498 buf = buffer + d->offset;
499 length = d->length;
500 if (rx_count > length) {
501 if (status == 0) {
502 status = -EOVERFLOW;
503 urb->error_count++;
504 }
505 dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
506 do_flush = 1;
507 } else
508 length = rx_count;
509 urb->actual_length += length;
510 d->actual_length = length;
511
512 d->status = status;
513
514
515 done = (++qh->iso_idx >= urb->number_of_packets);
516 } else {
517
518 buf = buffer + qh->offset;
519 length = urb->transfer_buffer_length - qh->offset;
520 if (rx_count > length) {
521 if (urb->status == -EINPROGRESS)
522 urb->status = -EOVERFLOW;
523 dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
524 do_flush = 1;
525 } else
526 length = rx_count;
527 urb->actual_length += length;
528 qh->offset += length;
529
530
531 done = (urb->actual_length == urb->transfer_buffer_length)
532 || (rx_count < qh->maxpacket)
533 || (urb->status != -EINPROGRESS);
534 if (done
535 && (urb->status == -EINPROGRESS)
536 && (urb->transfer_flags & URB_SHORT_NOT_OK)
537 && (urb->actual_length
538 < urb->transfer_buffer_length))
539 urb->status = -EREMOTEIO;
540 }
541
542 musb_read_fifo(hw_ep, length, buf);
543
544 csr = musb_readw(epio, MUSB_RXCSR);
545 csr |= MUSB_RXCSR_H_WZC_BITS;
546 if (unlikely(do_flush))
547 musb_h_flush_rxfifo(hw_ep, csr);
548 else {
549
550 csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
551 if (!done)
552 csr |= MUSB_RXCSR_H_REQPKT;
553 musb_writew(epio, MUSB_RXCSR, csr);
554 }
555
556 return done;
557}
558
/*
 * Reinitialize an RX endpoint before (re)using it for a new device or
 * transfer type: flush stale FIFO state, then program the target
 * address, protocol/type, polling interval and max packet size.
 */
567static void
568musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
569{
570 struct musb_hw_ep *ep = musb->endpoints + epnum;
571 u16 csr;
572
573
574
575
576
577
578
579 if (ep->is_shared_fifo) {
580 csr = musb_readw(ep->regs, MUSB_TXCSR);
581 if (csr & MUSB_TXCSR_MODE) {
582 musb_h_tx_flush_fifo(ep);
583 csr = musb_readw(ep->regs, MUSB_TXCSR);
584 musb_writew(ep->regs, MUSB_TXCSR,
585 csr | MUSB_TXCSR_FRCDATATOG);
586 }
587
588
589
590
591
592 if (csr & MUSB_TXCSR_DMAMODE)
593 musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
594 musb_writew(ep->regs, MUSB_TXCSR, 0);
595
596
597 } else {
598 csr = musb_readw(ep->regs, MUSB_RXCSR);
599 if (csr & MUSB_RXCSR_RXPKTRDY)
600 WARNING("rx%d, packet/%d ready?\n", ep->epnum,
601 musb_readw(ep->regs, MUSB_RXCOUNT));
602
603 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
604 }
605
606
607 if (musb->is_multipoint) {
608 musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
609 musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
610 musb_write_rxhubport(musb, epnum, qh->h_port_reg);
611 } else
612 musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
613
614
615 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
616 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
617
618
619
620
621 if (musb->double_buffer_not_ok)
622 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
623 else
624 musb_writew(ep->regs, MUSB_RXMAXP,
625 qh->maxpacket | ((qh->hb_mult - 1) << 11));
626
627 ep->rx_reinit = 0;
628}
629
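/*
 * Configure TXCSR for a Mentor (Inventra) or UX500 DMA transfer: DMA
 * mode 1 for multi-packet requests (with AUTOSET where safe), mode 0
 * otherwise; the request length is capped at the channel maximum.
 */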
630static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
631 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
632 struct urb *urb, u32 offset,
633 u32 *length, u8 *mode)
634{
635 struct dma_channel *channel = hw_ep->tx_channel;
636 void __iomem *epio = hw_ep->regs;
637 u16 pkt_size = qh->maxpacket;
638 u16 csr;
639
640 if (*length > channel->max_len)
641 *length = channel->max_len;
642
643 csr = musb_readw(epio, MUSB_TXCSR);
644 if (*length > pkt_size) {
645 *mode = 1;
646 csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/*
		 * AUTOSET lets the controller raise TXPKTRDY by itself, but
		 * that is only safe when each DMA request is a single USB
		 * packet: allow it for normal endpoints and for bulk
		 * endpoints whose transfers can be split across the FIFO,
		 * not for high-bandwidth (hb_mult > 1) isochronous TX.
		 */
657 if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
658 can_bulk_split(hw_ep->musb, qh->type)))
659 csr |= MUSB_TXCSR_AUTOSET;
660 } else {
661 *mode = 0;
662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
663 csr |= MUSB_TXCSR_DMAENAB;
664 }
665 channel->desired_mode = *mode;
666 musb_writew(epio, MUSB_TXCSR, csr);
667
668 return 0;
669}
670
671static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
672 struct musb_hw_ep *hw_ep,
673 struct musb_qh *qh,
674 struct urb *urb,
675 u32 offset,
676 u32 *length,
677 u8 *mode)
678{
679 struct dma_channel *channel = hw_ep->tx_channel;
680
681 if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
682 return -ENODEV;
683
684 channel->actual_len = 0;
685
686
687
688
689
690 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
691
692 return 0;
693}
694
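/*
 * Set up and start the TX DMA channel for (part of) a URB.  Returns
 * false, with the channel released, if it cannot be programmed, so the
 * caller can fall back to PIO.
 */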
695static bool musb_tx_dma_program(struct dma_controller *dma,
696 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
697 struct urb *urb, u32 offset, u32 length)
698{
699 struct dma_channel *channel = hw_ep->tx_channel;
700 u16 pkt_size = qh->maxpacket;
701 u8 mode;
702 int res;
703
704 if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
705 res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
706 offset, &length, &mode);
707 else
708 res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
709 offset, &length, &mode);
710 if (res)
711 return false;
712
713 qh->segsize = length;
714
	/*
	 * Make sure the URB data is visible in memory before the DMA
	 * engine is handed the buffer below.
	 */
719 wmb();
720
721 if (!dma->channel_program(channel, pkt_size, mode,
722 urb->transfer_dma + offset, length)) {
723 void __iomem *epio = hw_ep->regs;
724 u16 csr;
725
726 dma->channel_release(channel);
727 hw_ep->tx_channel = NULL;
728
729 csr = musb_readw(epio, MUSB_TXCSR);
730 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
731 musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
732 return false;
733 }
734 return true;
735}
736
/*
 * Program an HDRC endpoint as per the given URB.
 * Context: irqs blocked, controller lock held.
 */
741static void musb_ep_program(struct musb *musb, u8 epnum,
742 struct urb *urb, int is_out,
743 u8 *buf, u32 offset, u32 len)
744{
745 struct dma_controller *dma_controller;
746 struct dma_channel *dma_channel;
747 u8 dma_ok;
748 void __iomem *mbase = musb->mregs;
749 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
750 void __iomem *epio = hw_ep->regs;
751 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
752 u16 packet_sz = qh->maxpacket;
753 u8 use_dma = 1;
754 u16 csr;
755
756 dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
757 "h_addr%02x h_port%02x bytes %d\n",
758 is_out ? "-->" : "<--",
759 epnum, urb, urb->dev->speed,
760 qh->addr_reg, qh->epnum, is_out ? "out" : "in",
761 qh->h_addr_reg, qh->h_port_reg,
762 len);
763
764 musb_ep_select(mbase, epnum);
765
766 if (is_out && !len) {
767 use_dma = 0;
768 csr = musb_readw(epio, MUSB_TXCSR);
769 csr &= ~MUSB_TXCSR_DMAENAB;
770 musb_writew(epio, MUSB_TXCSR, csr);
771 hw_ep->tx_channel = NULL;
772 }
773
774
775 dma_controller = musb->dma_controller;
776 if (use_dma && is_dma_capable() && epnum && dma_controller) {
777 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
778 if (!dma_channel) {
779 dma_channel = dma_controller->channel_alloc(
780 dma_controller, hw_ep, is_out);
781 if (is_out)
782 hw_ep->tx_channel = dma_channel;
783 else
784 hw_ep->rx_channel = dma_channel;
785 }
786 } else
787 dma_channel = NULL;
788
789
790
791
792 if (is_out) {
793 u16 csr;
794 u16 int_txe;
795 u16 load_count;
796
797 csr = musb_readw(epio, MUSB_TXCSR);
798
799
800 int_txe = musb->intrtxe;
801 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
802
803
804 if (epnum) {
805
806
807
808
809
810
811 if (!hw_ep->tx_double_buffered)
812 musb_h_tx_flush_fifo(hw_ep);
813
814
815
816
817
818
819 csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
820 | MUSB_TXCSR_AUTOSET
821 | MUSB_TXCSR_DMAENAB
822 | MUSB_TXCSR_FRCDATATOG
823 | MUSB_TXCSR_H_RXSTALL
824 | MUSB_TXCSR_H_ERROR
825 | MUSB_TXCSR_TXPKTRDY
826 );
827 csr |= MUSB_TXCSR_MODE;
828
829 if (!hw_ep->tx_double_buffered) {
830 if (usb_gettoggle(urb->dev, qh->epnum, 1))
831 csr |= MUSB_TXCSR_H_WR_DATATOGGLE
832 | MUSB_TXCSR_H_DATATOGGLE;
833 else
834 csr |= MUSB_TXCSR_CLRDATATOG;
835 }
836
837 musb_writew(epio, MUSB_TXCSR, csr);
838
839 csr &= ~MUSB_TXCSR_DMAMODE;
840 musb_writew(epio, MUSB_TXCSR, csr);
841 csr = musb_readw(epio, MUSB_TXCSR);
842 } else {
843
844 musb_h_ep0_flush_fifo(hw_ep);
845 }
846
847
848 if (musb->is_multipoint) {
849 musb_write_txfunaddr(musb, epnum, qh->addr_reg);
850 musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
851 musb_write_txhubport(musb, epnum, qh->h_port_reg);
852
853 } else
854 musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
855
856
857 if (epnum) {
858 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
859 if (musb->double_buffer_not_ok) {
860 musb_writew(epio, MUSB_TXMAXP,
861 hw_ep->max_packet_sz_tx);
862 } else if (can_bulk_split(musb, qh->type)) {
863 qh->hb_mult = hw_ep->max_packet_sz_tx
864 / packet_sz;
865 musb_writew(epio, MUSB_TXMAXP, packet_sz
866 | ((qh->hb_mult) - 1) << 11);
867 } else {
868 musb_writew(epio, MUSB_TXMAXP,
869 qh->maxpacket |
870 ((qh->hb_mult - 1) << 11));
871 }
872 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
873 } else {
874 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
875 if (musb->is_multipoint)
876 musb_writeb(epio, MUSB_TYPE0,
877 qh->type_reg);
878 }
879
880 if (can_bulk_split(musb, qh->type))
881 load_count = min((u32) hw_ep->max_packet_sz_tx,
882 len);
883 else
884 load_count = min((u32) packet_sz, len);
885
886 if (dma_channel && musb_tx_dma_program(dma_controller,
887 hw_ep, qh, urb, offset, len))
888 load_count = 0;
889
890 if (load_count) {
891
892 qh->segsize = load_count;
893 if (!buf) {
894 sg_miter_start(&qh->sg_miter, urb->sg, 1,
895 SG_MITER_ATOMIC
896 | SG_MITER_FROM_SG);
897 if (!sg_miter_next(&qh->sg_miter)) {
898 dev_err(musb->controller,
899 "error: sg"
900 "list empty\n");
901 sg_miter_stop(&qh->sg_miter);
902 goto finish;
903 }
904 buf = qh->sg_miter.addr + urb->sg->offset +
905 urb->actual_length;
906 load_count = min_t(u32, load_count,
907 qh->sg_miter.length);
908 musb_write_fifo(hw_ep, load_count, buf);
909 qh->sg_miter.consumed = load_count;
910 sg_miter_stop(&qh->sg_miter);
911 } else
912 musb_write_fifo(hw_ep, load_count, buf);
913 }
914finish:
915
916 musb_writew(mbase, MUSB_INTRTXE, int_txe);
917
918
919 } else {
920 u16 csr;
921
922 if (hw_ep->rx_reinit) {
923 musb_rx_reinit(musb, qh, epnum);
924
925
926 if (usb_gettoggle(urb->dev, qh->epnum, 0))
927 csr = MUSB_RXCSR_H_WR_DATATOGGLE
928 | MUSB_RXCSR_H_DATATOGGLE;
929 else
930 csr = 0;
931 if (qh->type == USB_ENDPOINT_XFER_INT)
932 csr |= MUSB_RXCSR_DISNYET;
933
934 } else {
935 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
936
937 if (csr & (MUSB_RXCSR_RXPKTRDY
938 | MUSB_RXCSR_DMAENAB
939 | MUSB_RXCSR_H_REQPKT))
940 ERR("broken !rx_reinit, ep%d csr %04x\n",
941 hw_ep->epnum, csr);
942
943
944 csr &= MUSB_RXCSR_DISNYET;
945 }
946
947
948
949 if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
950
951 dma_channel->actual_len = 0L;
952 qh->segsize = len;
953
954
955 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
956 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
957
958
959
960
961
962 dma_ok = dma_controller->channel_program(dma_channel,
963 packet_sz, !(urb->transfer_flags &
964 URB_SHORT_NOT_OK),
965 urb->transfer_dma + offset,
966 qh->segsize);
967 if (!dma_ok) {
968 dma_controller->channel_release(dma_channel);
969 hw_ep->rx_channel = dma_channel = NULL;
970 } else
971 csr |= MUSB_RXCSR_DMAENAB;
972 }
973
974 csr |= MUSB_RXCSR_H_REQPKT;
975 dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
976 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
977 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
978 }
979}
980
/*
 * NAK timeout on a multiplexed bulk endpoint: rotate the current qh to
 * the tail of the shared bulk list so other devices get serviced, then
 * start the transfer at the new head.
 */
984static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
985 int is_in)
986{
987 struct dma_channel *dma;
988 struct urb *urb;
989 void __iomem *mbase = musb->mregs;
990 void __iomem *epio = ep->regs;
991 struct musb_qh *cur_qh, *next_qh;
992 u16 rx_csr, tx_csr;
993
994 musb_ep_select(mbase, ep->epnum);
995 if (is_in) {
996 dma = is_dma_capable() ? ep->rx_channel : NULL;
997
998
999 rx_csr = musb_readw(epio, MUSB_RXCSR);
1000 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1001 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1002 musb_writew(epio, MUSB_RXCSR, rx_csr);
1003
1004 cur_qh = first_qh(&musb->in_bulk);
1005 } else {
1006 dma = is_dma_capable() ? ep->tx_channel : NULL;
1007
1008
1009 tx_csr = musb_readw(epio, MUSB_TXCSR);
1010 tx_csr |= MUSB_TXCSR_H_WZC_BITS;
1011 tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
1012 musb_writew(epio, MUSB_TXCSR, tx_csr);
1013
1014 cur_qh = first_qh(&musb->out_bulk);
1015 }
1016 if (cur_qh) {
1017 urb = next_urb(cur_qh);
1018 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1019 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1020 musb->dma_controller->channel_abort(dma);
1021 urb->actual_length += dma->actual_len;
1022 dma->actual_len = 0L;
1023 }
1024 musb_save_toggle(cur_qh, is_in, urb);
1025
1026 if (is_in) {
1027
1028 list_move_tail(&cur_qh->ring, &musb->in_bulk);
1029
1030
1031 next_qh = first_qh(&musb->in_bulk);
1032
1033
1034 ep->rx_reinit = 1;
1035 } else {
1036
1037 list_move_tail(&cur_qh->ring, &musb->out_bulk);
1038
1039
1040 next_qh = first_qh(&musb->out_bulk);
1041
1042
1043 ep->tx_reinit = 1;
1044 }
1045 musb_start_urb(musb, is_in, next_qh);
1046 }
1047}
1048
/*
 * Service the default (control) endpoint's data stage: copy the next
 * chunk between the FIFO and the URB buffer and/or advance ep0_stage.
 * Returns true when another DATA-stage packet should follow.
 */
1053static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
1054{
1055 bool more = false;
1056 u8 *fifo_dest = NULL;
1057 u16 fifo_count = 0;
1058 struct musb_hw_ep *hw_ep = musb->control_ep;
1059 struct musb_qh *qh = hw_ep->in_qh;
1060 struct usb_ctrlrequest *request;
1061
1062 switch (musb->ep0_stage) {
1063 case MUSB_EP0_IN:
1064 fifo_dest = urb->transfer_buffer + urb->actual_length;
1065 fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
1066 urb->actual_length);
1067 if (fifo_count < len)
1068 urb->status = -EOVERFLOW;
1069
1070 musb_read_fifo(hw_ep, fifo_count, fifo_dest);
1071
1072 urb->actual_length += fifo_count;
1073 if (len < qh->maxpacket) {
1074
1075
1076
1077 } else if (urb->actual_length <
1078 urb->transfer_buffer_length)
1079 more = true;
1080 break;
1081 case MUSB_EP0_START:
1082 request = (struct usb_ctrlrequest *) urb->setup_packet;
1083
1084 if (!request->wLength) {
1085 dev_dbg(musb->controller, "start no-DATA\n");
1086 break;
1087 } else if (request->bRequestType & USB_DIR_IN) {
1088 dev_dbg(musb->controller, "start IN-DATA\n");
1089 musb->ep0_stage = MUSB_EP0_IN;
1090 more = true;
1091 break;
1092 } else {
1093 dev_dbg(musb->controller, "start OUT-DATA\n");
1094 musb->ep0_stage = MUSB_EP0_OUT;
1095 more = true;
1096 }
1097
1098 case MUSB_EP0_OUT:
1099 fifo_count = min_t(size_t, qh->maxpacket,
1100 urb->transfer_buffer_length -
1101 urb->actual_length);
1102 if (fifo_count) {
1103 fifo_dest = (u8 *) (urb->transfer_buffer
1104 + urb->actual_length);
1105 dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
1106 fifo_count,
1107 (fifo_count == 1) ? "" : "s",
1108 fifo_dest);
1109 musb_write_fifo(hw_ep, fifo_count, fifo_dest);
1110
1111 urb->actual_length += fifo_count;
1112 more = true;
1113 }
1114 break;
1115 default:
1116 ERR("bogus ep0 stage %d\n", musb->ep0_stage);
1117 break;
1118 }
1119
1120 return more;
1121}
1122
/*
 * Handle default endpoint interrupt as host.  Only called in IRQ time
 * from musb_interrupt().
 *
 * Called with controller irqlocked.
 */
1129irqreturn_t musb_h_ep0_irq(struct musb *musb)
1130{
1131 struct urb *urb;
1132 u16 csr, len;
1133 int status = 0;
1134 void __iomem *mbase = musb->mregs;
1135 struct musb_hw_ep *hw_ep = musb->control_ep;
1136 void __iomem *epio = hw_ep->regs;
1137 struct musb_qh *qh = hw_ep->in_qh;
1138 bool complete = false;
1139 irqreturn_t retval = IRQ_NONE;
1140
1141
1142 urb = next_urb(qh);
1143
1144 musb_ep_select(mbase, 0);
1145 csr = musb_readw(epio, MUSB_CSR0);
1146 len = (csr & MUSB_CSR0_RXPKTRDY)
1147 ? musb_readb(epio, MUSB_COUNT0)
1148 : 0;
1149
1150 dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
1151 csr, qh, len, urb, musb->ep0_stage);
1152
1153
1154 if (MUSB_EP0_STATUS == musb->ep0_stage) {
1155 retval = IRQ_HANDLED;
1156 complete = true;
1157 }
1158
1159
1160 if (csr & MUSB_CSR0_H_RXSTALL) {
1161 dev_dbg(musb->controller, "STALLING ENDPOINT\n");
1162 status = -EPIPE;
1163
1164 } else if (csr & MUSB_CSR0_H_ERROR) {
1165 dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
1166 status = -EPROTO;
1167
1168 } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1169 dev_dbg(musb->controller, "control NAK timeout\n");
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179 musb_writew(epio, MUSB_CSR0, 0);
1180 retval = IRQ_HANDLED;
1181 }
1182
1183 if (status) {
1184 dev_dbg(musb->controller, "aborting\n");
1185 retval = IRQ_HANDLED;
1186 if (urb)
1187 urb->status = status;
1188 complete = true;
1189
1190
1191 if (csr & MUSB_CSR0_H_REQPKT) {
1192 csr &= ~MUSB_CSR0_H_REQPKT;
1193 musb_writew(epio, MUSB_CSR0, csr);
1194 csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1195 musb_writew(epio, MUSB_CSR0, csr);
1196 } else {
1197 musb_h_ep0_flush_fifo(hw_ep);
1198 }
1199
1200 musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1201
1202
1203 musb_writew(epio, MUSB_CSR0, 0);
1204 }
1205
1206 if (unlikely(!urb)) {
1207
1208
1209 ERR("no URB for end 0\n");
1210
1211 musb_h_ep0_flush_fifo(hw_ep);
1212 goto done;
1213 }
1214
1215 if (!complete) {
1216
1217 if (musb_h_ep0_continue(musb, len, urb)) {
1218
1219 csr = (MUSB_EP0_IN == musb->ep0_stage)
1220 ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1221 } else {
1222
1223 if (usb_pipeout(urb->pipe)
1224 || !urb->transfer_buffer_length)
1225 csr = MUSB_CSR0_H_STATUSPKT
1226 | MUSB_CSR0_H_REQPKT;
1227 else
1228 csr = MUSB_CSR0_H_STATUSPKT
1229 | MUSB_CSR0_TXPKTRDY;
1230
1231
1232 csr |= MUSB_CSR0_H_DIS_PING;
1233
1234
1235 musb->ep0_stage = MUSB_EP0_STATUS;
1236
1237 dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
1238
1239 }
1240 musb_writew(epio, MUSB_CSR0, csr);
1241 retval = IRQ_HANDLED;
1242 } else
1243 musb->ep0_stage = MUSB_EP0_IDLE;
1244
1245
1246 if (complete)
1247 musb_advance_schedule(musb, urb, hw_ep, 1);
1248done:
1249 return retval;
1250}
1251
1252
1253#ifdef CONFIG_USB_INVENTRA_DMA
/*
 * TX (OUT) path with Mentor (Inventra) DMA: musb_ep_program() programs
 * the channel via musb_tx_dma_program(), either in mode 1 (whole
 * transfer) or mode 0 (one packet per request).  The completion path in
 * musb_host_tx() then sets TXPKTRDY itself for mode 0 and for a final
 * short packet in mode 1.
 */
1267#endif
1268
1269
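/*
 * Service a TX-ready or DMA-completion interrupt for an OUT endpoint:
 * handle stall/error/NAK-timeout, account for the data already sent,
 * then either complete the URB or queue the next packet.
 */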
1270void musb_host_tx(struct musb *musb, u8 epnum)
1271{
1272 int pipe;
1273 bool done = false;
1274 u16 tx_csr;
1275 size_t length = 0;
1276 size_t offset = 0;
1277 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1278 void __iomem *epio = hw_ep->regs;
1279 struct musb_qh *qh = hw_ep->out_qh;
1280 struct urb *urb = next_urb(qh);
1281 u32 status = 0;
1282 void __iomem *mbase = musb->mregs;
1283 struct dma_channel *dma;
1284 bool transfer_pending = false;
1285
1286 musb_ep_select(mbase, epnum);
1287 tx_csr = musb_readw(epio, MUSB_TXCSR);
1288
1289
1290 if (!urb) {
1291 dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1292 return;
1293 }
1294
1295 pipe = urb->pipe;
1296 dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1297 dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1298 dma ? ", dma" : "");
1299
1300
1301 if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1302
1303 dev_dbg(musb->controller, "TX end %d stall\n", epnum);
1304
1305
1306 status = -EPIPE;
1307
1308 } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1309
1310 dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
1311
1312 status = -ETIMEDOUT;
1313
1314 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1315 if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
1316 && !list_is_singular(&musb->out_bulk)) {
1317 dev_dbg(musb->controller,
1318 "NAK timeout on TX%d ep\n", epnum);
1319 musb_bulk_nak_timeout(musb, hw_ep, 0);
1320 } else {
1321 dev_dbg(musb->controller,
1322 "TX end=%d device not responding\n", epnum);
1323
1324
1325
1326
1327
1328
1329
1330
1331 musb_ep_select(mbase, epnum);
1332 musb_writew(epio, MUSB_TXCSR,
1333 MUSB_TXCSR_H_WZC_BITS
1334 | MUSB_TXCSR_TXPKTRDY);
1335 }
1336 return;
1337 }
1338
1339done:
1340 if (status) {
1341 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1342 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1343 musb->dma_controller->channel_abort(dma);
1344 }
1345
1346
1347
1348
1349 musb_h_tx_flush_fifo(hw_ep);
1350 tx_csr &= ~(MUSB_TXCSR_AUTOSET
1351 | MUSB_TXCSR_DMAENAB
1352 | MUSB_TXCSR_H_ERROR
1353 | MUSB_TXCSR_H_RXSTALL
1354 | MUSB_TXCSR_H_NAKTIMEOUT
1355 );
1356
1357 musb_ep_select(mbase, epnum);
1358 musb_writew(epio, MUSB_TXCSR, tx_csr);
1359
1360 musb_writew(epio, MUSB_TXCSR, tx_csr);
1361 musb_writeb(epio, MUSB_TXINTERVAL, 0);
1362
1363 done = true;
1364 }
1365
1366
1367 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1368 dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1369 return;
1370 }
1371
1372 if (is_dma_capable() && dma && !status) {
1373
1374
1375
1376
1377
1378
1379
1380
1381 if (tx_csr & MUSB_TXCSR_DMAMODE) {
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398 tx_csr &= musb_readw(epio, MUSB_TXCSR);
1399 if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1400 tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1401 MUSB_TXCSR_TXPKTRDY);
1402 musb_writew(epio, MUSB_TXCSR,
1403 tx_csr | MUSB_TXCSR_H_WZC_BITS);
1404 }
1405 tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1406 MUSB_TXCSR_TXPKTRDY);
1407 musb_writew(epio, MUSB_TXCSR,
1408 tx_csr | MUSB_TXCSR_H_WZC_BITS);
1409
1410
1411
1412
1413
1414
1415
1416 tx_csr = musb_readw(epio, MUSB_TXCSR);
1417 }
1418
1419
1420
1421
1422
1423
1424
1425
1426 if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1427 dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
1428 "CSR %04x\n", tx_csr);
1429 return;
1430 }
1431 }
1432
1433 if (!status || dma || usb_pipeisoc(pipe)) {
1434 if (dma)
1435 length = dma->actual_len;
1436 else
1437 length = qh->segsize;
1438 qh->offset += length;
1439
1440 if (usb_pipeisoc(pipe)) {
1441 struct usb_iso_packet_descriptor *d;
1442
1443 d = urb->iso_frame_desc + qh->iso_idx;
1444 d->actual_length = length;
1445 d->status = status;
1446 if (++qh->iso_idx >= urb->number_of_packets) {
1447 done = true;
1448 } else {
1449 d++;
1450 offset = d->offset;
1451 length = d->length;
1452 }
1453 } else if (dma && urb->transfer_buffer_length == qh->offset) {
1454 done = true;
1455 } else {
1456
1457 if (qh->segsize < qh->maxpacket)
1458 done = true;
1459 else if (qh->offset == urb->transfer_buffer_length
1460 && !(urb->transfer_flags
1461 & URB_ZERO_PACKET))
1462 done = true;
1463 if (!done) {
1464 offset = qh->offset;
1465 length = urb->transfer_buffer_length - offset;
1466 transfer_pending = true;
1467 }
1468 }
1469 }
1470
1471
1472
1473
1474 if (urb->status != -EINPROGRESS) {
1475 done = true;
1476 if (status == 0)
1477 status = urb->status;
1478 }
1479
1480 if (done) {
1481
1482 urb->status = status;
1483 urb->actual_length = qh->offset;
1484 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1485 return;
1486 } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1487 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1488 offset, length)) {
1489 if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
1490 musb_h_tx_dma_start(hw_ep);
1491 return;
1492 }
1493 } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
1494 dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
1495 return;
1496 }
1497
1498
1499
1500
1501
1502
1503
1504
1505 if (length > qh->maxpacket)
1506 length = qh->maxpacket;
1507
1508 usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1509
1510
1511
1512
1513
1514 if (!urb->transfer_buffer)
1515 qh->use_sg = true;
1516
1517 if (qh->use_sg) {
1518
1519 if (!sg_miter_next(&qh->sg_miter)) {
1520 dev_err(musb->controller, "error: sg list empty\n");
1521 sg_miter_stop(&qh->sg_miter);
1522 status = -EINVAL;
1523 goto done;
1524 }
1525 urb->transfer_buffer = qh->sg_miter.addr;
1526 length = min_t(u32, length, qh->sg_miter.length);
1527 musb_write_fifo(hw_ep, length, urb->transfer_buffer);
1528 qh->sg_miter.consumed = length;
1529 sg_miter_stop(&qh->sg_miter);
1530 } else {
1531 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1532 }
1533
1534 qh->segsize = length;
1535
1536 if (qh->use_sg) {
1537 if (offset + length >= urb->transfer_buffer_length)
1538 qh->use_sg = false;
1539 }
1540
1541 musb_ep_select(mbase, epnum);
1542 musb_writew(epio, MUSB_TXCSR,
1543 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1544}
1545
1546#ifdef CONFIG_USB_TI_CPPI41_DMA
1547
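/*
 * Program the RX DMA channel for the next isochronous frame descriptor
 * of this URB (CPPI 4.1 engines only).
 */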
1548static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1549 struct musb_hw_ep *hw_ep,
1550 struct musb_qh *qh,
1551 struct urb *urb,
1552 size_t len)
1553{
	/* this is the RX path, so use the endpoint's RX DMA channel */
	struct dma_channel *channel = hw_ep->rx_channel;
1555 void __iomem *epio = hw_ep->regs;
1556 dma_addr_t *buf;
1557 u32 length, res;
1558 u16 val;
1559
1560 buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1561 (u32)urb->transfer_dma;
1562
1563 length = urb->iso_frame_desc[qh->iso_idx].length;
1564
1565 val = musb_readw(epio, MUSB_RXCSR);
1566 val |= MUSB_RXCSR_DMAENAB;
1567 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1568
1569 res = dma->channel_program(channel, qh->maxpacket, 0,
1570 (u32)buf, length);
1571
1572 return res;
1573}
1574#else
1575static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1576 struct musb_hw_ep *hw_ep,
1577 struct musb_qh *qh,
1578 struct urb *urb,
1579 size_t len)
1580{
1581 return false;
1582}
1583#endif
1584
1585#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1586 defined(CONFIG_USB_TI_CPPI41_DMA)
/*
 * RX (IN) path when a DMA engine (Inventra, UX500 or CPPI 4.1) is in
 * use: an endpoint interrupt reports either RXPKTRDY or DMA completion.
 * musb_rx_dma_inventra_cppi41() decides whether the URB is finished
 * (buffer filled, short packet, or last ISO descriptor) and re-arms
 * REQPKT when more packets are expected; musb_rx_dma_in_inventra_cppi41()
 * programs the channel to drain the packet that just arrived.
 */
1621static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1622 struct musb_hw_ep *hw_ep,
1623 struct musb_qh *qh,
1624 struct urb *urb,
1625 size_t len)
1626{
1627 struct dma_channel *channel = hw_ep->rx_channel;
1628 void __iomem *epio = hw_ep->regs;
1629 u16 val;
1630 int pipe;
1631 bool done;
1632
1633 pipe = urb->pipe;
1634
1635 if (usb_pipeisoc(pipe)) {
1636 struct usb_iso_packet_descriptor *d;
1637
1638 d = urb->iso_frame_desc + qh->iso_idx;
1639 d->actual_length = len;
1640
1641
1642
1643
1644 if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1645 d->status = 0;
1646
1647 if (++qh->iso_idx >= urb->number_of_packets) {
1648 done = true;
1649 } else {
			/* REVISIT: the result is overwritten by "done = false" below */
1651 if (musb_dma_cppi41(hw_ep->musb))
1652 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1653 urb, len);
1654 done = false;
1655 }
1656
1657 } else {
1658
1659 done = (urb->actual_length + len >=
1660 urb->transfer_buffer_length
1661 || channel->actual_len < qh->maxpacket
1662 || channel->rx_packet_done);
1663 }
1664
1665
1666 if (!done) {
1667 val = musb_readw(epio, MUSB_RXCSR);
1668 val |= MUSB_RXCSR_H_REQPKT;
1669 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1670 }
1671
1672 return done;
1673}
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
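/*
 * A packet is waiting in the RX FIFO (RXPKTRDY): program the DMA
 * channel to drain it, normally one packet at a time (mode 0), and
 * record ISO descriptor status.  Releases the channel on failure so the
 * caller falls back to PIO.
 */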
1691static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1692 struct musb_hw_ep *hw_ep,
1693 struct musb_qh *qh,
1694 struct urb *urb,
1695 size_t len,
1696 u8 iso_err)
1697{
1698 struct musb *musb = hw_ep->musb;
1699 void __iomem *epio = hw_ep->regs;
1700 struct dma_channel *channel = hw_ep->rx_channel;
1701 u16 rx_count, val;
1702 int length, pipe, done;
1703 dma_addr_t buf;
1704
1705 rx_count = musb_readw(epio, MUSB_RXCOUNT);
1706 pipe = urb->pipe;
1707
1708 if (usb_pipeisoc(pipe)) {
1709 int d_status = 0;
1710 struct usb_iso_packet_descriptor *d;
1711
1712 d = urb->iso_frame_desc + qh->iso_idx;
1713
1714 if (iso_err) {
1715 d_status = -EILSEQ;
1716 urb->error_count++;
1717 }
1718 if (rx_count > d->length) {
1719 if (d_status == 0) {
1720 d_status = -EOVERFLOW;
1721 urb->error_count++;
1722 }
1723 dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
1724 rx_count, d->length);
1725
1726 length = d->length;
1727 } else
1728 length = rx_count;
1729 d->status = d_status;
1730 buf = urb->transfer_dma + d->offset;
1731 } else {
1732 length = rx_count;
1733 buf = urb->transfer_dma + urb->actual_length;
1734 }
1735
1736 channel->desired_mode = 0;
1737#ifdef USE_MODE1
1738
1739
1740
1741 if ((urb->transfer_flags & URB_SHORT_NOT_OK)
1742 && (urb->transfer_buffer_length - urb->actual_length)
1743 > qh->maxpacket)
1744 channel->desired_mode = 1;
1745 if (rx_count < hw_ep->max_packet_sz_rx) {
1746 length = rx_count;
1747 channel->desired_mode = 0;
1748 } else {
1749 length = urb->transfer_buffer_length;
1750 }
1751#endif
1752
1753
1754 val = musb_readw(epio, MUSB_RXCSR);
1755 val &= ~MUSB_RXCSR_H_REQPKT;
1756
1757 if (channel->desired_mode == 0)
1758 val &= ~MUSB_RXCSR_H_AUTOREQ;
1759 else
1760 val |= MUSB_RXCSR_H_AUTOREQ;
1761 val |= MUSB_RXCSR_DMAENAB;
1762
1763
1764 if (qh->hb_mult == 1)
1765 val |= MUSB_RXCSR_AUTOCLEAR;
1766
1767 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1768
1769
1770
1771
1772
1773 done = dma->channel_program(channel, qh->maxpacket,
1774 channel->desired_mode,
1775 buf, length);
1776
1777 if (!done) {
1778 dma->channel_release(channel);
1779 hw_ep->rx_channel = NULL;
1780 channel = NULL;
1781 val = musb_readw(epio, MUSB_RXCSR);
1782 val &= ~(MUSB_RXCSR_DMAENAB
1783 | MUSB_RXCSR_H_AUTOREQ
1784 | MUSB_RXCSR_AUTOCLEAR);
1785 musb_writew(epio, MUSB_RXCSR, val);
1786 }
1787
1788 return done;
1789}
1790#else
1791static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1792 struct musb_hw_ep *hw_ep,
1793 struct musb_qh *qh,
1794 struct urb *urb,
1795 size_t len)
1796{
1797 return false;
1798}
1799
1800static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1801 struct musb_hw_ep *hw_ep,
1802 struct musb_qh *qh,
1803 struct urb *urb,
1804 size_t len,
1805 u8 iso_err)
1806{
1807 return false;
1808}
1809#endif
1810
/*
 * Service an RX interrupt for the given IN endpoint; handles bulk, iso,
 * and interrupt transfers.
 *
 * Context: caller owns controller lock, IRQs are blocked.
 */
1815void musb_host_rx(struct musb *musb, u8 epnum)
1816{
1817 struct urb *urb;
1818 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1819 struct dma_controller *c = musb->dma_controller;
1820 void __iomem *epio = hw_ep->regs;
1821 struct musb_qh *qh = hw_ep->in_qh;
1822 size_t xfer_len;
1823 void __iomem *mbase = musb->mregs;
1824 int pipe;
1825 u16 rx_csr, val;
1826 bool iso_err = false;
1827 bool done = false;
1828 u32 status;
1829 struct dma_channel *dma;
1830 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1831
1832 musb_ep_select(mbase, epnum);
1833
1834 urb = next_urb(qh);
1835 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1836 status = 0;
1837 xfer_len = 0;
1838
1839 rx_csr = musb_readw(epio, MUSB_RXCSR);
1840 val = rx_csr;
1841
1842 if (unlikely(!urb)) {
1843
1844
1845
1846
1847 dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1848 musb_readw(epio, MUSB_RXCOUNT));
1849 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1850 return;
1851 }
1852
1853 pipe = urb->pipe;
1854
1855 dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1856 epnum, rx_csr, urb->actual_length,
1857 dma ? dma->actual_len : 0);
1858
1859
1860
1861 if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1862 dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
1863
1864
1865 status = -EPIPE;
1866
1867 } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1868 dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
1869
1870 status = -EPROTO;
1871 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1872
1873 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1874
1875 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1876 dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886 if (usb_pipebulk(urb->pipe)
1887 && qh->mux == 1
1888 && !list_is_singular(&musb->in_bulk)) {
1889 musb_bulk_nak_timeout(musb, hw_ep, 1);
1890 return;
1891 }
1892 musb_ep_select(mbase, epnum);
1893 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1894 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1895 musb_writew(epio, MUSB_RXCSR, rx_csr);
1896
1897 goto finish;
1898 } else {
1899 dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
1900
1901 iso_err = true;
1902 }
1903 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1904 dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
1905 epnum);
1906 status = -EPROTO;
1907 }
1908
1909
1910 if (status) {
1911
1912 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1913 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1914 musb->dma_controller->channel_abort(dma);
1915 xfer_len = dma->actual_len;
1916 }
1917 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1918 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1919 done = true;
1920 goto finish;
1921 }
1922
1923 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1924
1925 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1926 goto finish;
1927 }
1928
1929
1930
1931
1932
1933
1934
1935 if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
1936 (rx_csr & MUSB_RXCSR_H_REQPKT)) {
1937
1938
1939
1940
1941
1942 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1943 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1944 musb->dma_controller->channel_abort(dma);
1945 xfer_len = dma->actual_len;
1946 done = true;
1947 }
1948
1949 dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1950 xfer_len, dma ? ", dma" : "");
1951 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1952
1953 musb_ep_select(mbase, epnum);
1954 musb_writew(epio, MUSB_RXCSR,
1955 MUSB_RXCSR_H_WZC_BITS | rx_csr);
1956 }
1957
1958 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1959 xfer_len = dma->actual_len;
1960
1961 val &= ~(MUSB_RXCSR_DMAENAB
1962 | MUSB_RXCSR_H_AUTOREQ
1963 | MUSB_RXCSR_AUTOCLEAR
1964 | MUSB_RXCSR_RXPKTRDY);
1965 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1966
1967 if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1968 musb_dma_cppi41(musb)) {
1969 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1970 dev_dbg(hw_ep->musb->controller,
1971 "ep %d dma %s, rxcsr %04x, rxcount %d\n",
1972 epnum, done ? "off" : "reset",
1973 musb_readw(epio, MUSB_RXCSR),
1974 musb_readw(epio, MUSB_RXCOUNT));
1975 } else {
1976 done = true;
1977 }
1978
1979 } else if (urb->status == -EINPROGRESS) {
1980
1981 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1982 status = -EPROTO;
1983 ERR("Rx interrupt with no errors or packet!\n");
1984
1985
1986
1987
1988
1989 musb_ep_select(mbase, epnum);
1990 val &= ~MUSB_RXCSR_H_REQPKT;
1991 musb_writew(epio, MUSB_RXCSR, val);
1992 goto finish;
1993 }
1994
1995
1996 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1997 musb_dma_cppi41(musb)) && dma) {
1998 dev_dbg(hw_ep->musb->controller,
1999 "RX%d count %d, buffer 0x%llx len %d/%d\n",
2000 epnum, musb_readw(epio, MUSB_RXCOUNT),
2001 (unsigned long long) urb->transfer_dma
2002 + urb->actual_length,
2003 qh->offset,
2004 urb->transfer_buffer_length);
2005
2006 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
2007 xfer_len, iso_err))
2008 goto finish;
2009 else
2010 dev_err(musb->controller, "error: rx_dma failed\n");
2011 }
2012
2013 if (!dma) {
2014 unsigned int received_len;
2015
2016
2017 usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
2018
2019
2020
2021
2022
2023 if (!urb->transfer_buffer) {
2024 qh->use_sg = true;
2025 sg_miter_start(&qh->sg_miter, urb->sg, 1,
2026 sg_flags);
2027 }
2028
2029 if (qh->use_sg) {
2030 if (!sg_miter_next(&qh->sg_miter)) {
2031 dev_err(musb->controller, "error: sg list empty\n");
2032 sg_miter_stop(&qh->sg_miter);
2033 status = -EINVAL;
2034 done = true;
2035 goto finish;
2036 }
2037 urb->transfer_buffer = qh->sg_miter.addr;
2038 received_len = urb->actual_length;
2039 qh->offset = 0x0;
2040 done = musb_host_packet_rx(musb, urb, epnum,
2041 iso_err);
2042
2043 received_len = urb->actual_length -
2044 received_len;
2045 qh->sg_miter.consumed = received_len;
2046 sg_miter_stop(&qh->sg_miter);
2047 } else {
2048 done = musb_host_packet_rx(musb, urb,
2049 epnum, iso_err);
2050 }
2051 dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
2052 }
2053 }
2054
2055finish:
2056 urb->actual_length += xfer_len;
2057 qh->offset += xfer_len;
2058 if (done) {
2059 if (qh->use_sg)
2060 qh->use_sg = false;
2061
2062 if (urb->status == -EINPROGRESS)
2063 urb->status = status;
2064 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
2065 }
2066}
2067
/*
 * Pick a hardware endpoint for this qh and start it if its queue was
 * idle: control transfers use ep0, bulk transfers share the dedicated
 * bulk endpoint (multiplexed via musb->in_bulk / out_bulk), and
 * periodic transfers claim the free endpoint whose FIFO size fits best.
 * Context: caller owns controller lock, IRQs are blocked.
 */
2073static int musb_schedule(
2074 struct musb *musb,
2075 struct musb_qh *qh,
2076 int is_in)
2077{
2078 int idle = 0;
2079 int best_diff;
2080 int best_end, epnum;
2081 struct musb_hw_ep *hw_ep = NULL;
2082 struct list_head *head = NULL;
2083 u8 toggle;
2084 u8 txtype;
2085 struct urb *urb = next_urb(qh);
2086
2087
2088 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2089 head = &musb->control;
2090 hw_ep = musb->control_ep;
2091 goto success;
2092 }
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103 best_diff = 4096;
2104 best_end = -1;
2105
2106 for (epnum = 1, hw_ep = musb->endpoints + 1;
2107 epnum < musb->nr_endpoints;
2108 epnum++, hw_ep++) {
2109 int diff;
2110
2111 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2112 continue;
2113
2114 if (hw_ep == musb->bulk_ep)
2115 continue;
2116
2117 if (is_in)
2118 diff = hw_ep->max_packet_sz_rx;
2119 else
2120 diff = hw_ep->max_packet_sz_tx;
2121 diff -= (qh->maxpacket * qh->hb_mult);
2122
2123 if (diff >= 0 && best_diff > diff) {
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137 hw_ep = musb->endpoints + epnum;
2138 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2139 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2140 >> 4) & 0x3;
2141 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2142 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2143 continue;
2144
2145 best_diff = diff;
2146 best_end = epnum;
2147 }
2148 }
2149
2150 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2151 hw_ep = musb->bulk_ep;
2152 if (is_in)
2153 head = &musb->in_bulk;
2154 else
2155 head = &musb->out_bulk;
2156
2157
2158
2159
2160
2161
2162
2163
2164 if (qh->dev)
2165 qh->intv_reg =
2166 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2167 goto success;
2168 } else if (best_end < 0) {
2169 return -ENOSPC;
2170 }
2171
2172 idle = 1;
2173 qh->mux = 0;
2174 hw_ep = musb->endpoints + best_end;
2175 dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
2176success:
2177 if (head) {
2178 idle = list_empty(head);
2179 list_add_tail(&qh->ring, head);
2180 qh->mux = 1;
2181 }
2182 qh->hw_ep = hw_ep;
2183 qh->hep->hcpriv = qh;
2184 if (idle)
2185 musb_start_urb(musb, is_in, qh);
2186 return 0;
2187}
2188
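/*
 * Queue a URB: on the endpoint's first URB, allocate and fill a musb_qh
 * describing it, then hand the qh to musb_schedule().
 */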
2189static int musb_urb_enqueue(
2190 struct usb_hcd *hcd,
2191 struct urb *urb,
2192 gfp_t mem_flags)
2193{
2194 unsigned long flags;
2195 struct musb *musb = hcd_to_musb(hcd);
2196 struct usb_host_endpoint *hep = urb->ep;
2197 struct musb_qh *qh;
2198 struct usb_endpoint_descriptor *epd = &hep->desc;
2199 int ret;
2200 unsigned type_reg;
2201 unsigned interval;
2202
2203
2204 if (!is_host_active(musb) || !musb->is_active)
2205 return -ENODEV;
2206
2207 spin_lock_irqsave(&musb->lock, flags);
2208 ret = usb_hcd_link_urb_to_ep(hcd, urb);
2209 qh = ret ? NULL : hep->hcpriv;
2210 if (qh)
2211 urb->hcpriv = qh;
2212 spin_unlock_irqrestore(&musb->lock, flags);
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222 if (qh || ret)
2223 return ret;
2224
2225
2226
2227
2228
2229
2230
2231 qh = kzalloc(sizeof *qh, mem_flags);
2232 if (!qh) {
2233 spin_lock_irqsave(&musb->lock, flags);
2234 usb_hcd_unlink_urb_from_ep(hcd, urb);
2235 spin_unlock_irqrestore(&musb->lock, flags);
2236 return -ENOMEM;
2237 }
2238
2239 qh->hep = hep;
2240 qh->dev = urb->dev;
2241 INIT_LIST_HEAD(&qh->ring);
2242 qh->is_ready = 1;
2243
2244 qh->maxpacket = usb_endpoint_maxp(epd);
2245 qh->type = usb_endpoint_type(epd);
2246
2247
2248
2249
2250
2251 qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
2252 if (qh->hb_mult > 1) {
2253 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2254
2255 if (ok)
2256 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2257 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2258 if (!ok) {
2259 ret = -EMSGSIZE;
2260 goto done;
2261 }
2262 qh->maxpacket &= 0x7ff;
2263 }
2264
2265 qh->epnum = usb_endpoint_num(epd);
2266
2267
2268 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2269
2270
2271 type_reg = (qh->type << 4) | qh->epnum;
2272 switch (urb->dev->speed) {
2273 case USB_SPEED_LOW:
2274 type_reg |= 0xc0;
2275 break;
2276 case USB_SPEED_FULL:
2277 type_reg |= 0x80;
2278 break;
2279 default:
2280 type_reg |= 0x40;
2281 }
2282 qh->type_reg = type_reg;
2283
2284
2285 switch (qh->type) {
2286 case USB_ENDPOINT_XFER_INT:
2287
2288
2289
2290
2291 if (urb->dev->speed <= USB_SPEED_FULL) {
2292 interval = max_t(u8, epd->bInterval, 1);
2293 break;
2294 }
2295
2296 case USB_ENDPOINT_XFER_ISOC:
2297
2298 interval = min_t(u8, epd->bInterval, 16);
2299 break;
2300 default:
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315 interval = 0;
2316 }
2317 qh->intv_reg = interval;
2318
2319
2320 if (musb->is_multipoint) {
2321 struct usb_device *parent = urb->dev->parent;
2322
2323 if (parent != hcd->self.root_hub) {
2324 qh->h_addr_reg = (u8) parent->devnum;
2325
2326
2327 if (urb->dev->tt) {
2328 qh->h_port_reg = (u8) urb->dev->ttport;
2329 if (urb->dev->tt->hub)
2330 qh->h_addr_reg =
2331 (u8) urb->dev->tt->hub->devnum;
2332 if (urb->dev->tt->multi)
2333 qh->h_addr_reg |= 0x80;
2334 }
2335 }
2336 }
2337
2338
2339
2340
2341
2342 spin_lock_irqsave(&musb->lock, flags);
2343 if (hep->hcpriv || !next_urb(qh)) {
2344
2345
2346
2347 kfree(qh);
2348 qh = NULL;
2349 ret = 0;
2350 } else
2351 ret = musb_schedule(musb, qh,
2352 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2353
2354 if (ret == 0) {
2355 urb->hcpriv = qh;
2356
2357
2358
2359 }
2360 spin_unlock_irqrestore(&musb->lock, flags);
2361
2362done:
2363 if (ret != 0) {
2364 spin_lock_irqsave(&musb->lock, flags);
2365 usb_hcd_unlink_urb_from_ep(hcd, urb);
2366 spin_unlock_irqrestore(&musb->lock, flags);
2367 kfree(qh);
2368 }
2369 return ret;
2370}
2371
/*
 * Abort a transfer that is at the head of a hardware queue: stop any
 * DMA, flush the FIFO, and clear the endpoint's error/handshake state.
 * Called with controller locked, irqs blocked.
 */
2378static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2379{
2380 struct musb_hw_ep *ep = qh->hw_ep;
2381 struct musb *musb = ep->musb;
2382 void __iomem *epio = ep->regs;
2383 unsigned hw_end = ep->epnum;
2384 void __iomem *regs = ep->musb->mregs;
2385 int is_in = usb_pipein(urb->pipe);
2386 int status = 0;
2387 u16 csr;
2388
2389 musb_ep_select(regs, hw_end);
2390
2391 if (is_dma_capable()) {
2392 struct dma_channel *dma;
2393
2394 dma = is_in ? ep->rx_channel : ep->tx_channel;
2395 if (dma) {
2396 status = ep->musb->dma_controller->channel_abort(dma);
2397 dev_dbg(musb->controller,
2398 "abort %cX%d DMA for urb %p --> %d\n",
2399 is_in ? 'R' : 'T', ep->epnum,
2400 urb, status);
2401 urb->actual_length += dma->actual_len;
2402 }
2403 }
2404
2405
2406 if (ep->epnum && is_in) {
2407
2408 csr = musb_h_flush_rxfifo(ep, 0);
2409
2410
2411
2412
2413
2414 } else if (ep->epnum) {
2415 musb_h_tx_flush_fifo(ep);
2416 csr = musb_readw(epio, MUSB_TXCSR);
2417 csr &= ~(MUSB_TXCSR_AUTOSET
2418 | MUSB_TXCSR_DMAENAB
2419 | MUSB_TXCSR_H_RXSTALL
2420 | MUSB_TXCSR_H_NAKTIMEOUT
2421 | MUSB_TXCSR_H_ERROR
2422 | MUSB_TXCSR_TXPKTRDY);
2423 musb_writew(epio, MUSB_TXCSR, csr);
2424
2425 musb_writew(epio, MUSB_TXCSR, csr);
2426
2427 csr = musb_readw(epio, MUSB_TXCSR);
2428 } else {
2429 musb_h_ep0_flush_fifo(ep);
2430 }
2431 if (status == 0)
2432 musb_advance_schedule(ep->musb, urb, ep, is_in);
2433 return status;
2434}
2435
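/*
 * Dequeue a URB.  If the hardware is currently working on it, abort via
 * musb_cleanup_urb(); otherwise just give it back to usbcore.
 */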
2436static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2437{
2438 struct musb *musb = hcd_to_musb(hcd);
2439 struct musb_qh *qh;
2440 unsigned long flags;
2441 int is_in = usb_pipein(urb->pipe);
2442 int ret;
2443
2444 dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
2445 usb_pipedevice(urb->pipe),
2446 usb_pipeendpoint(urb->pipe),
2447 is_in ? "in" : "out");
2448
2449 spin_lock_irqsave(&musb->lock, flags);
2450 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2451 if (ret)
2452 goto done;
2453
2454 qh = urb->hcpriv;
2455 if (!qh)
2456 goto done;
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470 if (!qh->is_ready
2471 || urb->urb_list.prev != &qh->hep->urb_list
2472 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2473 int ready = qh->is_ready;
2474
2475 qh->is_ready = 0;
2476 musb_giveback(musb, urb, 0);
2477 qh->is_ready = ready;
2478
2479
2480
2481
2482 if (ready && list_empty(&qh->hep->urb_list)) {
2483 qh->hep->hcpriv = NULL;
2484 list_del(&qh->ring);
2485 kfree(qh);
2486 }
2487 } else
2488 ret = musb_cleanup_urb(urb, qh);
2489done:
2490 spin_unlock_irqrestore(&musb->lock, flags);
2491 return ret;
2492}

/* Disable a host endpoint: abort the active URB and give back the rest. */
2495static void
2496musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2497{
2498 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2499 unsigned long flags;
2500 struct musb *musb = hcd_to_musb(hcd);
2501 struct musb_qh *qh;
2502 struct urb *urb;
2503
2504 spin_lock_irqsave(&musb->lock, flags);
2505
2506 qh = hep->hcpriv;
2507 if (qh == NULL)
2508 goto exit;
2509
2510
2511
2512
2513 qh->is_ready = 0;
2514 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2515 urb = next_urb(qh);
2516
2517
2518 if (!urb->unlinked)
2519 urb->status = -ESHUTDOWN;
2520
2521
2522 musb_cleanup_urb(urb, qh);
2523
2524
2525
2526
2527 while (!list_empty(&hep->urb_list)) {
2528 urb = next_urb(qh);
2529 urb->status = -ESHUTDOWN;
2530 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2531 }
2532 } else {
2533
2534
2535
2536
2537 while (!list_empty(&hep->urb_list))
2538 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2539
2540 hep->hcpriv = NULL;
2541 list_del(&qh->ring);
2542 kfree(qh);
2543 }
2544exit:
2545 spin_unlock_irqrestore(&musb->lock, flags);
2546}
2547
2548static int musb_h_get_frame_number(struct usb_hcd *hcd)
2549{
2550 struct musb *musb = hcd_to_musb(hcd);
2551
2552 return musb_readw(musb->mregs, MUSB_FRAME);
2553}
2554
2555static int musb_h_start(struct usb_hcd *hcd)
2556{
2557 struct musb *musb = hcd_to_musb(hcd);
2558
2559
2560
2561
2562 hcd->state = HC_STATE_RUNNING;
2563 musb->port1_status = 0;
2564 return 0;
2565}
2566
2567static void musb_h_stop(struct usb_hcd *hcd)
2568{
2569 musb_stop(hcd_to_musb(hcd));
2570 hcd->state = HC_STATE_HALT;
2571}
2572
2573static int musb_bus_suspend(struct usb_hcd *hcd)
2574{
2575 struct musb *musb = hcd_to_musb(hcd);
2576 u8 devctl;
2577
2578 musb_port_suspend(musb, true);
2579
2580 if (!is_host_active(musb))
2581 return 0;
2582
2583 switch (musb->xceiv->otg->state) {
2584 case OTG_STATE_A_SUSPEND:
2585 return 0;
2586 case OTG_STATE_A_WAIT_VRISE:
2587
2588
2589
2590
2591 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2592 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2593 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2594 break;
2595 default:
2596 break;
2597 }
2598
2599 if (musb->is_active) {
2600 WARNING("trying to suspend as %s while active\n",
2601 usb_otg_state_string(musb->xceiv->otg->state));
2602 return -EBUSY;
2603 } else
2604 return 0;
2605}
2606
2607static int musb_bus_resume(struct usb_hcd *hcd)
2608{
2609 struct musb *musb = hcd_to_musb(hcd);
2610
2611 if (musb->config &&
2612 musb->config->host_port_deassert_reset_at_resume)
2613 musb_port_reset(musb, false);
2614
2615 return 0;
2616}
2617
2618#ifndef CONFIG_MUSB_PIO_ONLY
2619
2620#define MUSB_USB_DMA_ALIGN 4
2621
2622struct musb_temp_buffer {
2623 void *kmalloc_ptr;
2624 void *old_xfer_buffer;
2625 u8 data[0];
2626};
2627
2628static void musb_free_temp_buffer(struct urb *urb)
2629{
2630 enum dma_data_direction dir;
2631 struct musb_temp_buffer *temp;
2632 size_t length;
2633
2634 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2635 return;
2636
2637 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2638
2639 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2640 data);
2641
2642 if (dir == DMA_FROM_DEVICE) {
2643 if (usb_pipeisoc(urb->pipe))
2644 length = urb->transfer_buffer_length;
2645 else
2646 length = urb->actual_length;
2647
2648 memcpy(temp->old_xfer_buffer, temp->data, length);
2649 }
2650 urb->transfer_buffer = temp->old_xfer_buffer;
2651 kfree(temp->kmalloc_ptr);
2652
2653 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2654}
2655
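/*
 * If the transfer buffer is not MUSB_USB_DMA_ALIGN-byte aligned, bounce
 * it through a kmalloc'd temporary buffer (copying outbound data in) so
 * the DMA engine never sees a misaligned address.
 */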
2656static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2657{
2658 enum dma_data_direction dir;
2659 struct musb_temp_buffer *temp;
2660 void *kmalloc_ptr;
2661 size_t kmalloc_size;
2662
2663 if (urb->num_sgs || urb->sg ||
2664 urb->transfer_buffer_length == 0 ||
2665 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2666 return 0;
2667
2668 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2669
2670
2671 kmalloc_size = urb->transfer_buffer_length +
2672 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2673
2674 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2675 if (!kmalloc_ptr)
2676 return -ENOMEM;
2677
2678
2679 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2680
2681
2682 temp->kmalloc_ptr = kmalloc_ptr;
2683 temp->old_xfer_buffer = urb->transfer_buffer;
2684 if (dir == DMA_TO_DEVICE)
2685 memcpy(temp->data, urb->transfer_buffer,
2686 urb->transfer_buffer_length);
2687 urb->transfer_buffer = temp->data;
2688
2689 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2690
2691 return 0;
2692}
2693
2694static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2695 gfp_t mem_flags)
2696{
2697 struct musb *musb = hcd_to_musb(hcd);
2698 int ret;
2699
	/*
	 * The DMA engine in hardware revisions >= 1.8 cannot handle DMA
	 * addresses that are not aligned to a 4-byte boundary, so only
	 * those revisions route transfers through the bounce-buffer
	 * (un)map hooks in this file.
	 */
2706 if (musb->hwvers < MUSB_HWVERS_1800)
2707 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2708
2709 ret = musb_alloc_temp_buffer(urb, mem_flags);
2710 if (ret)
2711 return ret;
2712
2713 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2714 if (ret)
2715 musb_free_temp_buffer(urb);
2716
2717 return ret;
2718}
2719
2720static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2721{
2722 struct musb *musb = hcd_to_musb(hcd);
2723
2724 usb_hcd_unmap_urb_for_dma(hcd, urb);
2725
2726
2727 if (musb->hwvers < MUSB_HWVERS_1800)
2728 return;
2729
2730 musb_free_temp_buffer(urb);
2731}
2732#endif
2733
2734static const struct hc_driver musb_hc_driver = {
2735 .description = "musb-hcd",
2736 .product_desc = "MUSB HDRC host driver",
2737 .hcd_priv_size = sizeof(struct musb *),
2738 .flags = HCD_USB2 | HCD_MEMORY,
2739
2740
2741
2742
2743
2744 .start = musb_h_start,
2745 .stop = musb_h_stop,
2746
2747 .get_frame_number = musb_h_get_frame_number,
2748
2749 .urb_enqueue = musb_urb_enqueue,
2750 .urb_dequeue = musb_urb_dequeue,
2751 .endpoint_disable = musb_h_disable,
2752
2753#ifndef CONFIG_MUSB_PIO_ONLY
2754 .map_urb_for_dma = musb_map_urb_for_dma,
2755 .unmap_urb_for_dma = musb_unmap_urb_for_dma,
2756#endif
2757
2758 .hub_status_data = musb_hub_status_data,
2759 .hub_control = musb_hub_control,
2760 .bus_suspend = musb_bus_suspend,
2761 .bus_resume = musb_bus_resume,
2762
2763
2764};
2765
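/* Create the usb_hcd for this controller and link it to the musb instance. */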
2766int musb_host_alloc(struct musb *musb)
2767{
2768 struct device *dev = musb->controller;
2769
2770
2771 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2772 if (!musb->hcd)
2773 return -EINVAL;
2774
2775 *musb->hcd->hcd_priv = (unsigned long) musb;
2776 musb->hcd->self.uses_pio_for_control = 1;
2777 musb->hcd->uses_new_polling = 1;
2778 musb->hcd->has_tt = 1;
2779
2780 return 0;
2781}
2782
2783void musb_host_cleanup(struct musb *musb)
2784{
2785 if (musb->port_mode == MUSB_PORT_MODE_GADGET)
2786 return;
2787 usb_remove_hcd(musb->hcd);
2788}
2789
2790void musb_host_free(struct musb *musb)
2791{
2792 usb_put_hcd(musb->hcd);
2793}
2794
2795int musb_host_setup(struct musb *musb, int power_budget)
2796{
2797 int ret;
2798 struct usb_hcd *hcd = musb->hcd;
2799
2800 MUSB_HST_MODE(musb);
2801 musb->xceiv->otg->default_a = 1;
2802 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2803
2804 otg_set_host(musb->xceiv->otg, &hcd->self);
2805 hcd->self.otg_port = 1;
2806 musb->xceiv->otg->host = &hcd->self;
2807 hcd->power_budget = 2 * (power_budget ? : 250);
2808
2809 ret = usb_add_hcd(hcd, 0, 0);
2810 if (ret < 0)
2811 return ret;
2812
2813 device_wakeup_enable(hcd->self.controller);
2814 return 0;
2815}
2816
2817void musb_host_resume_root_hub(struct musb *musb)
2818{
2819 usb_hcd_resume_root_hub(musb->hcd);
2820}
2821
2822void musb_host_poke_root_hub(struct musb *musb)
2823{
2824 MUSB_HST_MODE(musb);
2825 if (musb->hcd->status_urb)
2826 usb_hcd_poll_rh_status(musb->hcd);
2827 else
2828 usb_hcd_resume_root_hub(musb->hcd);
2829}
2830