/*
 * fsl_qe_udc.c - Freescale QE/CPM USB Device Controller driver
 *
 * Author: Xie XiaoBo
 * Copyright (c) Freescale Semiconductor, Inc.
 *
 * The QE/CPM USB controller is driven here in peripheral (device) mode
 * for the USB gadget framework.
 *
 * This file is released under the terms of the GNU General Public License.
 */
#undef USB_TRACE

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <asm/qe.h>
#include <asm/cpm.h>
#include <asm/dma.h>
#include <asm/reg.h>
#include "fsl_qe_udc.h"

#define DRIVER_DESC	"Freescale QE/CPM USB Device Controller driver"
#define DRIVER_AUTHOR	"Xie XiaoBo"
#define DRIVER_VERSION	"1.0"

#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

static const char driver_name[] = "fsl_qe_udc";
static const char driver_desc[] = DRIVER_DESC;


static const char *const ep_name[] = {
	"ep0-control",	/* everyone has ep0 */
	"ep1",
	"ep2",
	"ep3",
};
63
static struct usb_endpoint_descriptor qe_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
};
72

/*----------------------------------------------------------------------------
 *	Request completion and endpoint queue handling
 *----------------------------------------------------------------------------*/

/*
 * done() - retire a request from an endpoint queue and run its completion
 * callback.  Must be called with udc->lock held; the lock is dropped
 * around the gadget's complete() callback.
 */
static void done(struct qe_ep *ep, struct qe_req *req, int status)
{
	struct qe_udc *udc = ep->udc;
	unsigned char stopped = ep->stopped;

	/* drop the request from the endpoint queue it was added to
	 * by ep_queue() */
	list_del_init(&req->queue);

	/* req.status was set to -EINPROGRESS when the request was queued */
	if (req->req.status == -EINPROGRESS)
92 req->req.status = status;
93 else
94 status = req->req.status;
95
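	/* if ep_queue() mapped the buffer, unmap it now; otherwise just
	 * hand ownership of the gadget-mapped buffer back to the CPU */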
96 if (req->mapped) {
97 dma_unmap_single(udc->gadget.dev.parent,
98 req->req.dma, req->req.length,
99 ep_is_in(ep)
100 ? DMA_TO_DEVICE
101 : DMA_FROM_DEVICE);
102 req->req.dma = DMA_ADDR_INVALID;
103 req->mapped = 0;
104 } else
105 dma_sync_single_for_cpu(udc->gadget.dev.parent,
106 req->req.dma, req->req.length,
107 ep_is_in(ep)
108 ? DMA_TO_DEVICE
109 : DMA_FROM_DEVICE);
110
111 if (status && (status != -ESHUTDOWN))
112 dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
113 ep->ep.name, &req->req, status,
114 req->req.actual, req->req.length);
115
116
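	/* stop the endpoint while the lock is dropped for the callback */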
117 ep->stopped = 1;
118 spin_unlock(&udc->lock);
119
120
121
122 if (req->req.complete)
123 req->req.complete(&ep->ep, &req->req);
124
125 spin_lock(&udc->lock);
126
127 ep->stopped = stopped;
128}
129

/*
 * nuke() - dequeue ALL requests on an endpoint, completing each one with
 * the given status.  Caller must hold udc->lock.
 */
static void nuke(struct qe_ep *ep, int status)
{
	/* retire every request still linked on this endpoint */
	while (!list_empty(&ep->queue)) {
		struct qe_req *req = NULL;

		req = list_entry(ep->queue.next, struct qe_req, queue);
139
140 done(ep, req, status);
141 }
142}
143
144
145
146
147
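/*----------------------------------------------------------------------------
 *	Low-level endpoint and controller register helpers
 *----------------------------------------------------------------------------*/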
148static int qe_eprx_stall_change(struct qe_ep *ep, int value)
149{
150 u16 tem_usep;
151 u8 epnum = ep->epnum;
152 struct qe_udc *udc = ep->udc;
153
154 tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
155 tem_usep = tem_usep & ~USB_RHS_MASK;
156 if (value == 1)
157 tem_usep |= USB_RHS_STALL;
158 else if (ep->dir == USB_DIR_IN)
159 tem_usep |= USB_RHS_IGNORE_OUT;
160
161 out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
162 return 0;
163}
164
165static int qe_eptx_stall_change(struct qe_ep *ep, int value)
166{
167 u16 tem_usep;
168 u8 epnum = ep->epnum;
169 struct qe_udc *udc = ep->udc;
170
171 tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
172 tem_usep = tem_usep & ~USB_THS_MASK;
173 if (value == 1)
174 tem_usep |= USB_THS_STALL;
175 else if (ep->dir == USB_DIR_OUT)
176 tem_usep |= USB_THS_IGNORE_IN;
177
178 out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
179
180 return 0;
181}
182
183static int qe_ep0_stall(struct qe_udc *udc)
184{
185 qe_eptx_stall_change(&udc->eps[0], 1);
186 qe_eprx_stall_change(&udc->eps[0], 1);
187 udc->ep0_state = WAIT_FOR_SETUP;
188 udc->ep0_dir = 0;
189 return 0;
190}
191
192static int qe_eprx_nack(struct qe_ep *ep)
193{
194 u8 epnum = ep->epnum;
195 struct qe_udc *udc = ep->udc;
196
197 if (ep->state == EP_STATE_IDLE) {
198
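		/* reply NACK to OUT tokens on this endpoint */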
199 clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
200 USB_RHS_MASK, USB_RHS_NACK);
201
202
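		/* mask the Rx and Busy interrupts while NACKing */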
203 clrbits16(&udc->usb_regs->usb_usbmr,
204 (USB_E_RXB_MASK | USB_E_BSY_MASK));
205
206 ep->state = EP_STATE_NACK;
207 }
208 return 0;
209}
210
211static int qe_eprx_normal(struct qe_ep *ep)
212{
213 struct qe_udc *udc = ep->udc;
214
215 if (ep->state == EP_STATE_NACK) {
216 clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
217 USB_RTHS_MASK, USB_THS_IGNORE_IN);
218
219
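		/* clear pending Rx/Busy events and unmask their interrupts */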
220 out_be16(&udc->usb_regs->usb_usber,
221 USB_E_BSY_MASK | USB_E_RXB_MASK);
222 setbits16(&udc->usb_regs->usb_usbmr,
223 (USB_E_RXB_MASK | USB_E_BSY_MASK));
224
225 ep->state = EP_STATE_IDLE;
226 ep->has_data = 0;
227 }
228
229 return 0;
230}
231
232static int qe_ep_cmd_stoptx(struct qe_ep *ep)
233{
234 if (ep->udc->soc_type == PORT_CPM)
235 cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
236 CPM_USB_STOP_TX_OPCODE);
237 else
238 qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
239 ep->epnum, 0);
240
241 return 0;
242}
243
244static int qe_ep_cmd_restarttx(struct qe_ep *ep)
245{
246 if (ep->udc->soc_type == PORT_CPM)
247 cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
248 CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
249 else
250 qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
251 ep->epnum, 0);
252
253 return 0;
254}
255
256static int qe_ep_flushtxfifo(struct qe_ep *ep)
257{
258 struct qe_udc *udc = ep->udc;
259 int i;
260
261 i = (int)ep->epnum;
262
263 qe_ep_cmd_stoptx(ep);
264 out_8(&udc->usb_regs->usb_uscom,
265 USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
266 out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
267 out_be32(&udc->ep_param[i]->tstate, 0);
268 out_be16(&udc->ep_param[i]->tbcnt, 0);
269
270 ep->c_txbd = ep->txbase;
271 ep->n_txbd = ep->txbase;
272 qe_ep_cmd_restarttx(ep);
273 return 0;
274}
275
276static int qe_ep_filltxfifo(struct qe_ep *ep)
277{
278 struct qe_udc *udc = ep->udc;
279
280 out_8(&udc->usb_regs->usb_uscom,
281 USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
282 return 0;
283}
284
285static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
286{
287 struct qe_ep *ep;
288 u32 bdring_len;
289 struct qe_bd __iomem *bd;
290 int i;
291
292 ep = &udc->eps[pipe_num];
293
294 if (ep->dir == USB_DIR_OUT)
295 bdring_len = USB_BDRING_LEN_RX;
296 else
297 bdring_len = USB_BDRING_LEN;
298
299 bd = ep->rxbase;
300 for (i = 0; i < (bdring_len - 1); i++) {
301 out_be32((u32 __iomem *)bd, R_E | R_I);
302 bd++;
303 }
304 out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
305
306 bd = ep->txbase;
307 for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
308 out_be32(&bd->buf, 0);
309 out_be32((u32 __iomem *)bd, 0);
310 bd++;
311 }
312 out_be32((u32 __iomem *)bd, T_W);
313
314 return 0;
315}
316
317static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
318{
319 struct qe_ep *ep;
320 u16 tmpusep;
321
322 ep = &udc->eps[pipe_num];
323 tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
324 tmpusep &= ~USB_RTHS_MASK;
325
326 switch (ep->dir) {
327 case USB_DIR_BOTH:
328 qe_ep_flushtxfifo(ep);
329 break;
330 case USB_DIR_OUT:
331 tmpusep |= USB_THS_IGNORE_IN;
332 break;
333 case USB_DIR_IN:
334 qe_ep_flushtxfifo(ep);
335 tmpusep |= USB_RHS_IGNORE_OUT;
336 break;
337 default:
338 break;
339 }
340 out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
341
342 qe_epbds_reset(udc, pipe_num);
343
344 return 0;
345}
346
347static int qe_ep_toggledata01(struct qe_ep *ep)
348{
349 ep->data01 ^= 0x1;
350 return 0;
351}
352
353static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
354{
355 struct qe_ep *ep = &udc->eps[pipe_num];
356 unsigned long tmp_addr = 0;
357 struct usb_ep_para __iomem *epparam;
358 int i;
359 struct qe_bd __iomem *bd;
360 int bdring_len;
361
362 if (ep->dir == USB_DIR_OUT)
363 bdring_len = USB_BDRING_LEN_RX;
364 else
365 bdring_len = USB_BDRING_LEN;
366
367 epparam = udc->ep_param[pipe_num];
368
369 tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
370 USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
371 if (IS_ERR_VALUE(tmp_addr))
372 return -ENOMEM;
373
374 out_be16(&epparam->rbase, (u16)tmp_addr);
375 out_be16(&epparam->tbase, (u16)(tmp_addr +
376 (sizeof(struct qe_bd) * bdring_len)));
377
378 out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
379 out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
380
381 ep->rxbase = cpm_muram_addr(tmp_addr);
382 ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
383 * bdring_len));
384 ep->n_rxbd = ep->rxbase;
385 ep->e_rxbd = ep->rxbase;
386 ep->n_txbd = ep->txbase;
387 ep->c_txbd = ep->txbase;
388 ep->data01 = 0;
389
390
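	/* clear both BD rings; the last BD of each ring gets the wrap bit */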
391 bd = ep->rxbase;
392 for (i = 0; i < bdring_len - 1; i++) {
393 out_be32(&bd->buf, 0);
394 out_be32((u32 __iomem *)bd, 0);
395 bd++;
396 }
397 out_be32(&bd->buf, 0);
398 out_be32((u32 __iomem *)bd, R_W);
399
400 bd = ep->txbase;
401 for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
402 out_be32(&bd->buf, 0);
403 out_be32((u32 __iomem *)bd, 0);
404 bd++;
405 }
406 out_be32(&bd->buf, 0);
407 out_be32((u32 __iomem *)bd, T_W);
408
409 return 0;
410}
411
412static int qe_ep_rxbd_update(struct qe_ep *ep)
413{
414 unsigned int size;
415 int i;
416 unsigned int tmp;
417 struct qe_bd __iomem *bd;
418 unsigned int bdring_len;
419
420 if (ep->rxbase == NULL)
421 return -EINVAL;
422
423 bd = ep->rxbase;
424
425 ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
426 if (ep->rxframe == NULL) {
427 dev_err(ep->udc->dev, "malloc rxframe failed\n");
428 return -ENOMEM;
429 }
430
431 qe_frame_init(ep->rxframe);
432
433 if (ep->dir == USB_DIR_OUT)
434 bdring_len = USB_BDRING_LEN_RX;
435 else
436 bdring_len = USB_BDRING_LEN;
437
438 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
439 ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
440 if (ep->rxbuffer == NULL) {
441 dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n",
442 size);
443 kfree(ep->rxframe);
444 return -ENOMEM;
445 }
446
447 ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
448 if (ep->rxbuf_d == DMA_ADDR_INVALID) {
449 ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
450 ep->rxbuffer,
451 size,
452 DMA_FROM_DEVICE);
453 ep->rxbufmap = 1;
454 } else {
455 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
456 ep->rxbuf_d, size,
457 DMA_FROM_DEVICE);
458 ep->rxbufmap = 0;
459 }
460
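	/* carve the rx buffer into per-BD slots, 4-byte aligned */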
461 size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
462 tmp = ep->rxbuf_d;
463 tmp = (u32)(((tmp >> 2) << 2) + 4);
464
465 for (i = 0; i < bdring_len - 1; i++) {
466 out_be32(&bd->buf, tmp);
467 out_be32((u32 __iomem *)bd, (R_E | R_I));
468 tmp = tmp + size;
469 bd++;
470 }
471 out_be32(&bd->buf, tmp);
472 out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
473
474 return 0;
475}
476
477static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
478{
479 struct qe_ep *ep = &udc->eps[pipe_num];
480 struct usb_ep_para __iomem *epparam;
481 u16 usep, logepnum;
482 u16 tmp;
483 u8 rtfcr = 0;
484
485 epparam = udc->ep_param[pipe_num];
486
487 usep = 0;
488 logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
489 usep |= (logepnum << USB_EPNUM_SHIFT);
490
491 switch (ep->ep.desc->bmAttributes & 0x03) {
492 case USB_ENDPOINT_XFER_BULK:
493 usep |= USB_TRANS_BULK;
494 break;
495 case USB_ENDPOINT_XFER_ISOC:
496 usep |= USB_TRANS_ISO;
497 break;
498 case USB_ENDPOINT_XFER_INT:
499 usep |= USB_TRANS_INT;
500 break;
501 default:
502 usep |= USB_TRANS_CTR;
503 break;
504 }
505
506 switch (ep->dir) {
507 case USB_DIR_OUT:
508 usep |= USB_THS_IGNORE_IN;
509 break;
510 case USB_DIR_IN:
511 usep |= USB_RHS_IGNORE_OUT;
512 break;
513 default:
514 break;
515 }
516 out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
517
518 rtfcr = 0x30;
519 out_8(&epparam->rbmr, rtfcr);
520 out_8(&epparam->tbmr, rtfcr);
521
522 tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
523
524 tmp = (u16)(((tmp >> 2) << 2) + 4);
525 out_be16(&epparam->mrblr, tmp);
526
527 return 0;
528}
529
530static int qe_ep_init(struct qe_udc *udc,
531 unsigned char pipe_num,
532 const struct usb_endpoint_descriptor *desc)
533{
534 struct qe_ep *ep = &udc->eps[pipe_num];
535 unsigned long flags;
536 int reval = 0;
537 u16 max = 0;
538
539 max = usb_endpoint_maxp(desc);
540
541
542
543
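	/* check the max packet size against the endpoint type and speed */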
544 if (pipe_num != 0) {
545 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
546 case USB_ENDPOINT_XFER_BULK:
547 if (strstr(ep->ep.name, "-iso")
548 || strstr(ep->ep.name, "-int"))
549 goto en_done;
550 switch (udc->gadget.speed) {
551 case USB_SPEED_HIGH:
552 if ((max == 128) || (max == 256) || (max == 512))
553 break;
554 default:
555 switch (max) {
556 case 4:
557 case 8:
558 case 16:
559 case 32:
560 case 64:
561 break;
				default:
					goto en_done;
565 }
566 }
567 break;
568 case USB_ENDPOINT_XFER_INT:
569 if (strstr(ep->ep.name, "-iso"))
570 goto en_done;
571 switch (udc->gadget.speed) {
572 case USB_SPEED_HIGH:
573 if (max <= 1024)
574 break;
575 case USB_SPEED_FULL:
576 if (max <= 64)
577 break;
578 default:
579 if (max <= 8)
580 break;
581 goto en_done;
582 }
583 break;
584 case USB_ENDPOINT_XFER_ISOC:
585 if (strstr(ep->ep.name, "-bulk")
586 || strstr(ep->ep.name, "-int"))
587 goto en_done;
588 switch (udc->gadget.speed) {
589 case USB_SPEED_HIGH:
590 if (max <= 1024)
591 break;
592 case USB_SPEED_FULL:
593 if (max <= 1023)
594 break;
595 default:
596 goto en_done;
597 }
598 break;
599 case USB_ENDPOINT_XFER_CONTROL:
600 if (strstr(ep->ep.name, "-iso")
601 || strstr(ep->ep.name, "-int"))
602 goto en_done;
603 switch (udc->gadget.speed) {
604 case USB_SPEED_HIGH:
605 case USB_SPEED_FULL:
606 switch (max) {
607 case 1:
608 case 2:
609 case 4:
610 case 8:
611 case 16:
612 case 32:
613 case 64:
614 break;
615 default:
616 goto en_done;
617 }
618 case USB_SPEED_LOW:
619 switch (max) {
620 case 1:
621 case 2:
622 case 4:
623 case 8:
624 break;
625 default:
626 goto en_done;
627 }
628 default:
629 goto en_done;
630 }
631 break;
632
633 default:
634 goto en_done;
635 }
636 }
637
638 spin_lock_irqsave(&udc->lock, flags);
639
640
641 ep->ep.maxpacket = max;
642 ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
643 ep->ep.desc = desc;
644 ep->stopped = 0;
645 ep->init = 1;
646
647 if (pipe_num == 0) {
648 ep->dir = USB_DIR_BOTH;
649 udc->ep0_dir = USB_DIR_OUT;
650 udc->ep0_state = WAIT_FOR_SETUP;
651 } else {
652 switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
653 case USB_DIR_OUT:
654 ep->dir = USB_DIR_OUT;
655 break;
656 case USB_DIR_IN:
657 ep->dir = USB_DIR_IN;
658 default:
659 break;
660 }
661 }
662
663
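	/* set up the BD rings and, for OUT/control endpoints, the rx side */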
664 qe_ep_bd_init(udc, pipe_num);
665 if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
666 reval = qe_ep_rxbd_update(ep);
667 if (reval)
668 goto en_done1;
669 }
670
671 if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
672 ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
673 if (ep->txframe == NULL) {
674 dev_err(udc->dev, "malloc txframe failed\n");
675 goto en_done2;
676 }
677 qe_frame_init(ep->txframe);
678 }
679
680 qe_ep_register_init(udc, pipe_num);
681
682
683
684 spin_unlock_irqrestore(&udc->lock, flags);
685
686 return 0;
687en_done2:
688 kfree(ep->rxbuffer);
689 kfree(ep->rxframe);
690en_done1:
691 spin_unlock_irqrestore(&udc->lock, flags);
692en_done:
693 dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
694 return -ENODEV;
695}
696
697static inline void qe_usb_enable(struct qe_udc *udc)
698{
699 setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
700}
701
702static inline void qe_usb_disable(struct qe_udc *udc)
703{
704 clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
705}
706
707
708
709
710
711
712
713
714
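/*----------------------------------------------------------------------------
 *	UDC transmit and receive processing
 *----------------------------------------------------------------------------*/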
715static void recycle_one_rxbd(struct qe_ep *ep)
716{
717 u32 bdstatus;
718
719 bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
720 bdstatus = R_I | R_E | (bdstatus & R_W);
721 out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
722
723 if (bdstatus & R_W)
724 ep->e_rxbd = ep->rxbase;
725 else
726 ep->e_rxbd++;
727}
728
729static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
730{
731 u32 bdstatus;
732 struct qe_bd __iomem *bd, *nextbd;
733 unsigned char stop = 0;
734
735 nextbd = ep->n_rxbd;
736 bd = ep->e_rxbd;
737 bdstatus = in_be32((u32 __iomem *)bd);
738
739 while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
740 bdstatus = R_E | R_I | (bdstatus & R_W);
741 out_be32((u32 __iomem *)bd, bdstatus);
742
743 if (bdstatus & R_W)
744 bd = ep->rxbase;
745 else
746 bd++;
747
748 bdstatus = in_be32((u32 __iomem *)bd);
749 if (stopatnext && (bd == nextbd))
750 stop = 1;
751 }
752
753 ep->e_rxbd = bd;
754}
755
756static void ep_recycle_rxbds(struct qe_ep *ep)
757{
758 struct qe_bd __iomem *bd = ep->n_rxbd;
759 u32 bdstatus;
760 u8 epnum = ep->epnum;
761 struct qe_udc *udc = ep->udc;
762
763 bdstatus = in_be32((u32 __iomem *)bd);
764 if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
765 bd = ep->rxbase +
766 ((in_be16(&udc->ep_param[epnum]->rbptr) -
767 in_be16(&udc->ep_param[epnum]->rbase))
768 >> 3);
769 bdstatus = in_be32((u32 __iomem *)bd);
770
771 if (bdstatus & R_W)
772 bd = ep->rxbase;
773 else
774 bd++;
775
776 ep->e_rxbd = bd;
777 recycle_rxbds(ep, 0);
778 ep->e_rxbd = ep->n_rxbd;
779 } else
780 recycle_rxbds(ep, 1);
781
782 if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
783 out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
784
785 if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
786 qe_eprx_normal(ep);
787
788 ep->localnack = 0;
789}
790
791static void setup_received_handle(struct qe_udc *udc,
792 struct usb_ctrlrequest *setup);
793static int qe_ep_rxframe_handle(struct qe_ep *ep);
794static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
795
796static int ep0_setup_handle(struct qe_udc *udc)
797{
798 struct qe_ep *ep = &udc->eps[0];
799 struct qe_frame *pframe;
800 unsigned int fsize;
801 u8 *cp;
802
803 pframe = ep->rxframe;
804 if ((frame_get_info(pframe) & PID_SETUP)
805 && (udc->ep0_state == WAIT_FOR_SETUP)) {
806 fsize = frame_get_length(pframe);
807 if (unlikely(fsize != 8))
808 return -EINVAL;
809 cp = (u8 *)&udc->local_setup_buff;
810 memcpy(cp, pframe->data, fsize);
811 ep->data01 = 1;
812
813
814 setup_received_handle(udc, &udc->local_setup_buff);
815 return 0;
816 }
817 return -EINVAL;
818}
819
820static int qe_ep0_rx(struct qe_udc *udc)
821{
822 struct qe_ep *ep = &udc->eps[0];
823 struct qe_frame *pframe;
824 struct qe_bd __iomem *bd;
825 u32 bdstatus, length;
826 u32 vaddr;
827
828 pframe = ep->rxframe;
829
830 if (ep->dir == USB_DIR_IN) {
831 dev_err(udc->dev, "ep0 not a control endpoint\n");
832 return -EINVAL;
833 }
834
835 bd = ep->n_rxbd;
836 bdstatus = in_be32((u32 __iomem *)bd);
837 length = bdstatus & BD_LENGTH_MASK;
838
839 while (!(bdstatus & R_E) && length) {
840 if ((bdstatus & R_F) && (bdstatus & R_L)
841 && !(bdstatus & R_ERROR)) {
842 if (length == USB_CRC_SIZE) {
843 udc->ep0_state = WAIT_FOR_SETUP;
844 dev_vdbg(udc->dev,
845 "receive a ZLP in status phase\n");
846 } else {
847 qe_frame_clean(pframe);
848 vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
849 frame_set_data(pframe, (u8 *)vaddr);
850 frame_set_length(pframe,
851 (length - USB_CRC_SIZE));
852 frame_set_status(pframe, FRAME_OK);
853 switch (bdstatus & R_PID) {
854 case R_PID_SETUP:
855 frame_set_info(pframe, PID_SETUP);
856 break;
857 case R_PID_DATA1:
858 frame_set_info(pframe, PID_DATA1);
859 break;
860 default:
861 frame_set_info(pframe, PID_DATA0);
862 break;
863 }
864
865 if ((bdstatus & R_PID) == R_PID_SETUP)
866 ep0_setup_handle(udc);
867 else
868 qe_ep_rxframe_handle(ep);
869 }
870 } else {
			dev_err(udc->dev, "received frame with error\n");
872 }
873
874
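		/* hand the BD back to the controller and move to the next one */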
875 recycle_one_rxbd(ep);
876
877
878 if (bdstatus & R_W)
879 bd = ep->rxbase;
880 else
881 bd++;
882
883 bdstatus = in_be32((u32 __iomem *)bd);
884 length = bdstatus & BD_LENGTH_MASK;
885
886 }
887
888 ep->n_rxbd = bd;
889
890 return 0;
891}
892
893static int qe_ep_rxframe_handle(struct qe_ep *ep)
894{
895 struct qe_frame *pframe;
896 u8 framepid = 0;
897 unsigned int fsize;
898 u8 *cp;
899 struct qe_req *req;
900
901 pframe = ep->rxframe;
902
903 if (frame_get_info(pframe) & PID_DATA1)
904 framepid = 0x1;
905
906 if (framepid != ep->data01) {
		dev_err(ep->udc->dev, "%s: data01 toggle mismatch\n", ep->name);
908 return -EIO;
909 }
910
911 fsize = frame_get_length(pframe);
912 if (list_empty(&ep->queue)) {
		dev_err(ep->udc->dev, "%s has no request queued\n", ep->name);
914 } else {
915 req = list_entry(ep->queue.next, struct qe_req, queue);
916
917 cp = (u8 *)(req->req.buf) + req->req.actual;
918 if (cp) {
919 memcpy(cp, pframe->data, fsize);
920 req->req.actual += fsize;
921 if ((fsize < ep->ep.maxpacket) ||
922 (req->req.actual >= req->req.length)) {
923 if (ep->epnum == 0)
924 ep0_req_complete(ep->udc, req);
925 else
926 done(ep, req, 0);
927 if (list_empty(&ep->queue) && ep->epnum != 0)
928 qe_eprx_nack(ep);
929 }
930 }
931 }
932
933 qe_ep_toggledata01(ep);
934
935 return 0;
936}
937
938static void ep_rx_tasklet(unsigned long data)
939{
940 struct qe_udc *udc = (struct qe_udc *)data;
941 struct qe_ep *ep;
942 struct qe_frame *pframe;
943 struct qe_bd __iomem *bd;
944 unsigned long flags;
945 u32 bdstatus, length;
946 u32 vaddr, i;
947
948 spin_lock_irqsave(&udc->lock, flags);
949
950 for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
951 ep = &udc->eps[i];
952
953 if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
			dev_dbg(udc->dev,
				"skipping IN ep or tasklet not enabled\n");
956 continue;
957 }
958
959 pframe = ep->rxframe;
960 bd = ep->n_rxbd;
961 bdstatus = in_be32((u32 __iomem *)bd);
962 length = bdstatus & BD_LENGTH_MASK;
963
964 while (!(bdstatus & R_E) && length) {
965 if (list_empty(&ep->queue)) {
966 qe_eprx_nack(ep);
				dev_dbg(udc->dev,
					"rx ep has no request queued, has_data %d\n",
					ep->has_data);
970 break;
971 }
972
973 if ((bdstatus & R_F) && (bdstatus & R_L)
974 && !(bdstatus & R_ERROR)) {
975 qe_frame_clean(pframe);
976 vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
977 frame_set_data(pframe, (u8 *)vaddr);
978 frame_set_length(pframe,
979 (length - USB_CRC_SIZE));
980 frame_set_status(pframe, FRAME_OK);
981 switch (bdstatus & R_PID) {
982 case R_PID_DATA1:
983 frame_set_info(pframe, PID_DATA1);
984 break;
985 case R_PID_SETUP:
986 frame_set_info(pframe, PID_SETUP);
987 break;
988 default:
989 frame_set_info(pframe, PID_DATA0);
990 break;
991 }
992
993 qe_ep_rxframe_handle(ep);
994 } else {
995 dev_err(udc->dev,
996 "error in received frame\n");
997 }
998
999
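			/* clear the BD length; the buffer pointer is reused */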
1000 out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
1001 ep->has_data--;
1002 if (!(ep->localnack))
1003 recycle_one_rxbd(ep);
1004
1005
1006 if (bdstatus & R_W)
1007 bd = ep->rxbase;
1008 else
1009 bd++;
1010
1011 bdstatus = in_be32((u32 __iomem *)bd);
1012 length = bdstatus & BD_LENGTH_MASK;
1013 }
1014
1015 ep->n_rxbd = bd;
1016
1017 if (ep->localnack)
1018 ep_recycle_rxbds(ep);
1019
1020 ep->enable_tasklet = 0;
1021 }
1022
1023 spin_unlock_irqrestore(&udc->lock, flags);
1024}
1025
1026static int qe_ep_rx(struct qe_ep *ep)
1027{
1028 struct qe_udc *udc;
1029 struct qe_frame *pframe;
1030 struct qe_bd __iomem *bd;
1031 u16 swoffs, ucoffs, emptybds;
1032
1033 udc = ep->udc;
1034 pframe = ep->rxframe;
1035
1036 if (ep->dir == USB_DIR_IN) {
1037 dev_err(udc->dev, "transmit ep in rx function\n");
1038 return -EINVAL;
1039 }
1040
1041 bd = ep->n_rxbd;
1042
1043 swoffs = (u16)(bd - ep->rxbase);
1044 ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1045 in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1046 if (swoffs < ucoffs)
1047 emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1048 else
1049 emptybds = swoffs - ucoffs;
1050
1051 if (emptybds < MIN_EMPTY_BDS) {
1052 qe_eprx_nack(ep);
1053 ep->localnack = 1;
1054 dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1055 }
1056 ep->has_data = USB_BDRING_LEN_RX - emptybds;
1057
1058 if (list_empty(&ep->queue)) {
1059 qe_eprx_nack(ep);
		dev_vdbg(udc->dev, "rx ep has no request queued; %d BDs hold data\n",
			 ep->has_data);
1062 return 0;
1063 }
1064
1065 tasklet_schedule(&udc->rx_tasklet);
1066 ep->enable_tasklet = 1;
1067
1068 return 0;
1069}
1070
1071
1072static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1073{
1074 struct qe_udc *udc = ep->udc;
1075 struct qe_bd __iomem *bd;
1076 u16 saveusbmr;
1077 u32 bdstatus, pidmask;
1078 u32 paddr;
1079
1080 if (ep->dir == USB_DIR_OUT) {
1081 dev_err(udc->dev, "receive ep passed to tx function\n");
1082 return -EINVAL;
1083 }
1084
1085
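	/* mask the Tx interrupts while the BD is being prepared */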
1086 saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1087 out_be16(&udc->usb_regs->usb_usbmr,
1088 saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1089
1090 bd = ep->n_txbd;
1091 bdstatus = in_be32((u32 __iomem *)bd);
1092
1093 if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
1094 if (frame_get_length(frame) == 0) {
1095 frame_set_data(frame, udc->nullbuf);
1096 frame_set_length(frame, 2);
1097 frame->info |= (ZLP | NO_CRC);
1098 dev_vdbg(udc->dev, "the frame size = 0\n");
1099 }
1100 paddr = virt_to_phys((void *)frame->data);
1101 out_be32(&bd->buf, paddr);
1102 bdstatus = (bdstatus&T_W);
1103 if (!(frame_get_info(frame) & NO_CRC))
1104 bdstatus |= T_R | T_I | T_L | T_TC
1105 | frame_get_length(frame);
1106 else
1107 bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1108
1109
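		/* the ep0 status-phase ZLP is always sent as DATA1 */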
1110 if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1111 ep->data01 = 0x1;
1112
1113 if (ep->data01) {
1114 pidmask = T_PID_DATA1;
1115 frame->info |= PID_DATA1;
1116 } else {
1117 pidmask = T_PID_DATA0;
1118 frame->info |= PID_DATA0;
1119 }
1120 bdstatus |= T_CNF;
1121 bdstatus |= pidmask;
1122 out_be32((u32 __iomem *)bd, bdstatus);
1123 qe_ep_filltxfifo(ep);
1124
1125
1126 out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1127
1128 qe_ep_toggledata01(ep);
1129 if (bdstatus & T_W)
1130 ep->n_txbd = ep->txbase;
1131 else
1132 ep->n_txbd++;
1133
1134 return 0;
1135 } else {
1136 out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1137 dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1138 return -EBUSY;
1139 }
1140}
1141
1142
1143
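/* after a BD has been transmitted, retire or refill the current tx request
 * (non-ep0 endpoints) */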
1144static int txcomplete(struct qe_ep *ep, unsigned char restart)
1145{
1146 if (ep->tx_req != NULL) {
1147 struct qe_req *req = ep->tx_req;
1148 unsigned zlp = 0, last_len = 0;
1149
1150 last_len = min_t(unsigned, req->req.length - ep->sent,
1151 ep->ep.maxpacket);
1152
1153 if (!restart) {
1154 int asent = ep->last;
1155 ep->sent += asent;
1156 ep->last -= asent;
1157 } else {
1158 ep->last = 0;
1159 }
1160
1161
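		/* a trailing ZLP is needed only when req->req.zero is set and
		 * the transfer ends on a full-sized packet */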
1162 if (req->req.zero) {
1163 if (last_len == 0 ||
1164 (req->req.length % ep->ep.maxpacket) != 0)
1165 zlp = 0;
1166 else
1167 zlp = 1;
1168 } else
1169 zlp = 0;
1170
1171
1172 if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1173 done(ep, ep->tx_req, 0);
1174 ep->tx_req = NULL;
1175 ep->last = 0;
1176 ep->sent = 0;
1177 }
1178 }
1179
1180
1181 if (ep->tx_req == NULL) {
1182 if (!list_empty(&ep->queue)) {
1183 ep->tx_req = list_entry(ep->queue.next, struct qe_req,
1184 queue);
1185 ep->last = 0;
1186 ep->sent = 0;
1187 }
1188 }
1189
1190 return 0;
1191}
1192
1193
1194static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1195{
1196 unsigned int size;
1197 u8 *buf;
1198
1199 qe_frame_clean(frame);
1200 size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1201 ep->ep.maxpacket);
1202 buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1203 if (buf && size) {
1204 ep->last = size;
1205 ep->tx_req->req.actual += size;
1206 frame_set_data(frame, buf);
1207 frame_set_length(frame, size);
1208 frame_set_status(frame, FRAME_OK);
1209 frame_set_info(frame, 0);
1210 return qe_ep_tx(ep, frame);
1211 }
1212 return -EIO;
1213}
1214
1215
1216static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1217{
1218 struct qe_udc *udc = ep->udc;
1219
1220 if (frame == NULL)
1221 return -ENODEV;
1222
1223 qe_frame_clean(frame);
1224 frame_set_data(frame, (u8 *)udc->nullbuf);
1225 frame_set_length(frame, 2);
1226 frame_set_status(frame, FRAME_OK);
1227 frame_set_info(frame, (ZLP | NO_CRC | infor));
1228
1229 return qe_ep_tx(ep, frame);
1230}
1231
1232static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1233{
1234 struct qe_req *req = ep->tx_req;
1235 int reval;
1236
1237 if (req == NULL)
1238 return -ENODEV;
1239
1240 if ((req->req.length - ep->sent) > 0)
1241 reval = qe_usb_senddata(ep, frame);
1242 else
1243 reval = sendnulldata(ep, frame, 0);
1244
1245 return reval;
1246}
1247
1248
1249
1250
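/* prime the status stage of an ep0 control transfer */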
1251static int ep0_prime_status(struct qe_udc *udc, int direction)
1252{
1253
1254 struct qe_ep *ep = &udc->eps[0];
1255
1256 if (direction == USB_DIR_IN) {
1257 udc->ep0_state = DATA_STATE_NEED_ZLP;
1258 udc->ep0_dir = USB_DIR_IN;
1259 sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1260 } else {
1261 udc->ep0_dir = USB_DIR_OUT;
1262 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1263 }
1264
1265 return 0;
1266}
1267
1268
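/* complete an ep0 request and advance the ep0 state machine */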
1269static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1270{
1271 struct qe_ep *ep = &udc->eps[0];
1272
1273
1274 switch (udc->ep0_state) {
1275 case DATA_STATE_XMIT:
1276 done(ep, req, 0);
1277
1278 if (ep0_prime_status(udc, USB_DIR_OUT))
1279 qe_ep0_stall(udc);
1280 break;
1281
1282 case DATA_STATE_NEED_ZLP:
1283 done(ep, req, 0);
1284 udc->ep0_state = WAIT_FOR_SETUP;
1285 break;
1286
1287 case DATA_STATE_RECV:
1288 done(ep, req, 0);
1289
1290 if (ep0_prime_status(udc, USB_DIR_IN))
1291 qe_ep0_stall(udc);
1292 break;
1293
1294 case WAIT_FOR_OUT_STATUS:
1295 done(ep, req, 0);
1296 udc->ep0_state = WAIT_FOR_SETUP;
1297 break;
1298
1299 case WAIT_FOR_SETUP:
1300 dev_vdbg(udc->dev, "Unexpected interrupt\n");
1301 break;
1302
1303 default:
1304 qe_ep0_stall(udc);
1305 break;
1306 }
1307}
1308
1309static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1310{
1311 struct qe_req *tx_req = NULL;
1312 struct qe_frame *frame = ep->txframe;
1313
1314 if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1315 if (!restart)
1316 ep->udc->ep0_state = WAIT_FOR_SETUP;
1317 else
1318 sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1319 return 0;
1320 }
1321
1322 tx_req = ep->tx_req;
1323 if (tx_req != NULL) {
1324 if (!restart) {
1325 int asent = ep->last;
1326 ep->sent += asent;
1327 ep->last -= asent;
1328 } else {
1329 ep->last = 0;
1330 }
1331
1332
1333 if ((ep->tx_req->req.length - ep->sent) <= 0) {
1334 ep->tx_req->req.actual = (unsigned int)ep->sent;
1335 ep0_req_complete(ep->udc, ep->tx_req);
1336 ep->tx_req = NULL;
1337 ep->last = 0;
1338 ep->sent = 0;
1339 }
1340 } else {
		dev_vdbg(ep->udc->dev, "ep0 has no request to complete\n");
1342 }
1343
1344 return 0;
1345}
1346
1347static int ep0_txframe_handle(struct qe_ep *ep)
1348{
1349
1350 if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1351 qe_ep_flushtxfifo(ep);
		dev_vdbg(ep->udc->dev, "ep0 transmit error, resending\n");
1353 if (frame_get_info(ep->txframe) & PID_DATA0)
1354 ep->data01 = 0;
1355 else
1356 ep->data01 = 1;
1357
1358 ep0_txcomplete(ep, 1);
1359 } else
1360 ep0_txcomplete(ep, 0);
1361
1362 frame_create_tx(ep, ep->txframe);
1363 return 0;
1364}
1365
1366static int qe_ep0_txconf(struct qe_ep *ep)
1367{
1368 struct qe_bd __iomem *bd;
1369 struct qe_frame *pframe;
1370 u32 bdstatus;
1371
1372 bd = ep->c_txbd;
1373 bdstatus = in_be32((u32 __iomem *)bd);
1374 while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1375 pframe = ep->txframe;
1376
1377
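		/* clear and recycle the confirmed tx BD, keeping its wrap bit */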
1378 out_be32((u32 __iomem *)bd, bdstatus & T_W);
1379 out_be32(&bd->buf, 0);
1380 if (bdstatus & T_W)
1381 ep->c_txbd = ep->txbase;
1382 else
1383 ep->c_txbd++;
1384
1385 if (ep->c_txbd == ep->n_txbd) {
1386 if (bdstatus & DEVICE_T_ERROR) {
1387 frame_set_status(pframe, FRAME_ERROR);
1388 if (bdstatus & T_TO)
1389 pframe->status |= TX_ER_TIMEOUT;
1390 if (bdstatus & T_UN)
1391 pframe->status |= TX_ER_UNDERUN;
1392 }
1393 ep0_txframe_handle(ep);
1394 }
1395
1396 bd = ep->c_txbd;
1397 bdstatus = in_be32((u32 __iomem *)bd);
1398 }
1399
1400 return 0;
1401}
1402
1403static int ep_txframe_handle(struct qe_ep *ep)
1404{
1405 if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1406 qe_ep_flushtxfifo(ep);
		dev_vdbg(ep->udc->dev, "%s transmit error, resending\n",
			 ep->ep.name);
1408 if (frame_get_info(ep->txframe) & PID_DATA0)
1409 ep->data01 = 0;
1410 else
1411 ep->data01 = 1;
1412
1413 txcomplete(ep, 1);
1414 } else
1415 txcomplete(ep, 0);
1416
1417 frame_create_tx(ep, ep->txframe);
1418 return 0;
1419}
1420
1421
1422static int qe_ep_txconf(struct qe_ep *ep)
1423{
1424 struct qe_bd __iomem *bd;
1425 struct qe_frame *pframe = NULL;
1426 u32 bdstatus;
1427 unsigned char breakonrxinterrupt = 0;
1428
1429 bd = ep->c_txbd;
1430 bdstatus = in_be32((u32 __iomem *)bd);
1431 while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1432 pframe = ep->txframe;
1433 if (bdstatus & DEVICE_T_ERROR) {
1434 frame_set_status(pframe, FRAME_ERROR);
1435 if (bdstatus & T_TO)
1436 pframe->status |= TX_ER_TIMEOUT;
1437 if (bdstatus & T_UN)
1438 pframe->status |= TX_ER_UNDERUN;
1439 }
1440
1441
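		/* clear and recycle the confirmed tx BD, keeping its wrap bit */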
1442 out_be32((u32 __iomem *)bd, bdstatus & T_W);
1443 out_be32(&bd->buf, 0);
1444 if (bdstatus & T_W)
1445 ep->c_txbd = ep->txbase;
1446 else
1447 ep->c_txbd++;
1448
1449
1450 ep_txframe_handle(ep);
1451 bd = ep->c_txbd;
1452 bdstatus = in_be32((u32 __iomem *)bd);
1453 }
1454 if (breakonrxinterrupt)
1455 return -EIO;
1456 else
1457 return 0;
1458}
1459
1460
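/* start transmitting a newly queued request if the endpoint is idle */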
1461static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1462{
1463 int reval = 0;
1464
1465 if (ep->tx_req == NULL) {
1466 ep->sent = 0;
1467 ep->last = 0;
1468 txcomplete(ep, 0);
1469 reval = frame_create_tx(ep, ep->txframe);
1470 }
1471 return reval;
1472}
1473
1474
1475static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1476{
1477 struct qe_udc *udc = ep->udc;
1478 struct qe_frame *pframe = NULL;
1479 struct qe_bd __iomem *bd;
1480 u32 bdstatus, length;
1481 u32 vaddr, fsize;
1482 u8 *cp;
1483 u8 finish_req = 0;
1484 u8 framepid;
1485
1486 if (list_empty(&ep->queue)) {
		dev_vdbg(udc->dev, "the request has already finished\n");
1488 return 0;
1489 }
1490 pframe = ep->rxframe;
1491
1492 bd = ep->n_rxbd;
1493 bdstatus = in_be32((u32 __iomem *)bd);
1494 length = bdstatus & BD_LENGTH_MASK;
1495
1496 while (!(bdstatus & R_E) && length) {
1497 if (finish_req)
1498 break;
1499 if ((bdstatus & R_F) && (bdstatus & R_L)
1500 && !(bdstatus & R_ERROR)) {
1501 qe_frame_clean(pframe);
1502 vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1503 frame_set_data(pframe, (u8 *)vaddr);
1504 frame_set_length(pframe, (length - USB_CRC_SIZE));
1505 frame_set_status(pframe, FRAME_OK);
1506 switch (bdstatus & R_PID) {
1507 case R_PID_DATA1:
1508 frame_set_info(pframe, PID_DATA1); break;
1509 default:
1510 frame_set_info(pframe, PID_DATA0); break;
1511 }
1512
1513
1514 if (frame_get_info(pframe) & PID_DATA1)
1515 framepid = 0x1;
1516 else
1517 framepid = 0;
1518
1519 if (framepid != ep->data01) {
				dev_vdbg(udc->dev, "data01 toggle mismatch\n");
1521 } else {
1522 fsize = frame_get_length(pframe);
1523
1524 cp = (u8 *)(req->req.buf) + req->req.actual;
1525 if (cp) {
1526 memcpy(cp, pframe->data, fsize);
1527 req->req.actual += fsize;
1528 if ((fsize < ep->ep.maxpacket)
1529 || (req->req.actual >=
1530 req->req.length)) {
1531 finish_req = 1;
1532 done(ep, req, 0);
1533 if (list_empty(&ep->queue))
1534 qe_eprx_nack(ep);
1535 }
1536 }
1537 qe_ep_toggledata01(ep);
1538 }
1539 } else {
			dev_err(udc->dev, "received frame with error\n");
1541 }
1542
1543
1544
1545 out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1546 ep->has_data--;
1547
1548
1549 if (bdstatus & R_W)
1550 bd = ep->rxbase;
1551 else
1552 bd++;
1553
1554 bdstatus = in_be32((u32 __iomem *)bd);
1555 length = bdstatus & BD_LENGTH_MASK;
1556 }
1557
1558 ep->n_rxbd = bd;
1559 ep_recycle_rxbds(ep);
1560
1561 return 0;
1562}
1563
1564
1565static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1566{
1567 if (ep->state == EP_STATE_NACK) {
1568 if (ep->has_data <= 0) {
1569
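			/* nothing buffered yet: re-enable reception and wait */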
1570 qe_eprx_normal(ep);
1571 } else {
1572
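			/* data already sits in the BD ring: consume it now */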
1573 ep_req_rx(ep, req);
1574 }
1575 }
1576
1577 return 0;
1578}
1579
1580
1581
1582
1583
1584
1585
1586
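/*----------------------------------------------------------------------------
 *	Gadget-layer endpoint operations
 *----------------------------------------------------------------------------*/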
1587static int qe_ep_enable(struct usb_ep *_ep,
1588 const struct usb_endpoint_descriptor *desc)
1589{
1590 struct qe_udc *udc;
1591 struct qe_ep *ep;
1592 int retval = 0;
1593 unsigned char epnum;
1594
1595 ep = container_of(_ep, struct qe_ep, ep);
1596
1597
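	/* catch various bogus parameters */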
1598 if (!_ep || !desc || _ep->name == ep_name[0] ||
1599 (desc->bDescriptorType != USB_DT_ENDPOINT))
1600 return -EINVAL;
1601
1602 udc = ep->udc;
1603 if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1604 return -ESHUTDOWN;
1605
1606 epnum = (u8)desc->bEndpointAddress & 0xF;
1607
1608 retval = qe_ep_init(udc, epnum, desc);
1609 if (retval != 0) {
1610 cpm_muram_free(cpm_muram_offset(ep->rxbase));
1611 dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1612 return -EINVAL;
1613 }
1614 dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1615 return 0;
1616}
1617
1618static int qe_ep_disable(struct usb_ep *_ep)
1619{
1620 struct qe_udc *udc;
1621 struct qe_ep *ep;
1622 unsigned long flags;
1623 unsigned int size;
1624
1625 ep = container_of(_ep, struct qe_ep, ep);
1626 udc = ep->udc;
1627
1628 if (!_ep || !ep->ep.desc) {
1629 dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1630 return -EINVAL;
1631 }
1632
1633 spin_lock_irqsave(&udc->lock, flags);
1634
1635 nuke(ep, -ESHUTDOWN);
1636 ep->ep.desc = NULL;
1637 ep->stopped = 1;
1638 ep->tx_req = NULL;
1639 qe_ep_reset(udc, ep->epnum);
1640 spin_unlock_irqrestore(&udc->lock, flags);
1641
1642 cpm_muram_free(cpm_muram_offset(ep->rxbase));
1643
1644 if (ep->dir == USB_DIR_OUT)
1645 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1646 (USB_BDRING_LEN_RX + 1);
1647 else
1648 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1649 (USB_BDRING_LEN + 1);
1650
1651 if (ep->dir != USB_DIR_IN) {
1652 kfree(ep->rxframe);
1653 if (ep->rxbufmap) {
1654 dma_unmap_single(udc->gadget.dev.parent,
1655 ep->rxbuf_d, size,
1656 DMA_FROM_DEVICE);
1657 ep->rxbuf_d = DMA_ADDR_INVALID;
1658 } else {
1659 dma_sync_single_for_cpu(
1660 udc->gadget.dev.parent,
1661 ep->rxbuf_d, size,
1662 DMA_FROM_DEVICE);
1663 }
1664 kfree(ep->rxbuffer);
1665 }
1666
1667 if (ep->dir != USB_DIR_OUT)
1668 kfree(ep->txframe);
1669
1670 dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1671 return 0;
1672}
1673
1674static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1675{
1676 struct qe_req *req;
1677
1678 req = kzalloc(sizeof(*req), gfp_flags);
1679 if (!req)
1680 return NULL;
1681
1682 req->req.dma = DMA_ADDR_INVALID;
1683
1684 INIT_LIST_HEAD(&req->queue);
1685
1686 return &req->req;
1687}
1688
1689static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1690{
1691 struct qe_req *req;
1692
1693 req = container_of(_req, struct qe_req, req);
1694
1695 if (_req)
1696 kfree(req);
1697}
1698
1699static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1700{
1701 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1702 struct qe_req *req = container_of(_req, struct qe_req, req);
1703 struct qe_udc *udc;
1704 int reval;
1705
1706 udc = ep->udc;
1707
1708 if (!_req || !req->req.complete || !req->req.buf
1709 || !list_empty(&req->queue)) {
1710 dev_dbg(udc->dev, "bad params\n");
1711 return -EINVAL;
1712 }
1713 if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1714 dev_dbg(udc->dev, "bad ep\n");
1715 return -EINVAL;
1716 }
1717
1718 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1719 return -ESHUTDOWN;
1720
1721 req->ep = ep;
1722
1723
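	/* map the request buffer for DMA unless the gadget already did */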
1724 if (req->req.dma == DMA_ADDR_INVALID) {
1725 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1726 req->req.buf,
1727 req->req.length,
1728 ep_is_in(ep)
1729 ? DMA_TO_DEVICE :
1730 DMA_FROM_DEVICE);
1731 req->mapped = 1;
1732 } else {
1733 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1734 req->req.dma, req->req.length,
1735 ep_is_in(ep)
1736 ? DMA_TO_DEVICE :
1737 DMA_FROM_DEVICE);
1738 req->mapped = 0;
1739 }
1740
1741 req->req.status = -EINPROGRESS;
1742 req->req.actual = 0;
1743
1744 list_add_tail(&req->queue, &ep->queue);
	dev_vdbg(udc->dev, "gadget queued request on %s, length %d\n",
		 ep->name, req->req.length);
1747
1748
1749 if (ep_is_in(ep))
1750 reval = ep_req_send(ep, req);
1751
1752
1753 if (ep_index(ep) == 0 && req->req.length > 0) {
1754 if (ep_is_in(ep))
1755 udc->ep0_state = DATA_STATE_XMIT;
1756 else
1757 udc->ep0_state = DATA_STATE_RECV;
1758 }
1759
1760 if (ep->dir == USB_DIR_OUT)
1761 reval = ep_req_receive(ep, req);
1762
1763 return 0;
1764}
1765
1766
1767static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1768 gfp_t gfp_flags)
1769{
1770 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1771 struct qe_udc *udc = ep->udc;
1772 unsigned long flags;
1773 int ret;
1774
1775 spin_lock_irqsave(&udc->lock, flags);
1776 ret = __qe_ep_queue(_ep, _req);
1777 spin_unlock_irqrestore(&udc->lock, flags);
1778 return ret;
1779}
1780
1781
1782static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1783{
1784 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1785 struct qe_req *req;
1786 unsigned long flags;
1787
1788 if (!_ep || !_req)
1789 return -EINVAL;
1790
1791 spin_lock_irqsave(&ep->udc->lock, flags);
1792
1793
1794 list_for_each_entry(req, &ep->queue, queue) {
1795 if (&req->req == _req)
1796 break;
1797 }
1798
1799 if (&req->req != _req) {
1800 spin_unlock_irqrestore(&ep->udc->lock, flags);
1801 return -EINVAL;
1802 }
1803
1804 done(ep, req, -ECONNRESET);
1805
1806 spin_unlock_irqrestore(&ep->udc->lock, flags);
1807 return 0;
1808}
1809
1810
1811
1812
1813
1814
1815
1816static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1817{
1818 struct qe_ep *ep;
1819 unsigned long flags;
1820 int status = -EOPNOTSUPP;
1821 struct qe_udc *udc;
1822
	ep = container_of(_ep, struct qe_ep, ep);
	udc = ep->udc;

	/* catch a halt request on an endpoint that was never enabled */
	if (!_ep || !ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}
1830
1831
1832 if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1833 status = -EAGAIN;
1834 goto out;
1835 }
1836
1837 status = 0;
1838 spin_lock_irqsave(&ep->udc->lock, flags);
1839 qe_eptx_stall_change(ep, value);
1840 qe_eprx_stall_change(ep, value);
1841 spin_unlock_irqrestore(&ep->udc->lock, flags);
1842
1843 if (ep->epnum == 0) {
1844 udc->ep0_state = WAIT_FOR_SETUP;
1845 udc->ep0_dir = 0;
1846 }
1847
1848
1849 if (value == 0)
1850 ep->data01 = 0;
1851out:
1852 dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
1853 value ? "set" : "clear", status);
1854
1855 return status;
1856}
1857
static const struct usb_ep_ops qe_ep_ops = {
1859 .enable = qe_ep_enable,
1860 .disable = qe_ep_disable,
1861
1862 .alloc_request = qe_alloc_request,
1863 .free_request = qe_free_request,
1864
1865 .queue = qe_ep_queue,
1866 .dequeue = qe_ep_dequeue,
1867
1868 .set_halt = qe_ep_set_halt,
1869};
1870
1871
1872
1873
1874
1875
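/*----------------------------------------------------------------------------
 *	Gadget operations
 *----------------------------------------------------------------------------*/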
1876static int qe_get_frame(struct usb_gadget *gadget)
1877{
1878 struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1879 u16 tmp;
1880
1881 tmp = in_be16(&udc->usb_param->frame_n);
1882 if (tmp & 0x8000)
1883 tmp = tmp & 0x07ff;
1884 else
1885 tmp = -EINVAL;
1886
1887 return (int)tmp;
1888}
1889
1890static int fsl_qe_start(struct usb_gadget *gadget,
1891 struct usb_gadget_driver *driver);
1892static int fsl_qe_stop(struct usb_gadget *gadget,
1893 struct usb_gadget_driver *driver);
1894
1895
1896static const struct usb_gadget_ops qe_gadget_ops = {
1897 .get_frame = qe_get_frame,
1898 .udc_start = fsl_qe_start,
1899 .udc_stop = fsl_qe_stop,
1900};
1901
1902
1903
1904
1905static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1906{
1907 struct qe_ep *ep = &udc->eps[pipe];
1908
1909 nuke(ep, -ECONNRESET);
1910 ep->tx_req = NULL;
1911 return 0;
1912}
1913
1914static int reset_queues(struct qe_udc *udc)
1915{
1916 u8 pipe;
1917
1918 for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1919 udc_reset_ep_queue(udc, pipe);
1920
1921
1922 spin_unlock(&udc->lock);
1923 udc->driver->disconnect(&udc->gadget);
1924 spin_lock(&udc->lock);
1925
1926 return 0;
1927}
1928
1929static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1930 u16 length)
1931{
1932
1933 udc->device_address = (u8) value;
1934
1935 udc->usb_state = USB_STATE_ADDRESS;
1936
1937
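	/* the new address is written to the controller by tx_irq() once the
	 * status stage completes */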
1938 if (ep0_prime_status(udc, USB_DIR_IN))
1939 qe_ep0_stall(udc);
1940}
1941
1942static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1943{
1944 struct qe_req *req = container_of(_req, struct qe_req, req);
1945
1946 req->req.buf = NULL;
1947 kfree(req);
1948}
1949
1950static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1951 u16 index, u16 length)
1952{
1953 u16 usb_status = 0;
1954 struct qe_req *req;
1955 struct qe_ep *ep;
1956 int status = 0;
1957
1958 ep = &udc->eps[0];
1959 if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1960
1961 usb_status = 1 << USB_DEVICE_SELF_POWERED;
1962 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1963
1964
1965 usb_status = 0;
1966 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1967
1968 int pipe = index & USB_ENDPOINT_NUMBER_MASK;
1969 struct qe_ep *target_ep = &udc->eps[pipe];
1970 u16 usep;
1971
1972
1973 if (!target_ep->ep.desc)
1974 goto stall;
1975
1976 usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
1977 if (index & USB_DIR_IN) {
1978 if (target_ep->dir != USB_DIR_IN)
1979 goto stall;
1980 if ((usep & USB_THS_MASK) == USB_THS_STALL)
1981 usb_status = 1 << USB_ENDPOINT_HALT;
1982 } else {
1983 if (target_ep->dir != USB_DIR_OUT)
1984 goto stall;
1985 if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1986 usb_status = 1 << USB_ENDPOINT_HALT;
1987 }
1988 }
1989
	/* this path runs in interrupt context with udc->lock held, so the
	 * status request has to be allocated atomically */
	req = container_of(qe_alloc_request(&ep->ep, GFP_ATOMIC),
			struct qe_req, req);
1992 req->req.length = 2;
1993 req->req.buf = udc->statusbuf;
1994 *(u16 *)req->req.buf = cpu_to_le16(usb_status);
1995 req->req.status = -EINPROGRESS;
1996 req->req.actual = 0;
1997 req->req.complete = ownercomplete;
1998
1999 udc->ep0_dir = USB_DIR_IN;
2000
2001
2002 status = __qe_ep_queue(&ep->ep, &req->req);
2003
2004 if (status == 0)
2005 return;
2006stall:
	dev_err(udc->dev, "Can't respond to getstatus request\n");
2008 qe_ep0_stall(udc);
2009}
2010
2011
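/* decode and dispatch a SETUP packet (fields arrive in USB little endian) */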
2012static void setup_received_handle(struct qe_udc *udc,
2013 struct usb_ctrlrequest *setup)
2014{
2015
2016 u16 wValue = le16_to_cpu(setup->wValue);
2017 u16 wIndex = le16_to_cpu(setup->wIndex);
2018 u16 wLength = le16_to_cpu(setup->wLength);
2019
2020
2021 udc_reset_ep_queue(udc, 0);
2022
2023 if (setup->bRequestType & USB_DIR_IN)
2024 udc->ep0_dir = USB_DIR_IN;
2025 else
2026 udc->ep0_dir = USB_DIR_OUT;
2027
2028 switch (setup->bRequest) {
2029 case USB_REQ_GET_STATUS:
2030
2031 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2032 != (USB_DIR_IN | USB_TYPE_STANDARD))
2033 break;
2034 ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2035 wLength);
2036 return;
2037
2038 case USB_REQ_SET_ADDRESS:
2039
2040 if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2041 USB_RECIP_DEVICE))
2042 break;
2043 ch9setaddress(udc, wValue, wIndex, wLength);
2044 return;
2045
2046 case USB_REQ_CLEAR_FEATURE:
2047 case USB_REQ_SET_FEATURE:
2048
2049 if ((setup->bRequestType & USB_TYPE_MASK)
2050 != USB_TYPE_STANDARD)
2051 break;
2052
2053 if ((setup->bRequestType & USB_RECIP_MASK)
2054 == USB_RECIP_ENDPOINT) {
2055 int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2056 struct qe_ep *ep;
2057
2058 if (wValue != 0 || wLength != 0
2059 || pipe > USB_MAX_ENDPOINTS)
2060 break;
2061 ep = &udc->eps[pipe];
2062
2063 spin_unlock(&udc->lock);
2064 qe_ep_set_halt(&ep->ep,
2065 (setup->bRequest == USB_REQ_SET_FEATURE)
2066 ? 1 : 0);
2067 spin_lock(&udc->lock);
2068 }
2069
2070 ep0_prime_status(udc, USB_DIR_IN);
2071
2072 return;
2073
2074 default:
2075 break;
2076 }
2077
2078 if (wLength) {
2079
2080 if (setup->bRequestType & USB_DIR_IN) {
2081 udc->ep0_state = DATA_STATE_XMIT;
2082 udc->ep0_dir = USB_DIR_IN;
2083 } else {
2084 udc->ep0_state = DATA_STATE_RECV;
2085 udc->ep0_dir = USB_DIR_OUT;
2086 }
2087 spin_unlock(&udc->lock);
2088 if (udc->driver->setup(&udc->gadget,
2089 &udc->local_setup_buff) < 0)
2090 qe_ep0_stall(udc);
2091 spin_lock(&udc->lock);
2092 } else {
2093
2094 udc->ep0_dir = USB_DIR_IN;
2095 spin_unlock(&udc->lock);
2096 if (udc->driver->setup(&udc->gadget,
2097 &udc->local_setup_buff) < 0)
2098 qe_ep0_stall(udc);
2099 spin_lock(&udc->lock);
2100 udc->ep0_state = DATA_STATE_NEED_ZLP;
2101 }
2102}
2103
2104
2105
2106
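/*----------------------------------------------------------------------------
 *	USB interrupt handlers
 *----------------------------------------------------------------------------*/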
2107static void suspend_irq(struct qe_udc *udc)
2108{
2109 udc->resume_state = udc->usb_state;
2110 udc->usb_state = USB_STATE_SUSPENDED;
2111
2112
2113 if (udc->driver->suspend)
2114 udc->driver->suspend(&udc->gadget);
2115}
2116
2117static void resume_irq(struct qe_udc *udc)
2118{
2119 udc->usb_state = udc->resume_state;
2120 udc->resume_state = 0;
2121
2122
2123 if (udc->driver->resume)
2124 udc->driver->resume(&udc->gadget);
2125}
2126
2127static void idle_irq(struct qe_udc *udc)
2128{
2129 u8 usbs;
2130
2131 usbs = in_8(&udc->usb_regs->usb_usbs);
2132 if (usbs & USB_IDLE_STATUS_MASK) {
2133 if ((udc->usb_state) != USB_STATE_SUSPENDED)
2134 suspend_irq(udc);
2135 } else {
2136 if (udc->usb_state == USB_STATE_SUSPENDED)
2137 resume_irq(udc);
2138 }
2139}
2140
2141static int reset_irq(struct qe_udc *udc)
2142{
2143 unsigned char i;
2144
2145 if (udc->usb_state == USB_STATE_DEFAULT)
2146 return 0;
2147
2148 qe_usb_disable(udc);
2149 out_8(&udc->usb_regs->usb_usadr, 0);
2150
2151 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2152 if (udc->eps[i].init)
2153 qe_ep_reset(udc, i);
2154 }
2155
2156 reset_queues(udc);
2157 udc->usb_state = USB_STATE_DEFAULT;
2158 udc->ep0_state = WAIT_FOR_SETUP;
2159 udc->ep0_dir = USB_DIR_OUT;
2160 qe_usb_enable(udc);
2161 return 0;
2162}
2163
2164static int bsy_irq(struct qe_udc *udc)
2165{
2166 return 0;
2167}
2168
2169static int txe_irq(struct qe_udc *udc)
2170{
2171 return 0;
2172}
2173
2174
2175static int tx_irq(struct qe_udc *udc)
2176{
2177 struct qe_ep *ep;
2178 struct qe_bd __iomem *bd;
2179 int i, res = 0;
2180
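	/* the SET_ADDRESS status stage has finished: latch the new address */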
2181 if ((udc->usb_state == USB_STATE_ADDRESS)
2182 && (in_8(&udc->usb_regs->usb_usadr) == 0))
2183 out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2184
2185 for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2186 ep = &udc->eps[i];
2187 if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2188 bd = ep->c_txbd;
2189 if (!(in_be32((u32 __iomem *)bd) & T_R)
2190 && (in_be32(&bd->buf))) {
2191
2192 if (ep->epnum == 0)
2193 res = qe_ep0_txconf(ep);
2194 else
2195 res = qe_ep_txconf(ep);
2196 }
2197 }
2198 }
2199 return res;
2200}
2201
2202
2203
2204static void rx_irq(struct qe_udc *udc)
2205{
2206 struct qe_ep *ep;
2207 struct qe_bd __iomem *bd;
2208 int i;
2209
2210 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2211 ep = &udc->eps[i];
2212 if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2213 bd = ep->n_rxbd;
2214 if (!(in_be32((u32 __iomem *)bd) & R_E)
2215 && (in_be32(&bd->buf))) {
2216 if (ep->epnum == 0) {
2217 qe_ep0_rx(udc);
2218 } else {
2219
2220 qe_ep_rx(ep);
2221 }
2222 }
2223 }
2224 }
2225}
2226
2227static irqreturn_t qe_udc_irq(int irq, void *_udc)
2228{
2229 struct qe_udc *udc = (struct qe_udc *)_udc;
2230 u16 irq_src;
2231 irqreturn_t status = IRQ_NONE;
2232 unsigned long flags;
2233
2234 spin_lock_irqsave(&udc->lock, flags);
2235
2236 irq_src = in_be16(&udc->usb_regs->usb_usber) &
2237 in_be16(&udc->usb_regs->usb_usbmr);
2238
2239 out_be16(&udc->usb_regs->usb_usber, irq_src);
2240
2241 if (irq_src & USB_E_IDLE_MASK) {
2242 idle_irq(udc);
2243 irq_src &= ~USB_E_IDLE_MASK;
2244 status = IRQ_HANDLED;
2245 }
2246
2247 if (irq_src & USB_E_TXB_MASK) {
2248 tx_irq(udc);
2249 irq_src &= ~USB_E_TXB_MASK;
2250 status = IRQ_HANDLED;
2251 }
2252
2253 if (irq_src & USB_E_RXB_MASK) {
2254 rx_irq(udc);
2255 irq_src &= ~USB_E_RXB_MASK;
2256 status = IRQ_HANDLED;
2257 }
2258
2259 if (irq_src & USB_E_RESET_MASK) {
2260 reset_irq(udc);
2261 irq_src &= ~USB_E_RESET_MASK;
2262 status = IRQ_HANDLED;
2263 }
2264
2265 if (irq_src & USB_E_BSY_MASK) {
2266 bsy_irq(udc);
2267 irq_src &= ~USB_E_BSY_MASK;
2268 status = IRQ_HANDLED;
2269 }
2270
2271 if (irq_src & USB_E_TXE_MASK) {
2272 txe_irq(udc);
2273 irq_src &= ~USB_E_TXE_MASK;
2274 status = IRQ_HANDLED;
2275 }
2276
2277 spin_unlock_irqrestore(&udc->lock, flags);
2278
2279 return status;
2280}
2281
2282
2283
2284
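/*----------------------------------------------------------------------------
 *	Gadget driver binding and platform driver glue
 *----------------------------------------------------------------------------*/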
2285static int fsl_qe_start(struct usb_gadget *gadget,
2286 struct usb_gadget_driver *driver)
2287{
2288 struct qe_udc *udc;
2289 unsigned long flags;
2290
2291 udc = container_of(gadget, struct qe_udc, gadget);
2292
2293 spin_lock_irqsave(&udc->lock, flags);
2294
2295 driver->driver.bus = NULL;
2296
2297 udc->driver = driver;
2298 udc->gadget.speed = driver->max_speed;
2299
2300
2301 qe_usb_enable(udc);
2302
2303 out_be16(&udc->usb_regs->usb_usber, 0xffff);
2304 out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2305 udc->usb_state = USB_STATE_ATTACHED;
2306 udc->ep0_state = WAIT_FOR_SETUP;
2307 udc->ep0_dir = USB_DIR_OUT;
2308 spin_unlock_irqrestore(&udc->lock, flags);
2309
2310 dev_info(udc->dev, "%s bind to driver %s\n", udc->gadget.name,
2311 driver->driver.name);
2312 return 0;
2313}
2314
2315static int fsl_qe_stop(struct usb_gadget *gadget,
2316 struct usb_gadget_driver *driver)
2317{
2318 struct qe_udc *udc;
2319 struct qe_ep *loop_ep;
2320 unsigned long flags;
2321
2322 udc = container_of(gadget, struct qe_udc, gadget);
2323
2324 qe_usb_disable(udc);
2325
2326
2327 udc->usb_state = USB_STATE_ATTACHED;
2328 udc->ep0_state = WAIT_FOR_SETUP;
2329 udc->ep0_dir = 0;
2330
2331
2332 spin_lock_irqsave(&udc->lock, flags);
2333 udc->gadget.speed = USB_SPEED_UNKNOWN;
2334 nuke(&udc->eps[0], -ESHUTDOWN);
2335 list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2336 nuke(loop_ep, -ESHUTDOWN);
2337 spin_unlock_irqrestore(&udc->lock, flags);
2338
2339 udc->driver = NULL;
2340
	dev_info(udc->dev, "unregistered gadget driver '%s'\n",
		 driver->driver.name);
2343 return 0;
2344}
2345
2346
2347static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2348{
2349 struct qe_udc *udc;
2350 struct device_node *np = ofdev->dev.of_node;
2351 unsigned int tmp_addr = 0;
2352 struct usb_device_para __iomem *usbpram;
2353 unsigned int i;
2354 u64 size;
2355 u32 offset;
2356
2357 udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2358 if (udc == NULL) {
2359 dev_err(&ofdev->dev, "malloc udc failed\n");
2360 goto cleanup;
2361 }
2362
2363 udc->dev = &ofdev->dev;
2364
2365
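	/* the second "reg" entry points at the USB parameter RAM in MURAM */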
2366 offset = *of_get_address(np, 1, &size, NULL);
2367 udc->usb_param = cpm_muram_addr(offset);
2368 memset_io(udc->usb_param, 0, size);
2369
2370 usbpram = udc->usb_param;
2371 out_be16(&usbpram->frame_n, 0);
2372 out_be32(&usbpram->rstate, 0);
2373
2374 tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2375 sizeof(struct usb_ep_para)),
2376 USB_EP_PARA_ALIGNMENT);
2377 if (IS_ERR_VALUE(tmp_addr))
2378 goto cleanup;
2379
2380 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2381 out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2382 udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2383 tmp_addr += 32;
2384 }
2385
2386 memset_io(udc->ep_param[0], 0,
2387 USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2388
2389 udc->resume_state = USB_STATE_NOTATTACHED;
2390 udc->usb_state = USB_STATE_POWERED;
2391 udc->ep0_dir = 0;
2392
2393 spin_lock_init(&udc->lock);
2394 return udc;
2395
2396cleanup:
2397 kfree(udc);
2398 return NULL;
2399}
2400
2401
2402static int qe_udc_reg_init(struct qe_udc *udc)
2403{
2404 struct usb_ctlr __iomem *qe_usbregs;
2405 qe_usbregs = udc->usb_regs;
2406
2407
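	/* the controller has to be enabled in order to change its mode */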
2408 out_8(&qe_usbregs->usb_usmod, 0x01);
2409
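	/* mode is set; keep the controller disabled until setup is complete */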
2410 out_8(&qe_usbregs->usb_usmod, 0x00);
2411
2412
2413 out_be16(&qe_usbregs->usb_usbmr, 0);
2414 out_8(&qe_usbregs->usb_uscom, 0);
2415 out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2416
2417 return 0;
2418}
2419
2420static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2421{
2422 struct qe_ep *ep = &udc->eps[pipe_num];
2423
2424 ep->udc = udc;
2425 strcpy(ep->name, ep_name[pipe_num]);
2426 ep->ep.name = ep_name[pipe_num];
2427
2428 ep->ep.ops = &qe_ep_ops;
2429 ep->stopped = 1;
2430 ep->ep.maxpacket = (unsigned short) ~0;
2431 ep->ep.desc = NULL;
2432 ep->dir = 0xff;
2433 ep->epnum = (u8)pipe_num;
2434 ep->sent = 0;
2435 ep->last = 0;
2436 ep->init = 0;
2437 ep->rxframe = NULL;
2438 ep->txframe = NULL;
2439 ep->tx_req = NULL;
2440 ep->state = EP_STATE_IDLE;
2441 ep->has_data = 0;
2442
2443
2444 INIT_LIST_HEAD(&ep->queue);
2445
2446
2447 if (pipe_num != 0)
2448 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2449
2450 ep->gadget = &udc->gadget;
2451
2452 return 0;
2453}
2454
2455
2456
2457
2458static void qe_udc_release(struct device *dev)
2459{
2460 struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2461 int i;
2462
2463 complete(udc->done);
2464 cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2465 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2466 udc->ep_param[i] = NULL;
2467
2468 kfree(udc);
2469}
2470
2471
2472static const struct of_device_id qe_udc_match[];
2473static int qe_udc_probe(struct platform_device *ofdev)
2474{
2475 struct qe_udc *udc;
2476 const struct of_device_id *match;
2477 struct device_node *np = ofdev->dev.of_node;
2478 struct qe_ep *ep;
2479 unsigned int ret = 0;
2480 unsigned int i;
2481 const void *prop;
2482
2483 match = of_match_device(qe_udc_match, &ofdev->dev);
2484 if (!match)
2485 return -EINVAL;
2486
2487 prop = of_get_property(np, "mode", NULL);
2488 if (!prop || strcmp(prop, "peripheral"))
2489 return -ENODEV;
2490
2491
2492 udc = qe_udc_config(ofdev);
2493 if (!udc) {
2494 dev_err(&ofdev->dev, "failed to initialize\n");
2495 return -ENOMEM;
2496 }
2497
2498 udc->soc_type = (unsigned long)match->data;
2499 udc->usb_regs = of_iomap(np, 0);
2500 if (!udc->usb_regs) {
2501 ret = -ENOMEM;
2502 goto err1;
2503 }
2504
2505
2506
2507 qe_udc_reg_init(udc);
2508
2509
2510
2511 udc->gadget.ops = &qe_gadget_ops;
2512
2513
2514 udc->gadget.ep0 = &udc->eps[0].ep;
2515
2516 INIT_LIST_HEAD(&udc->gadget.ep_list);
2517
2518
2519 udc->gadget.speed = USB_SPEED_UNKNOWN;
2520
2521
2522 udc->gadget.name = driver_name;
2523 udc->gadget.dev.parent = &ofdev->dev;
2524
2525
2526 for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
2527
2528
2529
2530
2531
2532 qe_ep_config(udc, (unsigned char)i);
2533 }
2534
2535
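	/* only ep0 is initialized here; the others wait for ep_enable() */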
2536 ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2537 if (ret)
2538 goto err2;
2539
2540
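	/* dummy buffer backing the 2-byte frames used to send ZLPs */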
2541 udc->nullbuf = kzalloc(256, GFP_KERNEL);
2542 if (udc->nullbuf == NULL) {
2543 dev_err(udc->dev, "cannot alloc nullbuf\n");
2544 ret = -ENOMEM;
2545 goto err3;
2546 }
2547
2548
2549 udc->statusbuf = kzalloc(2, GFP_KERNEL);
2550 if (udc->statusbuf == NULL) {
2551 ret = -ENOMEM;
2552 goto err4;
2553 }
2554
2555 udc->nullp = virt_to_phys((void *)udc->nullbuf);
2556 if (udc->nullp == DMA_ADDR_INVALID) {
2557 udc->nullp = dma_map_single(
2558 udc->gadget.dev.parent,
2559 udc->nullbuf,
2560 256,
2561 DMA_TO_DEVICE);
2562 udc->nullmap = 1;
2563 } else {
2564 dma_sync_single_for_device(udc->gadget.dev.parent,
2565 udc->nullp, 256,
2566 DMA_TO_DEVICE);
2567 }
2568
2569 tasklet_init(&udc->rx_tasklet, ep_rx_tasklet,
2570 (unsigned long)udc);
2571
2572 udc->usb_irq = irq_of_parse_and_map(np, 0);
2573 if (!udc->usb_irq) {
2574 ret = -EINVAL;
2575 goto err_noirq;
2576 }
2577
2578 ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2579 driver_name, udc);
2580 if (ret) {
2581 dev_err(udc->dev, "cannot request irq %d err %d\n",
2582 udc->usb_irq, ret);
2583 goto err5;
2584 }
2585
2586 ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
2587 qe_udc_release);
2588 if (ret)
2589 goto err6;
2590
2591 dev_set_drvdata(&ofdev->dev, udc);
2592 dev_info(udc->dev,
2593 "%s USB controller initialized as device\n",
2594 (udc->soc_type == PORT_QE) ? "QE" : "CPM");
2595 return 0;
2596
2597err6:
2598 free_irq(udc->usb_irq, udc);
2599err5:
2600 irq_dispose_mapping(udc->usb_irq);
2601err_noirq:
2602 if (udc->nullmap) {
2603 dma_unmap_single(udc->gadget.dev.parent,
2604 udc->nullp, 256,
2605 DMA_TO_DEVICE);
2606 udc->nullp = DMA_ADDR_INVALID;
2607 } else {
2608 dma_sync_single_for_cpu(udc->gadget.dev.parent,
2609 udc->nullp, 256,
2610 DMA_TO_DEVICE);
2611 }
2612 kfree(udc->statusbuf);
2613err4:
2614 kfree(udc->nullbuf);
2615err3:
2616 ep = &udc->eps[0];
2617 cpm_muram_free(cpm_muram_offset(ep->rxbase));
2618 kfree(ep->rxframe);
2619 kfree(ep->rxbuffer);
2620 kfree(ep->txframe);
2621err2:
2622 iounmap(udc->usb_regs);
2623err1:
2624 kfree(udc);
2625 return ret;
2626}
2627
2628#ifdef CONFIG_PM
2629static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2630{
2631 return -ENOTSUPP;
2632}
2633
2634static int qe_udc_resume(struct platform_device *dev)
2635{
2636 return -ENOTSUPP;
2637}
2638#endif
2639
2640static int qe_udc_remove(struct platform_device *ofdev)
2641{
2642 struct qe_udc *udc = dev_get_drvdata(&ofdev->dev);
2643 struct qe_ep *ep;
2644 unsigned int size;
	DECLARE_COMPLETION_ONSTACK(done);
2646
2647 usb_del_gadget_udc(&udc->gadget);
2648
2649 udc->done = &done;
2650 tasklet_disable(&udc->rx_tasklet);
2651
2652 if (udc->nullmap) {
2653 dma_unmap_single(udc->gadget.dev.parent,
2654 udc->nullp, 256,
2655 DMA_TO_DEVICE);
2656 udc->nullp = DMA_ADDR_INVALID;
2657 } else {
2658 dma_sync_single_for_cpu(udc->gadget.dev.parent,
2659 udc->nullp, 256,
2660 DMA_TO_DEVICE);
2661 }
2662 kfree(udc->statusbuf);
2663 kfree(udc->nullbuf);
2664
2665 ep = &udc->eps[0];
2666 cpm_muram_free(cpm_muram_offset(ep->rxbase));
2667 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2668
2669 kfree(ep->rxframe);
2670 if (ep->rxbufmap) {
2671 dma_unmap_single(udc->gadget.dev.parent,
2672 ep->rxbuf_d, size,
2673 DMA_FROM_DEVICE);
2674 ep->rxbuf_d = DMA_ADDR_INVALID;
2675 } else {
2676 dma_sync_single_for_cpu(udc->gadget.dev.parent,
2677 ep->rxbuf_d, size,
2678 DMA_FROM_DEVICE);
2679 }
2680
2681 kfree(ep->rxbuffer);
2682 kfree(ep->txframe);
2683
2684 free_irq(udc->usb_irq, udc);
2685 irq_dispose_mapping(udc->usb_irq);
2686
2687 tasklet_kill(&udc->rx_tasklet);
2688
2689 iounmap(udc->usb_regs);
2690
2691
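	/* wait for qe_udc_release() to run before the driver data goes away */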
2692 wait_for_completion(&done);
2693
2694 return 0;
2695}
2696
2697
2698static const struct of_device_id qe_udc_match[] = {
2699 {
2700 .compatible = "fsl,mpc8323-qe-usb",
2701 .data = (void *)PORT_QE,
2702 },
2703 {
2704 .compatible = "fsl,mpc8360-qe-usb",
2705 .data = (void *)PORT_QE,
2706 },
2707 {
2708 .compatible = "fsl,mpc8272-cpm-usb",
2709 .data = (void *)PORT_CPM,
2710 },
2711 {},
2712};
2713
2714MODULE_DEVICE_TABLE(of, qe_udc_match);
2715
2716static struct platform_driver udc_driver = {
2717 .driver = {
2718 .name = (char *)driver_name,
2719 .owner = THIS_MODULE,
2720 .of_match_table = qe_udc_match,
2721 },
2722 .probe = qe_udc_probe,
2723 .remove = qe_udc_remove,
2724#ifdef CONFIG_PM
2725 .suspend = qe_udc_suspend,
2726 .resume = qe_udc_resume,
2727#endif
2728};
2729
2730module_platform_driver(udc_driver);
2731
2732MODULE_DESCRIPTION(DRIVER_DESC);
2733MODULE_AUTHOR(DRIVER_AUTHOR);
2734MODULE_LICENSE("GPL");