// SPDX-License-Identifier: GPL-2.0
/*
 * R8A66597 UDC (USB gadget) driver
 *
 * Author : Yoshihiro Shimoda
 */
10#include <linux/module.h>
11#include <linux/interrupt.h>
12#include <linux/delay.h>
13#include <linux/io.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <linux/dma-mapping.h>
19
20#include <linux/usb/ch9.h>
21#include <linux/usb/gadget.h>
22
23#include "r8a66597-udc.h"
24
25#define DRIVER_VERSION "2011-09-26"
26
27static const char udc_name[] = "r8a66597_udc";
28static const char *r8a66597_ep_name[] = {
29 "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
30 "ep8", "ep9",
31};
32
33static void init_controller(struct r8a66597 *r8a66597);
34static void disable_controller(struct r8a66597 *r8a66597);
35static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
36static void irq_packet_write(struct r8a66597_ep *ep,
37 struct r8a66597_request *req);
38static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
39 gfp_t gfp_flags);
40
41static void transfer_complete(struct r8a66597_ep *ep,
42 struct r8a66597_request *req, int status);
43
44
45static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
46{
47 return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
48}
49
50static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
51 unsigned long reg)
52{
53 u16 tmp;
54
55 tmp = r8a66597_read(r8a66597, INTENB0);
56 r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
57 INTENB0);
58 r8a66597_bset(r8a66597, (1 << pipenum), reg);
59 r8a66597_write(r8a66597, tmp, INTENB0);
60}
61
62static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
63 unsigned long reg)
64{
65 u16 tmp;
66
67 tmp = r8a66597_read(r8a66597, INTENB0);
68 r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
69 INTENB0);
70 r8a66597_bclr(r8a66597, (1 << pipenum), reg);
71 r8a66597_write(r8a66597, tmp, INTENB0);
72}
73
74static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
75{
76 r8a66597_bset(r8a66597, CTRE, INTENB0);
77 r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);
78
79 r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
80}
81
82static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
83__releases(r8a66597->lock)
84__acquires(r8a66597->lock)
85{
86 r8a66597_bclr(r8a66597, CTRE, INTENB0);
87 r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
88 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
89
90 r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
91 spin_unlock(&r8a66597->lock);
92 r8a66597->driver->disconnect(&r8a66597->gadget);
93 spin_lock(&r8a66597->lock);
94
95 disable_controller(r8a66597);
96 init_controller(r8a66597);
97 r8a66597_bset(r8a66597, VBSE, INTENB0);
98 INIT_LIST_HEAD(&r8a66597->ep[0].queue);
99}
100
101static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
102{
103 u16 pid = 0;
104 unsigned long offset;
105
106 if (pipenum == 0) {
107 pid = r8a66597_read(r8a66597, DCPCTR) & PID;
108 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
109 offset = get_pipectr_addr(pipenum);
110 pid = r8a66597_read(r8a66597, offset) & PID;
111 } else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
114 }
115
116 return pid;
117}
118
119static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
120 u16 pid)
121{
122 unsigned long offset;
123
124 if (pipenum == 0) {
125 r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
126 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
127 offset = get_pipectr_addr(pipenum);
128 r8a66597_mdfy(r8a66597, pid, PID, offset);
129 } else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
132 }
133}
134
135static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
136{
137 control_reg_set_pid(r8a66597, pipenum, PID_BUF);
138}
139
140static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
141{
142 control_reg_set_pid(r8a66597, pipenum, PID_NAK);
143}
144
145static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
146{
147 control_reg_set_pid(r8a66597, pipenum, PID_STALL);
148}
149
150static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
151{
152 u16 ret = 0;
153 unsigned long offset;
154
155 if (pipenum == 0) {
156 ret = r8a66597_read(r8a66597, DCPCTR);
157 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
158 offset = get_pipectr_addr(pipenum);
159 ret = r8a66597_read(r8a66597, offset);
160 } else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
163 }
164
165 return ret;
166}
167
168static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
169{
170 unsigned long offset;
171
172 pipe_stop(r8a66597, pipenum);
173
174 if (pipenum == 0) {
175 r8a66597_bset(r8a66597, SQCLR, DCPCTR);
176 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
177 offset = get_pipectr_addr(pipenum);
178 r8a66597_bset(r8a66597, SQCLR, offset);
179 } else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
182 }
183}
184
185static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
186{
187 unsigned long offset;
188
189 pipe_stop(r8a66597, pipenum);
190
191 if (pipenum == 0) {
192 r8a66597_bset(r8a66597, SQSET, DCPCTR);
193 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
194 offset = get_pipectr_addr(pipenum);
195 r8a66597_bset(r8a66597, SQSET, offset);
196 } else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpected pipe num (%d)\n", pipenum);
199 }
200}
201
202static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
203{
204 unsigned long offset;
205
206 if (pipenum == 0) {
207 return r8a66597_read(r8a66597, DCPCTR) & SQMON;
208 } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
209 offset = get_pipectr_addr(pipenum);
210 return r8a66597_read(r8a66597, offset) & SQMON;
211 } else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpected pipe num (%d)\n", pipenum);
214 }
215
216 return 0;
217}
218
219static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
220{
221 return control_reg_sqmon(r8a66597, pipenum);
222}
223
224static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
225 u16 toggle)
226{
227 if (toggle)
228 control_reg_sqset(r8a66597, pipenum);
229 else
230 control_reg_sqclr(r8a66597, pipenum);
231}
232
233static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
234{
235 u16 tmp;
236 int size;
237
238 if (pipenum == 0) {
239 tmp = r8a66597_read(r8a66597, DCPCFG);
240 if ((tmp & R8A66597_CNTMD) != 0)
241 size = 256;
242 else {
243 tmp = r8a66597_read(r8a66597, DCPMAXP);
244 size = tmp & MAXP;
245 }
246 } else {
247 r8a66597_write(r8a66597, pipenum, PIPESEL);
248 tmp = r8a66597_read(r8a66597, PIPECFG);
249 if ((tmp & R8A66597_CNTMD) != 0) {
250 tmp = r8a66597_read(r8a66597, PIPEBUF);
251 size = ((tmp >> 10) + 1) * 64;
252 } else {
253 tmp = r8a66597_read(r8a66597, PIPEMAXP);
254 size = tmp & MXPS;
255 }
256 }
257
258 return size;
259}
260
261static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
262{
263 if (r8a66597->pdata->on_chip)
264 return MBW_32;
265 else
266 return MBW_16;
267}
268
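/*
 * Point the given FIFO port at a pipe. The new CURPIPE (and, for pipe 0,
 * ISEL) value does not take effect immediately, so it is polled back until
 * the controller reports it; the iteration limit only guards against a
 * hung bus.
 */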
269static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
270 u16 isel, u16 fifosel)
271{
272 u16 tmp, mask, loop;
273 int i = 0;
274
275 if (!pipenum) {
276 mask = ISEL | CURPIPE;
277 loop = isel;
278 } else {
279 mask = CURPIPE;
280 loop = pipenum;
281 }
282 r8a66597_mdfy(r8a66597, loop, mask, fifosel);
283
284 do {
285 tmp = r8a66597_read(r8a66597, fifosel);
286 if (i++ > 1000000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"r8a66597: register %x, loop %x timed out\n",
				fifosel, loop);
290 break;
291 }
292 ndelay(1);
293 } while ((tmp & mask) != loop);
294}
295
296static void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
297{
298 struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
299
300 if (ep->use_dma)
301 r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
302
303 r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);
304
305 ndelay(450);
306
307 if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
308 r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
309 else
310 r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
311
312 if (ep->use_dma)
313 r8a66597_bset(r8a66597, DREQE, ep->fifosel);
314}
315
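/*
 * Program PIPESEL/PIPECFG/PIPEBUF/PIPEMAXP/PIPEPERI for a newly allocated
 * pipe. Interrupt pipes use the fixed 64-byte buffer blocks starting at
 * block 4; bulk and isochronous pipes each get 16 blocks (buf_bsize 7,
 * i.e. (7 + 1) * 64 bytes per buffer) carved out of the FIFO RAM starting
 * at R8A66597_BASE_BUFNUM, with bulk pipes additionally double-buffered
 * (R8A66597_DBLB).
 */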
316static int pipe_buffer_setting(struct r8a66597 *r8a66597,
317 struct r8a66597_pipe_info *info)
318{
319 u16 bufnum = 0, buf_bsize = 0;
320 u16 pipecfg = 0;
321
322 if (info->pipe == 0)
323 return -EINVAL;
324
325 r8a66597_write(r8a66597, info->pipe, PIPESEL);
326
327 if (info->dir_in)
328 pipecfg |= R8A66597_DIR;
329 pipecfg |= info->type;
330 pipecfg |= info->epnum;
331 switch (info->type) {
332 case R8A66597_INT:
333 bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
334 buf_bsize = 0;
335 break;
336 case R8A66597_BULK:
337
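		/*
		 * alloc_pipe_config() may hand out an isochronous pipe for a
		 * bulk endpoint when the bulk pipes are exhausted, so derive
		 * the buffer number from whichever base this pipe belongs to.
		 */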
338 if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
339 bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
340 else
341 bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
342
343 bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
344 buf_bsize = 7;
345 pipecfg |= R8A66597_DBLB;
346 if (!info->dir_in)
347 pipecfg |= R8A66597_SHTNAK;
348 break;
349 case R8A66597_ISO:
350 bufnum = R8A66597_BASE_BUFNUM +
351 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
352 buf_bsize = 7;
353 break;
354 }
355
356 if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
357 pr_err("r8a66597 pipe memory is insufficient\n");
358 return -ENOMEM;
359 }
360
361 r8a66597_write(r8a66597, pipecfg, PIPECFG);
362 r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
363 r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
364 if (info->interval)
365 info->interval--;
366 r8a66597_write(r8a66597, info->interval, PIPEPERI);
367
368 return 0;
369}
370
371static void pipe_buffer_release(struct r8a66597 *r8a66597,
372 struct r8a66597_pipe_info *info)
373{
374 if (info->pipe == 0)
375 return;
376
377 if (is_bulk_pipe(info->pipe)) {
378 r8a66597->bulk--;
379 } else if (is_interrupt_pipe(info->pipe)) {
380 r8a66597->interrupt--;
381 } else if (is_isoc_pipe(info->pipe)) {
382 r8a66597->isochronous--;
383 if (info->type == R8A66597_BULK)
384 r8a66597->bulk--;
385 } else {
		dev_err(r8a66597_to_dev(r8a66597),
			"ep_release: unexpected pipenum (%d)\n", info->pipe);
388 }
389}
390
391static void pipe_initialize(struct r8a66597_ep *ep)
392{
393 struct r8a66597 *r8a66597 = ep->r8a66597;
394
395 r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);
396
397 r8a66597_write(r8a66597, ACLRM, ep->pipectr);
398 r8a66597_write(r8a66597, 0, ep->pipectr);
399 r8a66597_write(r8a66597, SQCLR, ep->pipectr);
400 if (ep->use_dma) {
401 r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);
402
403 ndelay(450);
404
405 r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
406 }
407}
408
409static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
410 struct r8a66597_ep *ep,
411 const struct usb_endpoint_descriptor *desc,
412 u16 pipenum, int dma)
413{
414 ep->use_dma = 0;
415 ep->fifoaddr = CFIFO;
416 ep->fifosel = CFIFOSEL;
417 ep->fifoctr = CFIFOCTR;
418
419 ep->pipectr = get_pipectr_addr(pipenum);
420 if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
421 ep->pipetre = get_pipetre_addr(pipenum);
422 ep->pipetrn = get_pipetrn_addr(pipenum);
423 } else {
424 ep->pipetre = 0;
425 ep->pipetrn = 0;
426 }
427 ep->pipenum = pipenum;
428 ep->ep.maxpacket = usb_endpoint_maxp(desc);
429 r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[usb_endpoint_num(desc)] = ep;
432 INIT_LIST_HEAD(&ep->queue);
433}
434
435static void r8a66597_ep_release(struct r8a66597_ep *ep)
436{
437 struct r8a66597 *r8a66597 = ep->r8a66597;
438 u16 pipenum = ep->pipenum;
439
440 if (pipenum == 0)
441 return;
442
443 if (ep->use_dma)
444 r8a66597->num_dma--;
445 ep->pipenum = 0;
446 ep->busy = 0;
447 ep->use_dma = 0;
448}
449
450static int alloc_pipe_config(struct r8a66597_ep *ep,
451 const struct usb_endpoint_descriptor *desc)
452{
453 struct r8a66597 *r8a66597 = ep->r8a66597;
454 struct r8a66597_pipe_info info;
455 int dma = 0;
456 unsigned char *counter;
457 int ret;
458
459 ep->ep.desc = desc;
460
461 if (ep->pipenum)
462 return 0;
463
464 switch (usb_endpoint_type(desc)) {
465 case USB_ENDPOINT_XFER_BULK:
466 if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
467 if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
468 dev_err(r8a66597_to_dev(r8a66597),
469 "bulk pipe is insufficient\n");
470 return -ENODEV;
471 } else {
472 info.pipe = R8A66597_BASE_PIPENUM_ISOC
473 + r8a66597->isochronous;
474 counter = &r8a66597->isochronous;
475 }
476 } else {
477 info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
478 counter = &r8a66597->bulk;
479 }
480 info.type = R8A66597_BULK;
481 dma = 1;
482 break;
483 case USB_ENDPOINT_XFER_INT:
484 if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
485 dev_err(r8a66597_to_dev(r8a66597),
486 "interrupt pipe is insufficient\n");
487 return -ENODEV;
488 }
489 info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
490 info.type = R8A66597_INT;
491 counter = &r8a66597->interrupt;
492 break;
493 case USB_ENDPOINT_XFER_ISOC:
494 if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
495 dev_err(r8a66597_to_dev(r8a66597),
496 "isochronous pipe is insufficient\n");
497 return -ENODEV;
498 }
499 info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
500 info.type = R8A66597_ISO;
501 counter = &r8a66597->isochronous;
502 break;
503 default:
		dev_err(r8a66597_to_dev(r8a66597), "unexpected xfer type\n");
505 return -EINVAL;
506 }
507 ep->type = info.type;
508
509 info.epnum = usb_endpoint_num(desc);
510 info.maxpacket = usb_endpoint_maxp(desc);
511 info.interval = desc->bInterval;
512 if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
513 info.dir_in = 1;
514 else
515 info.dir_in = 0;
516
517 ret = pipe_buffer_setting(r8a66597, &info);
518 if (ret < 0) {
		dev_err(r8a66597_to_dev(r8a66597),
			"pipe_buffer_setting failed\n");
521 return ret;
522 }
523
524 (*counter)++;
525 if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
526 r8a66597->bulk++;
527
528 r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
529 pipe_initialize(ep);
530
531 return 0;
532}
533
534static int free_pipe_config(struct r8a66597_ep *ep)
535{
536 struct r8a66597 *r8a66597 = ep->r8a66597;
537 struct r8a66597_pipe_info info;
538
539 info.pipe = ep->pipenum;
540 info.type = ep->type;
541 pipe_buffer_release(r8a66597, &info);
542 r8a66597_ep_release(ep);
543
544 return 0;
545}
546
547
548static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
549{
550 enable_irq_ready(r8a66597, pipenum);
551 enable_irq_nrdy(r8a66597, pipenum);
552}
553
554static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
555{
556 disable_irq_ready(r8a66597, pipenum);
557 disable_irq_nrdy(r8a66597, pipenum);
558}
559
560
561static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
562{
563 r8a66597->ep[0].internal_ccpl = ccpl;
564 pipe_start(r8a66597, 0);
565 r8a66597_bset(r8a66597, CCPL, DCPCTR);
566}
567
568static void start_ep0_write(struct r8a66597_ep *ep,
569 struct r8a66597_request *req)
570{
571 struct r8a66597 *r8a66597 = ep->r8a66597;
572
573 pipe_change(r8a66597, ep->pipenum);
574 r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
575 r8a66597_write(r8a66597, BCLR, ep->fifoctr);
576 if (req->req.length == 0) {
577 r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
578 pipe_start(r8a66597, 0);
579 transfer_complete(ep, req, 0);
580 } else {
581 r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
582 irq_ep0_write(ep, req);
583 }
584}
585
586static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
587 u16 fifosel)
588{
589 u16 tmp;
590
591 tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
592 if (tmp == pipenum)
593 r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
594}
595
596static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
597 int enable)
598{
599 struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
600 u16 tmp, toggle;
601
602
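	/* check whether BFRE already has the requested value */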
603 r8a66597_write(r8a66597, pipenum, PIPESEL);
604 tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
605 if ((enable && tmp) || (!enable && !tmp))
606 return;
607
608
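	/* stop the pipe and detach it from every FIFO port before touching BFRE */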
609 pipe_stop(r8a66597, pipenum);
610 disable_fifosel(r8a66597, pipenum, CFIFOSEL);
611 disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
612 disable_fifosel(r8a66597, pipenum, D1FIFOSEL);
613
614 toggle = save_usb_toggle(r8a66597, pipenum);
615
616 r8a66597_write(r8a66597, pipenum, PIPESEL);
617 if (enable)
618 r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
619 else
620 r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);
621
622
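	/* ACLRM clears the pipe buffer so the new BFRE setting starts clean */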
623 r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
624 r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);
625
626 restore_usb_toggle(r8a66597, pipenum, toggle);
627}
628
629static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
630 struct r8a66597_ep *ep,
631 struct r8a66597_request *req)
632{
633 struct r8a66597_dma *dma;
634
635 if (!r8a66597_is_sudmac(r8a66597))
636 return -ENODEV;
637
638
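	/* the SUDMAC is only used for bulk pipes */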
639 if (!is_bulk_pipe(ep->pipenum))
640 return -EIO;
641
642 if (r8a66597->dma.used)
643 return -EBUSY;
644
645
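	/* claim the single SUDMAC channel */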
646 dma = &r8a66597->dma;
647 dma->used = 1;
648 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) {
649 dma->dir = 1;
650 } else {
651 dma->dir = 0;
652 change_bfre_mode(r8a66597, ep->pipenum, 1);
653 }
654
655
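	/* switch the endpoint from the CFIFO to the D0FIFO used for DMA */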
656 ep->use_dma = 1;
657 ep->dma = dma;
658 ep->fifoaddr = D0FIFO;
659 ep->fifosel = D0FIFOSEL;
660 ep->fifoctr = D0FIFOCTR;
661
662
663 return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
664}
665
666static void sudmac_free_channel(struct r8a66597 *r8a66597,
667 struct r8a66597_ep *ep,
668 struct r8a66597_request *req)
669{
670 if (!r8a66597_is_sudmac(r8a66597))
671 return;
672
673 usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);
674
675 r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
676 r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);
677
678 ep->dma->used = 0;
679 ep->use_dma = 0;
680 ep->fifoaddr = CFIFO;
681 ep->fifosel = CFIFOSEL;
682 ep->fifoctr = CFIFOCTR;
683}
684
685static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
686 struct r8a66597_request *req)
687{
688 BUG_ON(req->req.length == 0);
689
690 r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
691 r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
692 r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
693 r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);
694
695 r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
696}
697
698static void start_packet_write(struct r8a66597_ep *ep,
699 struct r8a66597_request *req)
700{
701 struct r8a66597 *r8a66597 = ep->r8a66597;
702 u16 tmp;
703
704 pipe_change(r8a66597, ep->pipenum);
705 disable_irq_empty(r8a66597, ep->pipenum);
706 pipe_start(r8a66597, ep->pipenum);
707
708 if (req->req.length == 0) {
709 transfer_complete(ep, req, 0);
710 } else {
711 r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
712 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
713
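			/* PIO mode: the CPU fills the FIFO from the interrupt handlers */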
714 pipe_change(r8a66597, ep->pipenum);
715 disable_irq_empty(r8a66597, ep->pipenum);
716 pipe_start(r8a66597, ep->pipenum);
717 tmp = r8a66597_read(r8a66597, ep->fifoctr);
718 if (unlikely((tmp & FRDY) == 0))
719 pipe_irq_enable(r8a66597, ep->pipenum);
720 else
721 irq_packet_write(ep, req);
722 } else {
723
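			/* DMA mode: hand the transfer over to the SUDMAC */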
724 pipe_change(r8a66597, ep->pipenum);
725 disable_irq_nrdy(r8a66597, ep->pipenum);
726 pipe_start(r8a66597, ep->pipenum);
727 enable_irq_nrdy(r8a66597, ep->pipenum);
728 sudmac_start(r8a66597, ep, req);
729 }
730 }
731}
732
733static void start_packet_read(struct r8a66597_ep *ep,
734 struct r8a66597_request *req)
735{
736 struct r8a66597 *r8a66597 = ep->r8a66597;
737 u16 pipenum = ep->pipenum;
738
739 if (ep->pipenum == 0) {
740 r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
741 r8a66597_write(r8a66597, BCLR, ep->fifoctr);
742 pipe_start(r8a66597, pipenum);
743 pipe_irq_enable(r8a66597, pipenum);
744 } else {
745 pipe_stop(r8a66597, pipenum);
746 if (ep->pipetre) {
747 enable_irq_nrdy(r8a66597, pipenum);
748 r8a66597_write(r8a66597, TRCLR, ep->pipetre);
749 r8a66597_write(r8a66597,
750 DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
751 ep->pipetrn);
752 r8a66597_bset(r8a66597, TRENB, ep->pipetre);
753 }
754
755 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
756
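			/* PIO mode: turn BFRE off and read the FIFO from the BRDY interrupt */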
757 change_bfre_mode(r8a66597, ep->pipenum, 0);
758 pipe_start(r8a66597, pipenum);
759 pipe_irq_enable(r8a66597, pipenum);
760 } else {
761 pipe_change(r8a66597, pipenum);
762 sudmac_start(r8a66597, ep, req);
763 pipe_start(r8a66597, pipenum);
764 }
765 }
766}
767
768static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
769{
770 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
771 start_packet_write(ep, req);
772 else
773 start_packet_read(ep, req);
774}
775
776static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
777{
778 u16 ctsq;
779
780 ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;
781
782 switch (ctsq) {
783 case CS_RDDS:
784 start_ep0_write(ep, req);
785 break;
786 case CS_WRDS:
787 start_packet_read(ep, req);
788 break;
789
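	/* control write with no data stage: proceed straight to the status stage */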
790 case CS_WRND:
791 control_end(ep->r8a66597, 0);
792 break;
793 default:
		dev_err(r8a66597_to_dev(ep->r8a66597),
			"start_ep0: unexpected ctsq (%x)\n", ctsq);
796 break;
797 }
798}
799
800static void init_controller(struct r8a66597 *r8a66597)
801{
802 u16 vif = r8a66597->pdata->vif ? LDRV : 0;
803 u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
804 u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
805
806 if (r8a66597->pdata->on_chip) {
807 if (r8a66597->pdata->buswait)
808 r8a66597_write(r8a66597, r8a66597->pdata->buswait,
809 SYSCFG1);
810 else
811 r8a66597_write(r8a66597, 0x0f, SYSCFG1);
812 r8a66597_bset(r8a66597, HSE, SYSCFG0);
813
814 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
815 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
816 r8a66597_bset(r8a66597, USBE, SYSCFG0);
817
818 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
819
820 r8a66597_bset(r8a66597, irq_sense, INTENB1);
821 r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
822 DMA0CFG);
823 } else {
824 r8a66597_bset(r8a66597, vif | endian, PINCFG);
825 r8a66597_bset(r8a66597, HSE, SYSCFG0);
826 r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
827 XTAL, SYSCFG0);
828
829 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
830 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
831 r8a66597_bset(r8a66597, USBE, SYSCFG0);
832
833 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
834
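		/*
		 * The delays below presumably give the external clock and the
		 * PLL time to stabilize before SCKE enables the USB clock.
		 */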
835 mdelay(3);
836
837 r8a66597_bset(r8a66597, PLLC, SYSCFG0);
838
839 mdelay(1);
840
841 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
842
843 r8a66597_bset(r8a66597, irq_sense, INTENB1);
844 r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
845 DMA0CFG);
846 }
847}
848
849static void disable_controller(struct r8a66597 *r8a66597)
850{
851 if (r8a66597->pdata->on_chip) {
852 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
853 r8a66597_bclr(r8a66597, UTST, TESTMODE);
854
855
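		/* mask every interrupt source */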
856 r8a66597_write(r8a66597, 0, INTENB0);
857 r8a66597_write(r8a66597, 0, INTENB1);
858 r8a66597_write(r8a66597, 0, BRDYENB);
859 r8a66597_write(r8a66597, 0, BEMPENB);
860 r8a66597_write(r8a66597, 0, NRDYENB);
861
862
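		/* clear any pending interrupt status */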
863 r8a66597_write(r8a66597, 0, BRDYSTS);
864 r8a66597_write(r8a66597, 0, NRDYSTS);
865 r8a66597_write(r8a66597, 0, BEMPSTS);
866
867 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
868 r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
869
870 } else {
871 r8a66597_bclr(r8a66597, UTST, TESTMODE);
872 r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
873 udelay(1);
874 r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
875 udelay(1);
876 udelay(1);
877 r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
878 }
879}
880
881static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
882{
883 u16 tmp;
884
885 if (!r8a66597->pdata->on_chip) {
886 tmp = r8a66597_read(r8a66597, SYSCFG0);
887 if (!(tmp & XCKE))
888 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
889 }
890}
891
892static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
893{
894 return list_entry(ep->queue.next, struct r8a66597_request, queue);
895}
896
897
898static void transfer_complete(struct r8a66597_ep *ep,
899 struct r8a66597_request *req, int status)
900__releases(r8a66597->lock)
901__acquires(r8a66597->lock)
902{
903 int restart = 0;
904
905 if (unlikely(ep->pipenum == 0)) {
906 if (ep->internal_ccpl) {
907 ep->internal_ccpl = 0;
908 return;
909 }
910 }
911
912 list_del_init(&req->queue);
913 if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
914 req->req.status = -ESHUTDOWN;
915 else
916 req->req.status = status;
917
918 if (!list_empty(&ep->queue))
919 restart = 1;
920
921 if (ep->use_dma)
922 sudmac_free_channel(ep->r8a66597, ep, req);
923
924 spin_unlock(&ep->r8a66597->lock);
925 usb_gadget_giveback_request(&ep->ep, &req->req);
926 spin_lock(&ep->r8a66597->lock);
927
928 if (restart) {
929 req = get_request_from_ep(ep);
930 if (ep->ep.desc)
931 start_packet(ep, req);
932 }
933}
934
935static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
936{
937 int i;
938 u16 tmp;
939 unsigned bufsize;
940 size_t size;
941 void *buf;
942 u16 pipenum = ep->pipenum;
943 struct r8a66597 *r8a66597 = ep->r8a66597;
944
945 pipe_change(r8a66597, pipenum);
946 r8a66597_bset(r8a66597, ISEL, ep->fifosel);
947
948 i = 0;
949 do {
950 tmp = r8a66597_read(r8a66597, ep->fifoctr);
951 if (i++ > 100000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"pipe0 is busy: possible CPU I/O bus conflict; please power off this controller\n");
955 return;
956 }
957 ndelay(1);
958 } while ((tmp & FRDY) == 0);
959
960
961 bufsize = get_buffer_size(r8a66597, pipenum);
962 buf = req->req.buf + req->req.actual;
963 size = min(bufsize, req->req.length - req->req.actual);
964
965
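	/* write the FIFO; a short or zero-length packet must be flagged with BVAL */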
966 if (req->req.buf) {
967 if (size > 0)
968 r8a66597_write_fifo(r8a66597, ep, buf, size);
969 if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
970 r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
971 }
972
973
974 req->req.actual += size;
975
976
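	/*
	 * If this was the final packet (request complete, short or zero-length),
	 * stop the ready/empty interrupts; otherwise wait for BEMP to send the
	 * next chunk.
	 */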
977 if ((!req->req.zero && (req->req.actual == req->req.length))
978 || (size % ep->ep.maxpacket)
979 || (size == 0)) {
980 disable_irq_ready(r8a66597, pipenum);
981 disable_irq_empty(r8a66597, pipenum);
982 } else {
983 disable_irq_ready(r8a66597, pipenum);
984 enable_irq_empty(r8a66597, pipenum);
985 }
986 pipe_start(r8a66597, pipenum);
987}
988
989static void irq_packet_write(struct r8a66597_ep *ep,
990 struct r8a66597_request *req)
991{
992 u16 tmp;
993 unsigned bufsize;
994 size_t size;
995 void *buf;
996 u16 pipenum = ep->pipenum;
997 struct r8a66597 *r8a66597 = ep->r8a66597;
998
999 pipe_change(r8a66597, pipenum);
1000 tmp = r8a66597_read(r8a66597, ep->fifoctr);
1001 if (unlikely((tmp & FRDY) == 0)) {
1002 pipe_stop(r8a66597, pipenum);
1003 pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597),
			"write fifo not ready. pipenum=%d\n", pipenum);
1006 return;
1007 }
1008
1009
1010 bufsize = get_buffer_size(r8a66597, pipenum);
1011 buf = req->req.buf + req->req.actual;
1012 size = min(bufsize, req->req.length - req->req.actual);
1013
1014
1015 if (req->req.buf) {
1016 r8a66597_write_fifo(r8a66597, ep, buf, size);
1017 if ((size == 0)
1018 || ((size % ep->ep.maxpacket) != 0)
1019 || ((bufsize != ep->ep.maxpacket)
1020 && (bufsize > size)))
1021 r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
1022 }
1023
1024
1025 req->req.actual += size;
1026
1027 if ((!req->req.zero && (req->req.actual == req->req.length))
1028 || (size % ep->ep.maxpacket)
1029 || (size == 0)) {
1030 disable_irq_ready(r8a66597, pipenum);
1031 enable_irq_empty(r8a66597, pipenum);
1032 } else {
1033 disable_irq_empty(r8a66597, pipenum);
1034 pipe_irq_enable(r8a66597, pipenum);
1035 }
1036}
1037
1038static void irq_packet_read(struct r8a66597_ep *ep,
1039 struct r8a66597_request *req)
1040{
1041 u16 tmp;
1042 int rcv_len, bufsize, req_len;
1043 int size;
1044 void *buf;
1045 u16 pipenum = ep->pipenum;
1046 struct r8a66597 *r8a66597 = ep->r8a66597;
1047 int finish = 0;
1048
1049 pipe_change(r8a66597, pipenum);
1050 tmp = r8a66597_read(r8a66597, ep->fifoctr);
1051 if (unlikely((tmp & FRDY) == 0)) {
1052 req->req.status = -EPIPE;
1053 pipe_stop(r8a66597, pipenum);
1054 pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready\n");
1056 return;
1057 }
1058
1059
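	/* DTLN is the number of bytes waiting in the FIFO */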
1060 rcv_len = tmp & DTLN;
1061 bufsize = get_buffer_size(r8a66597, pipenum);
1062
1063 buf = req->req.buf + req->req.actual;
1064 req_len = req->req.length - req->req.actual;
1065 if (rcv_len < bufsize)
1066 size = min(rcv_len, req_len);
1067 else
1068 size = min(bufsize, req_len);
1069
1070
1071 req->req.actual += size;
1072
1073
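	/* a short packet or a completed request ends the transfer */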
1074 if ((!req->req.zero && (req->req.actual == req->req.length))
1075 || (size % ep->ep.maxpacket)
1076 || (size == 0)) {
1077 pipe_stop(r8a66597, pipenum);
1078 pipe_irq_disable(r8a66597, pipenum);
1079 finish = 1;
1080 }
1081
1082
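	/* read the FIFO; a zero-length packet only needs the buffer cleared */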
1083 if (req->req.buf) {
1084 if (size == 0)
1085 r8a66597_write(r8a66597, BCLR, ep->fifoctr);
1086 else
1087 r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
1088
1089 }
1090
1091 if ((ep->pipenum != 0) && finish)
1092 transfer_complete(ep, req, 0);
1093}
1094
1095static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
1096{
1097 u16 check;
1098 u16 pipenum;
1099 struct r8a66597_ep *ep;
1100 struct r8a66597_request *req;
1101
1102 if ((status & BRDY0) && (enb & BRDY0)) {
1103 r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
1104 r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);
1105
1106 ep = &r8a66597->ep[0];
1107 req = get_request_from_ep(ep);
1108 irq_packet_read(ep, req);
1109 } else {
1110 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
1111 check = 1 << pipenum;
1112 if ((status & check) && (enb & check)) {
1113 r8a66597_write(r8a66597, ~check, BRDYSTS);
1114 ep = r8a66597->pipenum2ep[pipenum];
1115 req = get_request_from_ep(ep);
1116 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
1117 irq_packet_write(ep, req);
1118 else
1119 irq_packet_read(ep, req);
1120 }
1121 }
1122 }
1123}
1124
1125static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
1126{
1127 u16 tmp;
1128 u16 check;
1129 u16 pipenum;
1130 struct r8a66597_ep *ep;
1131 struct r8a66597_request *req;
1132
1133 if ((status & BEMP0) && (enb & BEMP0)) {
1134 r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
1135
1136 ep = &r8a66597->ep[0];
1137 req = get_request_from_ep(ep);
1138 irq_ep0_write(ep, req);
1139 } else {
1140 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
1141 check = 1 << pipenum;
1142 if ((status & check) && (enb & check)) {
1143 r8a66597_write(r8a66597, ~check, BEMPSTS);
1144 tmp = control_reg_get(r8a66597, pipenum);
1145 if ((tmp & INBUFM) == 0) {
1146 disable_irq_empty(r8a66597, pipenum);
1147 pipe_irq_disable(r8a66597, pipenum);
1148 pipe_stop(r8a66597, pipenum);
1149 ep = r8a66597->pipenum2ep[pipenum];
1150 req = get_request_from_ep(ep);
1151 if (!list_empty(&ep->queue))
1152 transfer_complete(ep, req, 0);
1153 }
1154 }
1155 }
1156 }
1157}
1158
1159static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1160__releases(r8a66597->lock)
1161__acquires(r8a66597->lock)
1162{
1163 struct r8a66597_ep *ep;
1164 u16 pid;
1165 u16 status = 0;
1166 u16 w_index = le16_to_cpu(ctrl->wIndex);
1167
1168 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1169 case USB_RECIP_DEVICE:
1170 status = r8a66597->device_status;
1171 break;
1172 case USB_RECIP_INTERFACE:
1173 status = 0;
1174 break;
1175 case USB_RECIP_ENDPOINT:
1176 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1177 pid = control_reg_get_pid(r8a66597, ep->pipenum);
1178 if (pid == PID_STALL)
1179 status = 1 << USB_ENDPOINT_HALT;
1180 else
1181 status = 0;
1182 break;
1183 default:
1184 pipe_stall(r8a66597, 0);
1185 return;
1186 }
1187
1188 r8a66597->ep0_data = cpu_to_le16(status);
1189 r8a66597->ep0_req->buf = &r8a66597->ep0_data;
1190 r8a66597->ep0_req->length = 2;
1191
1192 spin_unlock(&r8a66597->lock);
1193 r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
1194 spin_lock(&r8a66597->lock);
1195}
1196
1197static void clear_feature(struct r8a66597 *r8a66597,
1198 struct usb_ctrlrequest *ctrl)
1199{
1200 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1201 case USB_RECIP_DEVICE:
1202 control_end(r8a66597, 1);
1203 break;
1204 case USB_RECIP_INTERFACE:
1205 control_end(r8a66597, 1);
1206 break;
1207 case USB_RECIP_ENDPOINT: {
1208 struct r8a66597_ep *ep;
1209 struct r8a66597_request *req;
1210 u16 w_index = le16_to_cpu(ctrl->wIndex);
1211
1212 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1213 if (!ep->wedge) {
1214 pipe_stop(r8a66597, ep->pipenum);
1215 control_reg_sqclr(r8a66597, ep->pipenum);
1216 spin_unlock(&r8a66597->lock);
1217 usb_ep_clear_halt(&ep->ep);
1218 spin_lock(&r8a66597->lock);
1219 }
1220
1221 control_end(r8a66597, 1);
1222
1223 req = get_request_from_ep(ep);
1224 if (ep->busy) {
1225 ep->busy = 0;
1226 if (list_empty(&ep->queue))
1227 break;
1228 start_packet(ep, req);
1229 } else if (!list_empty(&ep->queue))
1230 pipe_start(r8a66597, ep->pipenum);
1231 }
1232 break;
1233 default:
1234 pipe_stall(r8a66597, 0);
1235 break;
1236 }
1237}
1238
1239static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1240{
1241 u16 tmp;
1242 int timeout = 3000;
1243
1244 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1245 case USB_RECIP_DEVICE:
1246 switch (le16_to_cpu(ctrl->wValue)) {
1247 case USB_DEVICE_TEST_MODE:
1248 control_end(r8a66597, 1);
1249
1250 do {
1251 tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
1252 udelay(1);
1253 } while (tmp != CS_IDST || timeout-- > 0);
1254
1255 if (tmp == CS_IDST)
1256 r8a66597_bset(r8a66597,
1257 le16_to_cpu(ctrl->wIndex >> 8),
1258 TESTMODE);
1259 break;
1260 default:
1261 pipe_stall(r8a66597, 0);
1262 break;
1263 }
1264 break;
1265 case USB_RECIP_INTERFACE:
1266 control_end(r8a66597, 1);
1267 break;
1268 case USB_RECIP_ENDPOINT: {
1269 struct r8a66597_ep *ep;
1270 u16 w_index = le16_to_cpu(ctrl->wIndex);
1271
1272 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1273 pipe_stall(r8a66597, ep->pipenum);
1274
1275 control_end(r8a66597, 1);
1276 }
1277 break;
1278 default:
1279 pipe_stall(r8a66597, 0);
1280 break;
1281 }
1282}
1283
1284
1285static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1286{
1287 u16 *p = (u16 *)ctrl;
1288 unsigned long offset = USBREQ;
1289 int i, ret = 0;
1290
1291
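	/* acknowledge the setup packet (clear VALID) and read its 8 bytes from the request registers */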
1292 r8a66597_write(r8a66597, ~VALID, INTSTS0);
1293
1294 for (i = 0; i < 4; i++)
1295 p[i] = r8a66597_read(r8a66597, offset + i*2);
1296
1297
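	/*
	 * Handle the standard requests the controller needs help with here;
	 * returning 1 defers the request to the gadget driver's setup().
	 */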
1298 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1299 switch (ctrl->bRequest) {
1300 case USB_REQ_GET_STATUS:
1301 get_status(r8a66597, ctrl);
1302 break;
1303 case USB_REQ_CLEAR_FEATURE:
1304 clear_feature(r8a66597, ctrl);
1305 break;
1306 case USB_REQ_SET_FEATURE:
1307 set_feature(r8a66597, ctrl);
1308 break;
1309 default:
1310 ret = 1;
1311 break;
1312 }
1313 } else
1314 ret = 1;
1315 return ret;
1316}
1317
1318static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
1319{
1320 u16 speed = get_usb_speed(r8a66597);
1321
1322 switch (speed) {
1323 case HSMODE:
1324 r8a66597->gadget.speed = USB_SPEED_HIGH;
1325 break;
1326 case FSMODE:
1327 r8a66597->gadget.speed = USB_SPEED_FULL;
1328 break;
1329 default:
1330 r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
1331 dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
1332 }
1333}
1334
1335static void irq_device_state(struct r8a66597 *r8a66597)
1336{
1337 u16 dvsq;
1338
1339 dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
1340 r8a66597_write(r8a66597, ~DVST, INTSTS0);
1341
1342 if (dvsq == DS_DFLT) {
1343
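		/* bus reset: notify the gadget driver and re-read the bus speed */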
1344 spin_unlock(&r8a66597->lock);
1345 usb_gadget_udc_reset(&r8a66597->gadget, r8a66597->driver);
1346 spin_lock(&r8a66597->lock);
1347 r8a66597_update_usb_speed(r8a66597);
1348 }
1349 if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
1350 r8a66597_update_usb_speed(r8a66597);
1351 if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
1352 && r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
1353 r8a66597_update_usb_speed(r8a66597);
1354
1355 r8a66597->old_dvsq = dvsq;
1356}
1357
1358static void irq_control_stage(struct r8a66597 *r8a66597)
1359__releases(r8a66597->lock)
1360__acquires(r8a66597->lock)
1361{
1362 struct usb_ctrlrequest ctrl;
1363 u16 ctsq;
1364
1365 ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
1366 r8a66597_write(r8a66597, ~CTRT, INTSTS0);
1367
1368 switch (ctsq) {
1369 case CS_IDST: {
1370 struct r8a66597_ep *ep;
1371 struct r8a66597_request *req;
1372 ep = &r8a66597->ep[0];
1373 req = get_request_from_ep(ep);
1374 transfer_complete(ep, req, 0);
1375 }
1376 break;
1377
1378 case CS_RDDS:
1379 case CS_WRDS:
1380 case CS_WRND:
1381 if (setup_packet(r8a66597, &ctrl)) {
1382 spin_unlock(&r8a66597->lock);
1383 if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
1384 < 0)
1385 pipe_stall(r8a66597, 0);
1386 spin_lock(&r8a66597->lock);
1387 }
1388 break;
1389 case CS_RDSS:
1390 case CS_WRSS:
1391 control_end(r8a66597, 0);
1392 break;
1393 default:
		dev_err(r8a66597_to_dev(r8a66597),
			"ctrl_stage: unexpected ctsq (%x)\n", ctsq);
1396 break;
1397 }
1398}
1399
1400static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
1401{
1402 u16 pipenum;
1403 struct r8a66597_request *req;
1404 u32 len;
1405 int i = 0;
1406
1407 pipenum = ep->pipenum;
1408 pipe_change(r8a66597, pipenum);
1409
1410 while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
1411 udelay(1);
1412 if (unlikely(i++ >= 10000)) {
1413 dev_err(r8a66597_to_dev(r8a66597),
1414 "%s: FRDY was not set (%d)\n",
1415 __func__, pipenum);
1416 return;
1417 }
1418 }
1419
1420 r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
1421 req = get_request_from_ep(ep);
1422
1423
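	/* account for the byte count the SUDMAC reports in CH0CBC */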
1424 len = r8a66597_sudmac_read(r8a66597, CH0CBC);
1425 req->req.actual += len;
1426
1427
1428 r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
1429
1430
1431 if ((!req->req.zero && (req->req.actual == req->req.length))
1432 || (len % ep->ep.maxpacket)) {
1433 if (ep->dma->dir) {
1434 disable_irq_ready(r8a66597, pipenum);
1435 enable_irq_empty(r8a66597, pipenum);
1436 } else {
1437
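			/* clear the pipe's BRDY flag before completing the request */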
1438 r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
1439 transfer_complete(ep, req, 0);
1440 }
1441 }
1442}
1443
1444static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
1445{
1446 u32 irqsts;
1447 struct r8a66597_ep *ep;
1448 u16 pipenum;
1449
1450 irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
1451 if (irqsts & CH0ENDS) {
1452 r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
1453 pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
1454 ep = r8a66597->pipenum2ep[pipenum];
1455 sudmac_finish(r8a66597, ep);
1456 }
1457}
1458
1459static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
1460{
1461 struct r8a66597 *r8a66597 = _r8a66597;
1462 u16 intsts0;
1463 u16 intenb0;
1464 u16 savepipe;
1465 u16 mask0;
1466
1467 spin_lock(&r8a66597->lock);
1468
1469 if (r8a66597_is_sudmac(r8a66597))
1470 r8a66597_sudmac_irq(r8a66597);
1471
1472 intsts0 = r8a66597_read(r8a66597, INTSTS0);
1473 intenb0 = r8a66597_read(r8a66597, INTENB0);
1474
1475 savepipe = r8a66597_read(r8a66597, CFIFOSEL);
1476
1477 mask0 = intsts0 & intenb0;
1478 if (mask0) {
1479 u16 brdysts = r8a66597_read(r8a66597, BRDYSTS);
1480 u16 bempsts = r8a66597_read(r8a66597, BEMPSTS);
1481 u16 brdyenb = r8a66597_read(r8a66597, BRDYENB);
1482 u16 bempenb = r8a66597_read(r8a66597, BEMPENB);
1483
1484 if (mask0 & VBINT) {
1485 r8a66597_write(r8a66597, 0xffff & ~VBINT,
1486 INTSTS0);
1487 r8a66597_start_xclock(r8a66597);
1488
1489
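			/* start VBUS sampling; the timer debounces connect/disconnect */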
1490 r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
1491 & VBSTS;
1492 r8a66597->scount = R8A66597_MAX_SAMPLING;
1493
1494 mod_timer(&r8a66597->timer,
1495 jiffies + msecs_to_jiffies(50));
1496 }
1497 if (intsts0 & DVSQ)
1498 irq_device_state(r8a66597);
1499
1500 if ((intsts0 & BRDY) && (intenb0 & BRDYE)
1501 && (brdysts & brdyenb))
1502 irq_pipe_ready(r8a66597, brdysts, brdyenb);
1503 if ((intsts0 & BEMP) && (intenb0 & BEMPE)
1504 && (bempsts & bempenb))
1505 irq_pipe_empty(r8a66597, bempsts, bempenb);
1506
1507 if (intsts0 & CTRT)
1508 irq_control_stage(r8a66597);
1509 }
1510
1511 r8a66597_write(r8a66597, savepipe, CFIFOSEL);
1512
1513 spin_unlock(&r8a66597->lock);
1514 return IRQ_HANDLED;
1515}
1516
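/*
 * VBUS is re-sampled every 50 ms; a connect or disconnect is only reported
 * after R8A66597_MAX_SAMPLING identical readings, which debounces the VBUS
 * line.
 */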
1517static void r8a66597_timer(struct timer_list *t)
1518{
1519 struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
1520 unsigned long flags;
1521 u16 tmp;
1522
1523 spin_lock_irqsave(&r8a66597->lock, flags);
1524 tmp = r8a66597_read(r8a66597, SYSCFG0);
1525 if (r8a66597->scount > 0) {
1526 tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
1527 if (tmp == r8a66597->old_vbus) {
1528 r8a66597->scount--;
1529 if (r8a66597->scount == 0) {
1530 if (tmp == VBSTS)
1531 r8a66597_usb_connect(r8a66597);
1532 else
1533 r8a66597_usb_disconnect(r8a66597);
1534 } else {
1535 mod_timer(&r8a66597->timer,
1536 jiffies + msecs_to_jiffies(50));
1537 }
1538 } else {
1539 r8a66597->scount = R8A66597_MAX_SAMPLING;
1540 r8a66597->old_vbus = tmp;
1541 mod_timer(&r8a66597->timer,
1542 jiffies + msecs_to_jiffies(50));
1543 }
1544 }
1545 spin_unlock_irqrestore(&r8a66597->lock, flags);
1546}
1547
1548
1549static int r8a66597_enable(struct usb_ep *_ep,
1550 const struct usb_endpoint_descriptor *desc)
1551{
1552 struct r8a66597_ep *ep;
1553
1554 ep = container_of(_ep, struct r8a66597_ep, ep);
1555 return alloc_pipe_config(ep, desc);
1556}
1557
1558static int r8a66597_disable(struct usb_ep *_ep)
1559{
1560 struct r8a66597_ep *ep;
1561 struct r8a66597_request *req;
1562 unsigned long flags;
1563
1564 ep = container_of(_ep, struct r8a66597_ep, ep);
1565 BUG_ON(!ep);
1566
1567 while (!list_empty(&ep->queue)) {
1568 req = get_request_from_ep(ep);
1569 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1570 transfer_complete(ep, req, -ECONNRESET);
1571 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1572 }
1573
1574 pipe_irq_disable(ep->r8a66597, ep->pipenum);
1575 return free_pipe_config(ep);
1576}
1577
1578static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
1579 gfp_t gfp_flags)
1580{
1581 struct r8a66597_request *req;
1582
1583 req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
1584 if (!req)
1585 return NULL;
1586
1587 INIT_LIST_HEAD(&req->queue);
1588
1589 return &req->req;
1590}
1591
1592static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
1593{
1594 struct r8a66597_request *req;
1595
1596 req = container_of(_req, struct r8a66597_request, req);
1597 kfree(req);
1598}
1599
1600static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
1601 gfp_t gfp_flags)
1602{
1603 struct r8a66597_ep *ep;
1604 struct r8a66597_request *req;
1605 unsigned long flags;
1606 int request = 0;
1607
1608 ep = container_of(_ep, struct r8a66597_ep, ep);
1609 req = container_of(_req, struct r8a66597_request, req);
1610
1611 if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
1612 return -ESHUTDOWN;
1613
1614 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1615
1616 if (list_empty(&ep->queue))
1617 request = 1;
1618
1619 list_add_tail(&req->queue, &ep->queue);
1620 req->req.actual = 0;
1621 req->req.status = -EINPROGRESS;
1622
1623 if (ep->ep.desc == NULL)
1624 start_ep0(ep, req);
1625 else {
1626 if (request && !ep->busy)
1627 start_packet(ep, req);
1628 }
1629
1630 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1631
1632 return 0;
1633}
1634
1635static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1636{
1637 struct r8a66597_ep *ep;
1638 struct r8a66597_request *req;
1639 unsigned long flags;
1640
1641 ep = container_of(_ep, struct r8a66597_ep, ep);
1642 req = container_of(_req, struct r8a66597_request, req);
1643
1644 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1645 if (!list_empty(&ep->queue))
1646 transfer_complete(ep, req, -ECONNRESET);
1647 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1648
1649 return 0;
1650}
1651
1652static int r8a66597_set_halt(struct usb_ep *_ep, int value)
1653{
1654 struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);
1655 unsigned long flags;
1656 int ret = 0;
1657
1658 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1659 if (!list_empty(&ep->queue)) {
1660 ret = -EAGAIN;
1661 } else if (value) {
1662 ep->busy = 1;
1663 pipe_stall(ep->r8a66597, ep->pipenum);
1664 } else {
1665 ep->busy = 0;
1666 ep->wedge = 0;
1667 pipe_stop(ep->r8a66597, ep->pipenum);
1668 }
1669 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1670 return ret;
1671}
1672
1673static int r8a66597_set_wedge(struct usb_ep *_ep)
1674{
1675 struct r8a66597_ep *ep;
1676 unsigned long flags;
1677
1678 ep = container_of(_ep, struct r8a66597_ep, ep);
1679
1680 if (!ep || !ep->ep.desc)
1681 return -EINVAL;
1682
1683 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1684 ep->wedge = 1;
1685 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1686
1687 return usb_ep_set_halt(_ep);
1688}
1689
1690static void r8a66597_fifo_flush(struct usb_ep *_ep)
1691{
1692 struct r8a66597_ep *ep;
1693 unsigned long flags;
1694
1695 ep = container_of(_ep, struct r8a66597_ep, ep);
1696 spin_lock_irqsave(&ep->r8a66597->lock, flags);
1697 if (list_empty(&ep->queue) && !ep->busy) {
1698 pipe_stop(ep->r8a66597, ep->pipenum);
1699 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
1700 r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
1701 r8a66597_write(ep->r8a66597, 0, ep->pipectr);
1702 }
1703 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1704}
1705
1706static const struct usb_ep_ops r8a66597_ep_ops = {
1707 .enable = r8a66597_enable,
1708 .disable = r8a66597_disable,
1709
1710 .alloc_request = r8a66597_alloc_request,
1711 .free_request = r8a66597_free_request,
1712
1713 .queue = r8a66597_queue,
1714 .dequeue = r8a66597_dequeue,
1715
1716 .set_halt = r8a66597_set_halt,
1717 .set_wedge = r8a66597_set_wedge,
1718 .fifo_flush = r8a66597_fifo_flush,
1719};
1720
1721
1722static int r8a66597_start(struct usb_gadget *gadget,
1723 struct usb_gadget_driver *driver)
1724{
1725 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1726
1727 if (!driver
1728 || driver->max_speed < USB_SPEED_HIGH
1729 || !driver->setup)
1730 return -EINVAL;
1731 if (!r8a66597)
1732 return -ENODEV;
1733
1734
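	/* hook up the gadget driver */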
1735 r8a66597->driver = driver;
1736
1737 init_controller(r8a66597);
1738 r8a66597_bset(r8a66597, VBSE, INTENB0);
1739 if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
1740 r8a66597_start_xclock(r8a66597);
1741
1742 r8a66597->old_vbus = r8a66597_read(r8a66597,
1743 INTSTS0) & VBSTS;
1744 r8a66597->scount = R8A66597_MAX_SAMPLING;
1745 mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
1746 }
1747
1748 return 0;
1749}
1750
1751static int r8a66597_stop(struct usb_gadget *gadget)
1752{
1753 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1754 unsigned long flags;
1755
1756 spin_lock_irqsave(&r8a66597->lock, flags);
1757 r8a66597_bclr(r8a66597, VBSE, INTENB0);
1758 disable_controller(r8a66597);
1759 spin_unlock_irqrestore(&r8a66597->lock, flags);
1760
1761 r8a66597->driver = NULL;
1762 return 0;
1763}
1764
1765
1766static int r8a66597_get_frame(struct usb_gadget *_gadget)
1767{
1768 struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
1769 return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
1770}
1771
1772static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
1773{
1774 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1775 unsigned long flags;
1776
1777 spin_lock_irqsave(&r8a66597->lock, flags);
1778 if (is_on)
1779 r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
1780 else
1781 r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
1782 spin_unlock_irqrestore(&r8a66597->lock, flags);
1783
1784 return 0;
1785}
1786
1787static int r8a66597_set_selfpowered(struct usb_gadget *gadget, int is_self)
1788{
1789 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1790
1791 gadget->is_selfpowered = (is_self != 0);
1792 if (is_self)
1793 r8a66597->device_status |= 1 << USB_DEVICE_SELF_POWERED;
1794 else
1795 r8a66597->device_status &= ~(1 << USB_DEVICE_SELF_POWERED);
1796
1797 return 0;
1798}
1799
1800static const struct usb_gadget_ops r8a66597_gadget_ops = {
1801 .get_frame = r8a66597_get_frame,
1802 .udc_start = r8a66597_start,
1803 .udc_stop = r8a66597_stop,
1804 .pullup = r8a66597_pullup,
1805 .set_selfpowered = r8a66597_set_selfpowered,
1806};
1807
1808static int r8a66597_remove(struct platform_device *pdev)
1809{
1810 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
1811
1812 usb_del_gadget_udc(&r8a66597->gadget);
1813 del_timer_sync(&r8a66597->timer);
1814 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
1815
	if (r8a66597->pdata->on_chip)
		clk_disable_unprepare(r8a66597->clk);
1819
1820 return 0;
1821}
1822
1823static void nop_completion(struct usb_ep *ep, struct usb_request *r)
1824{
1825}
1826
1827static int r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
1828 struct platform_device *pdev)
1829{
1830 struct resource *res;
1831
1832 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
1833 r8a66597->sudmac_reg = devm_ioremap_resource(&pdev->dev, res);
1834 return PTR_ERR_OR_ZERO(r8a66597->sudmac_reg);
1835}
1836
1837static int r8a66597_probe(struct platform_device *pdev)
1838{
1839 struct device *dev = &pdev->dev;
1840 char clk_name[8];
1841 struct resource *res, *ires;
1842 int irq;
1843 void __iomem *reg = NULL;
1844 struct r8a66597 *r8a66597 = NULL;
1845 int ret = 0;
1846 int i;
1847 unsigned long irq_trigger;
1848
1849 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1850 reg = devm_ioremap_resource(&pdev->dev, res);
1851 if (IS_ERR(reg))
1852 return PTR_ERR(reg);
1853
	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires)
		return -EINVAL;
	irq = ires->start;
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
1857
1858 if (irq < 0) {
1859 dev_err(dev, "platform_get_irq error.\n");
1860 return -ENODEV;
1861 }
1862
1863
1864 r8a66597 = devm_kzalloc(dev, sizeof(struct r8a66597), GFP_KERNEL);
1865 if (r8a66597 == NULL)
1866 return -ENOMEM;
1867
1868 spin_lock_init(&r8a66597->lock);
1869 platform_set_drvdata(pdev, r8a66597);
1870 r8a66597->pdata = dev_get_platdata(dev);
1871 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
1872
1873 r8a66597->gadget.ops = &r8a66597_gadget_ops;
1874 r8a66597->gadget.max_speed = USB_SPEED_HIGH;
1875 r8a66597->gadget.name = udc_name;
1876
1877 timer_setup(&r8a66597->timer, r8a66597_timer, 0);
1878 r8a66597->reg = reg;
1879
1880 if (r8a66597->pdata->on_chip) {
1881 snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
1882 r8a66597->clk = devm_clk_get(dev, clk_name);
1883 if (IS_ERR(r8a66597->clk)) {
1884 dev_err(dev, "cannot get clock \"%s\"\n", clk_name);
1885 return PTR_ERR(r8a66597->clk);
1886 }
1887 clk_prepare_enable(r8a66597->clk);
1888 }
1889
1890 if (r8a66597->pdata->sudmac) {
1891 ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
1892 if (ret < 0)
1893 goto clean_up2;
1894 }
1895
1896 disable_controller(r8a66597);
1897
1898 ret = devm_request_irq(dev, irq, r8a66597_irq, IRQF_SHARED,
1899 udc_name, r8a66597);
1900 if (ret < 0) {
1901 dev_err(dev, "request_irq error (%d)\n", ret);
1902 goto clean_up2;
1903 }
1904
1905 INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
1906 r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
1907 INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
1908 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
1909 struct r8a66597_ep *ep = &r8a66597->ep[i];
1910
1911 if (i != 0) {
1912 INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
1913 list_add_tail(&r8a66597->ep[i].ep.ep_list,
1914 &r8a66597->gadget.ep_list);
1915 }
1916 ep->r8a66597 = r8a66597;
1917 INIT_LIST_HEAD(&ep->queue);
1918 ep->ep.name = r8a66597_ep_name[i];
1919 ep->ep.ops = &r8a66597_ep_ops;
1920 usb_ep_set_maxpacket_limit(&ep->ep, 512);
1921
1922 if (i == 0) {
1923 ep->ep.caps.type_control = true;
1924 } else {
1925 ep->ep.caps.type_iso = true;
1926 ep->ep.caps.type_bulk = true;
1927 ep->ep.caps.type_int = true;
1928 }
1929 ep->ep.caps.dir_in = true;
1930 ep->ep.caps.dir_out = true;
1931 }
1932 usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
1933 r8a66597->ep[0].pipenum = 0;
1934 r8a66597->ep[0].fifoaddr = CFIFO;
1935 r8a66597->ep[0].fifosel = CFIFOSEL;
1936 r8a66597->ep[0].fifoctr = CFIFOCTR;
1937 r8a66597->ep[0].pipectr = get_pipectr_addr(0);
1938 r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
1939 r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
1940
1941 r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
1942 GFP_KERNEL);
1943 if (r8a66597->ep0_req == NULL) {
1944 ret = -ENOMEM;
1945 goto clean_up2;
1946 }
1947 r8a66597->ep0_req->complete = nop_completion;
1948
1949 ret = usb_add_gadget_udc(dev, &r8a66597->gadget);
1950 if (ret)
1951 goto err_add_udc;
1952
1953 dev_info(dev, "version %s\n", DRIVER_VERSION);
1954 return 0;
1955
err_add_udc:
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
clean_up2:
	if (r8a66597->pdata->on_chip)
		clk_disable_unprepare(r8a66597->clk);
1964
1965 return ret;
1966}
1967
1968
1969static struct platform_driver r8a66597_driver = {
1970 .remove = r8a66597_remove,
1971 .driver = {
1972 .name = (char *) udc_name,
1973 },
1974};
1975
1976module_platform_driver_probe(r8a66597_driver, r8a66597_probe);
1977
1978MODULE_DESCRIPTION("R8A66597 USB gadget driver");
1979MODULE_LICENSE("GPL");
1980MODULE_AUTHOR("Yoshihiro Shimoda");
1981MODULE_ALIAS("platform:r8a66597_udc");
1982