1
2
3
4
5
6
7
8
9
10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/slab.h>
16#include <linux/device.h>
17#include <linux/dma-mapping.h>
18#include <linux/list.h>
19#include <linux/platform_device.h>
20#include <linux/usb/ch9.h>
21#include <linux/usb/gadget.h>
22#include <linux/usb/atmel_usba_udc.h>
23#include <linux/delay.h>
24
25#include <asm/gpio.h>
26#include <mach/board.h>
27
28#include "atmel_usba_udc.h"
29
30
31static struct usba_udc the_udc;
32static struct usba_ep *usba_ep;
33
34#ifdef CONFIG_USB_GADGET_DEBUG_FS
35#include <linux/debugfs.h>
36#include <linux/uaccess.h>
37
/*
 * Snapshot the endpoint's request queue at open time.
 *
 * Copies every queued usba_request into a private list stored in
 * file->private_data so queue_dbg_read() can consume it without holding
 * the UDC lock.  Copies are made with GFP_ATOMIC because the UDC spinlock
 * is held (with IRQs disabled) while walking the live queue.
 *
 * Returns 0 on success, -ENOMEM if any allocation fails (all copies made
 * so far are freed on the failure path).
 */
static int queue_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_ep *ep = inode->i_private;
	struct usba_request *req, *req_copy;
	struct list_head *queue_data;

	queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
	if (!queue_data)
		return -ENOMEM;
	INIT_LIST_HEAD(queue_data);

	spin_lock_irq(&ep->udc->lock);
	list_for_each_entry(req, &ep->queue, queue) {
		req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
		if (!req_copy)
			goto fail;
		/* The copy's list linkage is rewritten to join the snapshot. */
		list_add_tail(&req_copy->queue, queue_data);
	}
	spin_unlock_irq(&ep->udc->lock);

	file->private_data = queue_data;
	return 0;

fail:
	spin_unlock_irq(&ep->udc->lock);
	list_for_each_entry_safe(req, req_copy, queue_data, queue) {
		list_del(&req->queue);
		kfree(req);
	}
	kfree(queue_data);
	return -ENOMEM;
}
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Dump the queue snapshot taken by queue_dbg_open() to user space.
 *
 * Each request is formatted as one fixed-width text line and then removed
 * from the snapshot list, so the read is destructive: re-reading requires
 * reopening the file.  The inode mutex serializes concurrent readers of
 * the same open file's snapshot.
 *
 * Returns the number of bytes actually copied to user space.
 */
static ssize_t queue_dbg_read(struct file *file, char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct list_head *queue = file->private_data;
	struct usba_request *req, *tmp_req;
	size_t len, remaining, actual = 0;
	char tmpbuf[38];	/* one formatted line incl. trailing NUL */

	/* Legacy two-step copy: validate once, then use __copy_to_user(). */
	if (!access_ok(VERIFY_WRITE, buf, nbytes))
		return -EFAULT;

	mutex_lock(&file->f_dentry->d_inode->i_mutex);
	list_for_each_entry_safe(req, tmp_req, queue, queue) {
		/* Flag characters: uppercase = flag clear, lowercase = set
		 * (or vice versa per flag) -- see the ternaries below. */
		len = snprintf(tmpbuf, sizeof(tmpbuf),
				"%8p %08x %c%c%c %5d %c%c%c\n",
				req->req.buf, req->req.length,
				req->req.no_interrupt ? 'i' : 'I',
				req->req.zero ? 'Z' : 'z',
				req->req.short_not_ok ? 's' : 'S',
				req->req.status,
				req->submitted ? 'F' : 'f',
				req->using_dma ? 'D' : 'd',
				req->last_transaction ? 'L' : 'l');
		/* snprintf() may report a would-be length; clamp to buffer. */
		len = min(len, sizeof(tmpbuf));
		if (len > nbytes)
			break;

		/* Entry is consumed even if the copy below partially fails. */
		list_del(&req->queue);
		kfree(req);

		remaining = __copy_to_user(buf, tmpbuf, len);
		actual += len - remaining;
		if (remaining)
			break;

		nbytes -= len;
		buf += len;
	}
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);

	return actual;
}
127
/*
 * Free whatever is left of the queue snapshot (entries not yet consumed
 * by queue_dbg_read()) along with the list head itself.
 */
static int queue_dbg_release(struct inode *inode, struct file *file)
{
	struct list_head *queue_data = file->private_data;
	struct usba_request *req, *tmp_req;

	list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
		list_del(&req->queue);
		kfree(req);
	}
	kfree(queue_data);
	return 0;
}
140
/*
 * Capture a snapshot of the controller register file at open time.
 *
 * inode->i_size is set by usba_init_debugfs() to the size of the register
 * window; that many bytes are read (as 32-bit words) under the UDC lock
 * into a kmalloc'd buffer which regs_dbg_read() later serves.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int regs_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_udc *udc;
	unsigned int i;
	u32 *data;
	int ret = -ENOMEM;

	/* i_mutex guards i_size against concurrent open/read. */
	mutex_lock(&inode->i_mutex);
	udc = inode->i_private;
	data = kmalloc(inode->i_size, GFP_KERNEL);
	if (!data)
		goto out;

	spin_lock_irq(&udc->lock);
	for (i = 0; i < inode->i_size / 4; i++)
		data[i] = __raw_readl(udc->regs + i * 4);
	spin_unlock_irq(&udc->lock);

	file->private_data = data;
	ret = 0;

out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}
167
/*
 * Serve the register snapshot captured by regs_dbg_open().  Supports
 * seeking/partial reads via simple_read_from_buffer().
 */
static ssize_t regs_dbg_read(struct file *file, char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	int ret;

	mutex_lock(&inode->i_mutex);
	ret = simple_read_from_buffer(buf, nbytes, ppos,
			file->private_data,
			file->f_dentry->d_inode->i_size);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
182
/* Discard the register snapshot buffer allocated in regs_dbg_open(). */
static int regs_dbg_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
188
/* debugfs "queue" file: destructive text dump of an endpoint's queue. */
const struct file_operations queue_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= queue_dbg_open,
	.llseek		= no_llseek,
	.read		= queue_dbg_read,
	.release	= queue_dbg_release,
};
196
/* debugfs "regs" file: seekable binary snapshot of the register window. */
const struct file_operations regs_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= regs_dbg_open,
	.llseek		= generic_file_llseek,
	.read		= regs_dbg_read,
	.release	= regs_dbg_release,
};
204
/*
 * Create the per-endpoint debugfs directory and its files:
 *   queue       - always
 *   dma_status  - only for DMA-capable endpoints
 *   state       - only for the control endpoint
 *
 * On any failure everything created so far is unwound and a single error
 * is logged; the driver continues without debugfs for this endpoint.
 */
static void usba_ep_init_debugfs(struct usba_udc *udc,
		struct usba_ep *ep)
{
	struct dentry *ep_root;

	ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
	if (!ep_root)
		goto err_root;
	ep->debugfs_dir = ep_root;

	ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
			ep, &queue_dbg_fops);
	if (!ep->debugfs_queue)
		goto err_queue;

	if (ep->can_dma) {
		ep->debugfs_dma_status
			= debugfs_create_u32("dma_status", 0400, ep_root,
					&ep->last_dma_status);
		if (!ep->debugfs_dma_status)
			goto err_dma_status;
	}
	if (ep_is_control(ep)) {
		ep->debugfs_state
			= debugfs_create_u32("state", 0400, ep_root,
					&ep->state);
		if (!ep->debugfs_state)
			goto err_state;
	}

	return;

	/* Unwind in reverse creation order. */
err_state:
	if (ep->can_dma)
		debugfs_remove(ep->debugfs_dma_status);
err_dma_status:
	debugfs_remove(ep->debugfs_queue);
err_queue:
	debugfs_remove(ep_root);
err_root:
	dev_err(&ep->udc->pdev->dev,
		"failed to create debugfs directory for %s\n", ep->ep.name);
}
248
249static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
250{
251 debugfs_remove(ep->debugfs_queue);
252 debugfs_remove(ep->debugfs_dma_status);
253 debugfs_remove(ep->debugfs_state);
254 debugfs_remove(ep->debugfs_dir);
255 ep->debugfs_dma_status = NULL;
256 ep->debugfs_dir = NULL;
257}
258
/*
 * Create the top-level debugfs directory for the gadget, the "regs" file
 * (sized to the controller's MMIO resource so reads know how much to
 * snapshot), and the ep0 subdirectory.  Non-fatal on failure: the driver
 * simply runs without debugfs.
 */
static void usba_init_debugfs(struct usba_udc *udc)
{
	struct dentry *root, *regs;
	struct resource *regs_resource;

	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;
	udc->debugfs_root = root;

	regs = debugfs_create_file("regs", 0400, root, udc, &regs_dbg_fops);
	if (!regs)
		goto err_regs;

	regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
				CTRL_IOMEM_ID);
	/* regs_dbg_open() uses i_size as the register window length. */
	regs->d_inode->i_size = resource_size(regs_resource);
	udc->debugfs_regs = regs;

	usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));

	return;

err_regs:
	debugfs_remove(root);
err_root:
	udc->debugfs_root = NULL;
	dev_err(&udc->pdev->dev, "debugfs is not available\n");
}
288
/* Remove the ep0 debugfs entries, then the "regs" file and root dir. */
static void usba_cleanup_debugfs(struct usba_udc *udc)
{
	usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
	debugfs_remove(udc->debugfs_regs);
	debugfs_remove(udc->debugfs_root);
	udc->debugfs_regs = NULL;
	udc->debugfs_root = NULL;
}
297#else
/* No-op stubs used when CONFIG_USB_GADGET_DEBUG_FS is disabled. */
static inline void usba_ep_init_debugfs(struct usba_udc *udc,
			 struct usba_ep *ep)
{

}

static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{

}

static inline void usba_init_debugfs(struct usba_udc *udc)
{

}

static inline void usba_cleanup_debugfs(struct usba_udc *udc)
{

}
318#endif
319
320static int vbus_is_present(struct usba_udc *udc)
321{
322 if (gpio_is_valid(udc->vbus_pin))
323 return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
324
325
326 return 1;
327}
328
329#if defined(CONFIG_ARCH_AT91SAM9RL)
330
331#include <mach/at91_pmc.h>
332
/*
 * AT91SAM9RL: gate the UTMI bias generator via the PMC's UCKR register.
 * Read-modify-write of the BIASEN bit; all other UCKR bits preserved.
 */
static void toggle_bias(int is_on)
{
	unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);

	if (is_on)
		at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
	else
		at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
}
342
343#else
344
/* Other SoCs need no bias toggling. */
static void toggle_bias(int is_on)
{
}
348
349#endif
350
/*
 * Push the next PIO chunk of an IN request into the endpoint FIFO.
 *
 * Writes up to maxpacket bytes and marks the packet ready.  Sets
 * req->last_transaction when this chunk completes the request -- except
 * when the chunk is exactly maxpacket and the request asked for a
 * terminating zero-length packet (req.zero), which forces one more pass.
 */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
	unsigned int transaction_len;

	transaction_len = req->req.length - req->req.actual;
	req->last_transaction = 1;
	if (transaction_len > ep->ep.maxpacket) {
		transaction_len = ep->ep.maxpacket;
		req->last_transaction = 0;
	} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
		req->last_transaction = 0;

	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
		ep->ep.name, req, transaction_len,
		req->last_transaction ? ", done" : "");

	memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	req->req.actual += transaction_len;
}
371
/*
 * Start hardware transfer of a request, via DMA when the request was set
 * up for it, otherwise via PIO through next_fifo_transaction().
 *
 * Caller context: invoked with the UDC lock held (from queue/IRQ paths).
 */
static void submit_request(struct usba_ep *ep, struct usba_request *req)
{
	DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
		ep->ep.name, req, req->req.length);

	req->req.actual = 0;
	req->submitted = 1;

	if (req->using_dma) {
		/* Zero-length transfers can't go through the DMA engine;
		 * just wait for TX_PK_RDY and complete from the IRQ path. */
		if (req->req.length == 0) {
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
			return;
		}

		/* SHORT_PACKET interrupt doubles as the ZLP trigger. */
		if (req->req.zero)
			usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
		else
			usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);

		usba_dma_writel(ep, ADDRESS, req->req.dma);
		usba_dma_writel(ep, CONTROL, req->ctrl);
	} else {
		next_fifo_transaction(ep, req);
		if (req->last_transaction) {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		} else {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		}
	}
}
404
405static void submit_next_request(struct usba_ep *ep)
406{
407 struct usba_request *req;
408
409 if (list_empty(&ep->queue)) {
410 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
411 return;
412 }
413
414 req = list_entry(ep->queue.next, struct usba_request, queue);
415 if (!req->submitted)
416 submit_request(ep, req);
417}
418
/* Arm an empty IN packet for the control transfer's status stage. */
static void send_status(struct usba_udc *udc, struct usba_ep *ep)
{
	ep->state = STATUS_STAGE_IN;
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
425
/*
 * Drain received OUT data from the endpoint FIFO banks into the request
 * at the head of the queue (PIO path).
 *
 * Caller context: UDC lock held; the lock is dropped around the gadget
 * completion callback, so queue/status registers are re-read afterwards.
 */
static void receive_data(struct usba_ep *ep)
{
	struct usba_udc *udc = ep->udc;
	struct usba_request *req;
	unsigned long status;
	unsigned int bytecount, nr_busy;
	int is_complete = 0;

	status = usba_ep_readl(ep, STA);
	nr_busy = USBA_BFEXT(BUSY_BANKS, status);

	DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);

	while (nr_busy > 0) {
		if (list_empty(&ep->queue)) {
			/* Nowhere to put the data: stop RX interrupts. */
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			break;
		}
		req = list_entry(ep->queue.next,
				struct usba_request, queue);

		bytecount = USBA_BFEXT(BYTE_COUNT, status);

		/* NOTE(review): bit 31 of STA appears to flag a short
		 * packet ending the transfer -- confirm against the
		 * USBA datasheet. */
		if (status & (1 << 31))
			is_complete = 1;
		if (req->req.actual + bytecount >= req->req.length) {
			is_complete = 1;
			/* Truncate to what the request can still hold. */
			bytecount = req->req.length - req->req.actual;
		}

		memcpy_fromio(req->req.buf + req->req.actual,
				ep->fifo, bytecount);
		req->req.actual += bytecount;

		/* Release the FIFO bank back to the hardware. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);

		if (is_complete) {
			DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
			req->req.status = 0;
			list_del_init(&req->queue);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			/* Completion runs unlocked, per gadget API rules. */
			spin_unlock(&udc->lock);
			req->req.complete(&ep->ep, &req->req);
			spin_lock(&udc->lock);
		}

		status = usba_ep_readl(ep, STA);
		nr_busy = USBA_BFEXT(BUSY_BANKS, status);

		if (is_complete && ep_is_control(ep)) {
			send_status(udc, ep);
			break;
		}
	}
}
481
/*
 * Finish a request: record its final status, undo any DMA mapping this
 * driver created for it, and invoke the gadget completion callback with
 * the UDC lock dropped (gadget API requirement).
 *
 * The request must already be unlinked from its endpoint queue.
 * A status is only written if the request is still -EINPROGRESS, so an
 * earlier error/cancel status is preserved.
 */
static void
request_complete(struct usba_ep *ep, struct usba_request *req, int status)
{
	struct usba_udc *udc = ep->udc;

	WARN_ON(!list_empty(&req->queue));

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	/* Only unmap buffers we mapped ourselves in queue_dma(). */
	if (req->mapped) {
		dma_unmap_single(
			&udc->pdev->dev, req->req.dma, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	DBG(DBG_GADGET | DBG_REQ,
		"%s: req %p complete: status %d, actual %u\n",
		ep->ep.name, req, req->req.status, req->req.actual);

	spin_unlock(&udc->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
508
/*
 * Complete every request on @list with @status.  The safe iterator is
 * required because request_complete() expects each entry unlinked and
 * the completion callback may free it.
 */
static void
request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
{
	struct usba_request *req, *tmp_req;

	list_for_each_entry_safe(req, tmp_req, list, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, status);
	}
}
519
/*
 * usb_ep_ops.enable: configure and enable a non-control endpoint
 * according to @desc.
 *
 * Builds the EPT_CFG value (packet size, direction, transfer type, bank
 * count, and for isochronous endpoints the transactions-per-microframe
 * field), then programs the hardware and unmasks the endpoint's (and,
 * for DMA-capable endpoints, its DMA channel's) interrupts.
 *
 * Returns 0 on success, -EINVAL for a descriptor that does not match
 * this endpoint's capabilities.
 */
static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags, ept_cfg, maxpacket;
	unsigned int nr_trans;

	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);

	/* Low 11 bits of wMaxPacketSize are the actual packet size. */
	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;

	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
			|| ep->index == 0
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| maxpacket == 0
			|| maxpacket > ep->fifo_size) {
		DBG(DBG_ERR, "ep_enable: Invalid argument");
		return -EINVAL;
	}

	ep->is_isoc = 0;
	ep->is_in = 0;

	if (maxpacket <= 8)
		ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
	else
		/* Round up to the next power-of-two size encoding. */
		ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);

	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
			ep->ep.name, ept_cfg, maxpacket);

	if (usb_endpoint_dir_in(desc)) {
		ep->is_in = 1;
		ept_cfg |= USBA_EPT_DIR_IN;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->can_isoc) {
			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
					ep->ep.name);
			return -EINVAL;
		}

		/* Bits 11..12 of wMaxPacketSize encode (additional)
		 * transactions per microframe; 1..3 total are valid. */
		nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
		if (nr_trans > 3)
			return -EINVAL;

		ep->is_isoc = 1;
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);

		/* Triple banking only when hardware provides 3 banks and
		 * the interface actually uses >1 transaction/uframe. */
		if (nr_trans > 1 && ep->nr_banks == 3)
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
		else
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	case USB_ENDPOINT_XFER_INT:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);

	ep->ep.desc = desc;
	ep->ep.maxpacket = maxpacket;

	usba_ep_writel(ep, CFG, ept_cfg);
	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);

	if (ep->can_dma) {
		u32 ctrl;

		/* Unmask both the endpoint and its DMA channel IRQ. */
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)
					| USBA_BF(DMA_INT, 1 << ep->index)));
		ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
		usba_ep_writel(ep, CTL_ENB, ctrl);
	} else {
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)));
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
			(unsigned long)usba_ep_readl(ep, CFG));
	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
			(unsigned long)usba_readl(udc, INT_ENB));

	return 0;
}
632
/*
 * usb_ep_ops.disable: shut an endpoint down, cancel its DMA channel,
 * mask its interrupt, and complete all outstanding requests with
 * -ESHUTDOWN.
 *
 * The queue is spliced onto a local list under the lock so completions
 * can be issued without the queue changing underneath.
 */
static int usba_ep_disable(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	LIST_HEAD(req_list);
	unsigned long flags;

	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);

	spin_lock_irqsave(&udc->lock, flags);

	if (!ep->ep.desc) {
		spin_unlock_irqrestore(&udc->lock, flags);
		/* Only complain while the bus is up; after a disconnect
		 * the gadget layer may legitimately disable twice. */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN)
			DBG(DBG_ERR, "ep_disable: %s not enabled\n",
					ep->ep.name);
		return -EINVAL;
	}
	ep->ep.desc = NULL;

	list_splice_init(&ep->queue, &req_list);
	if (ep->can_dma) {
		usba_dma_writel(ep, CONTROL, 0);
		usba_dma_writel(ep, ADDRESS, 0);
		/* Dummy read clears pending DMA status. */
		usba_dma_readl(ep, STATUS);
	}
	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
	usba_writel(udc, INT_ENB,
			usba_readl(udc, INT_ENB)
			& ~USBA_BF(EPT_INT, 1 << ep->index));

	request_complete_list(ep, &req_list, -ESHUTDOWN);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
674
675static struct usb_request *
676usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
677{
678 struct usba_request *req;
679
680 DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
681
682 req = kzalloc(sizeof(*req), gfp_flags);
683 if (!req)
684 return NULL;
685
686 INIT_LIST_HEAD(&req->queue);
687 req->req.dma = DMA_ADDR_INVALID;
688
689 return &req->req;
690}
691
692static void
693usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
694{
695 struct usba_request *req = to_usba_req(_req);
696
697 DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
698
699 kfree(req);
700}
701
/*
 * Queue a request on a DMA-capable endpoint.
 *
 * Maps the buffer for DMA (or syncs a caller-provided mapping), builds
 * the DMA control word, and -- if the queue was empty -- starts the
 * transfer immediately.  The length check exists because the DMA_BUF_LEN
 * field cannot describe transfers larger than 64 KiB.
 *
 * Returns 0 on success, -EINVAL for oversized requests, -ESHUTDOWN if
 * the endpoint was disabled before the request could be linked in.
 */
static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
		struct usba_request *req, gfp_t gfp_flags)
{
	unsigned long flags;
	int ret;

	DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
		ep->ep.name, req->req.length, req->req.dma,
		req->req.zero ? 'Z' : 'z',
		req->req.short_not_ok ? 'S' : 's',
		req->req.no_interrupt ? 'I' : 'i');

	if (req->req.length > 0x10000) {
		/* Can't encode lengths > 64 KiB in DMA_BUF_LEN. */
		DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
		return -EINVAL;
	}

	req->using_dma = 1;

	if (req->req.dma == DMA_ADDR_INVALID) {
		/* We own the mapping and must undo it on completion. */
		req->req.dma = dma_map_single(
			&udc->pdev->dev, req->req.buf, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		/* Caller pre-mapped the buffer; just sync it. */
		dma_sync_single_for_device(
			&udc->pdev->dev, req->req.dma, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
			| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
			| USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;

	if (ep->is_in)
		req->ctrl |= USBA_DMA_END_BUF_EN;

	/* The endpoint may have been disabled between the caller's check
	 * and here, so re-verify under the lock before linking in. */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->ep.desc) {
		if (list_empty(&ep->queue))
			submit_request(ep, req);

		list_add_tail(&req->queue, &ep->queue);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
759
/*
 * usb_ep_ops.queue: submit a request on an endpoint.
 *
 * DMA-capable endpoints delegate to queue_dma(); PIO endpoints link the
 * request in and enable the interrupt that will drive the transfer
 * (TX_PK_RDY for IN-direction traffic, RX_BK_RDY for OUT).
 *
 * Returns 0 on success, -ESHUTDOWN when no driver is bound, the bus is
 * down, or the endpoint is disabled.
 */
static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct usba_request *req = to_usba_req(_req);
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
			ep->ep.name, req, _req->length);

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
			!ep->ep.desc)
		return -ESHUTDOWN;

	req->submitted = 0;
	req->using_dma = 0;
	req->last_transaction = 0;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	if (ep->can_dma)
		return queue_dma(udc, ep, req, gfp_flags);

	/* PIO path: re-check the endpoint under the lock. */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->ep.desc) {
		list_add_tail(&req->queue, &ep->queue);

		/* Control endpoints transmit only during the IN stages of
		 * a control transfer; everything else is direction-based. */
		if ((!ep_is_control(ep) && ep->is_in) ||
			(ep_is_control(ep)
				&& (ep->state == DATA_STAGE_IN
					|| ep->state == STATUS_STAGE_IN)))
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		else
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
805
/*
 * Derive the bytes actually transferred from the DMA status word: the
 * DMA_BUF_LEN field counts down from the programmed length.
 */
static void
usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
{
	req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
}
811
/*
 * Abort the endpoint's DMA channel and busy-wait (up to ~40 us) for the
 * hardware to report the channel disabled.
 *
 * @pstatus: if non-NULL, receives the last DMA STATUS value read.
 * Returns 0 when the channel stopped, -ETIMEDOUT if it never did.
 */
static int stop_dma(struct usba_ep *ep, u32 *pstatus)
{
	unsigned int timeout;
	u32 status;

	/* Clearing CH_EN requests the abort; completion is asynchronous. */
	usba_dma_writel(ep, CONTROL, 0);

	for (timeout = 40; timeout; --timeout) {
		status = usba_dma_readl(ep, STATUS);
		if (!(status & USBA_DMA_CH_EN))
			break;
		udelay(1);
	}

	if (pstatus)
		*pstatus = status;

	if (timeout == 0) {
		dev_err(&ep->udc->pdev->dev,
			"%s: timed out waiting for DMA FIFO to empty\n",
			ep->ep.name);
		return -ETIMEDOUT;
	}

	return 0;
}
843
/*
 * usb_ep_ops.dequeue: cancel a queued request.
 *
 * If the request is at the head of a DMA endpoint's queue it may already
 * be in flight: the DMA channel is stopped, the endpoint FIFO reset, and
 * the partial byte count recorded before completing with -ECONNRESET.
 * Afterwards the next pending request (if any) is started.
 */
static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	struct usba_request *req = to_usba_req(_req);
	unsigned long flags;
	u32 status;

	DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
			ep->ep.name, req);

	spin_lock_irqsave(&udc->lock, flags);

	if (req->using_dma) {
		/* Only the head of the queue can be active in hardware. */
		if (ep->queue.next == &req->queue) {
			status = usba_dma_readl(ep, STATUS);
			if (status & USBA_DMA_CH_EN)
				stop_dma(ep, &status);

#ifdef CONFIG_USB_GADGET_DEBUG_FS
			ep->last_dma_status = status;
#endif

			/* Reset flushes any data stranded in the FIFO. */
			usba_writel(udc, EPT_RST, 1 << ep->index);

			usba_update_req(ep, req, status);
		}
	}

	/* Unlink and complete; safe even if the request never started. */
	list_del_init(&req->queue);

	request_complete(ep, req, -ECONNRESET);

	submit_next_request(ep);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
891
/*
 * usb_ep_ops.set_halt: stall or un-stall an endpoint.
 *
 * Refuses to operate on a disabled endpoint (-ENODEV) or an isochronous
 * one (-ENOTTY; iso endpoints cannot halt).  Setting a halt fails with
 * -EAGAIN while requests are queued or, for IN endpoints, while FIFO
 * banks still hold untransmitted data -- the gadget layer retries.
 * Clearing the halt also clears the data toggle.
 */
static int usba_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret = 0;

	DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
			value ? "set" : "clear");

	if (!ep->ep.desc) {
		DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
				ep->ep.name);
		return -ENODEV;
	}
	if (ep->is_isoc) {
		DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
				ep->ep.name);
		return -ENOTTY;
	}

	spin_lock_irqsave(&udc->lock, flags);

	if (!list_empty(&ep->queue)
			|| ((value && ep->is_in && (usba_ep_readl(ep, STA)
					& USBA_BF(BUSY_BANKS, -1L))))) {
		ret = -EAGAIN;
	} else {
		if (value)
			usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
		else
			usba_ep_writel(ep, CLR_STA,
					USBA_FORCE_STALL | USBA_TOGGLE_CLR);
		/* Read back to make sure the write has reached hardware. */
		usba_ep_readl(ep, STA);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
936
937static int usba_ep_fifo_status(struct usb_ep *_ep)
938{
939 struct usba_ep *ep = to_usba_ep(_ep);
940
941 return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
942}
943
944static void usba_ep_fifo_flush(struct usb_ep *_ep)
945{
946 struct usba_ep *ep = to_usba_ep(_ep);
947 struct usba_udc *udc = ep->udc;
948
949 usba_writel(udc, EPT_RST, 1 << ep->index);
950}
951
/* Endpoint operations exposed to the gadget framework. */
static const struct usb_ep_ops usba_ep_ops = {
	.enable		= usba_ep_enable,
	.disable	= usba_ep_disable,
	.alloc_request	= usba_ep_alloc_request,
	.free_request	= usba_ep_free_request,
	.queue		= usba_ep_queue,
	.dequeue	= usba_ep_dequeue,
	.set_halt	= usba_ep_set_halt,
	.fifo_status	= usba_ep_fifo_status,
	.fifo_flush	= usba_ep_fifo_flush,
};
963
964static int usba_udc_get_frame(struct usb_gadget *gadget)
965{
966 struct usba_udc *udc = to_usba_udc(gadget);
967
968 return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
969}
970
/*
 * usb_gadget_ops.wakeup: signal remote wakeup, but only if the host has
 * previously enabled the REMOTE_WAKEUP device feature.  Returns -EINVAL
 * when the feature is not armed.
 */
static int usba_udc_wakeup(struct usb_gadget *gadget)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags;
	u32 ctrl;
	int ret = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
		ctrl = usba_readl(udc, CTRL);
		usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
988
989static int
990usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
991{
992 struct usba_udc *udc = to_usba_udc(gadget);
993 unsigned long flags;
994
995 spin_lock_irqsave(&udc->lock, flags);
996 if (is_selfpowered)
997 udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
998 else
999 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
1000 spin_unlock_irqrestore(&udc->lock, flags);
1001
1002 return 0;
1003}
1004
1005static int atmel_usba_start(struct usb_gadget *gadget,
1006 struct usb_gadget_driver *driver);
1007static int atmel_usba_stop(struct usb_gadget *gadget,
1008 struct usb_gadget_driver *driver);
/* Gadget-level operations exposed to the UDC core. */
static const struct usb_gadget_ops usba_udc_ops = {
	.get_frame		= usba_udc_get_frame,
	.wakeup			= usba_udc_wakeup,
	.set_selfpowered	= usba_udc_set_selfpowered,
	.udc_start		= atmel_usba_start,
	.udc_stop		= atmel_usba_stop,
};
1016
/* Fixed descriptor used to enable ep0 (64-byte control endpoint). */
static struct usb_endpoint_descriptor usba_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
	/* bInterval is ignored for control endpoints. */
	.bInterval = 1,
};
1026
/* Empty release; the_udc is static, so nothing to free. */
static void nop_release(struct device *dev)
{

}
1031
/* Single static controller instance (this driver supports one UDC). */
static struct usba_udc the_udc = {
	.gadget	= {
		.ops		= &usba_udc_ops,
		.ep_list	= LIST_HEAD_INIT(the_udc.gadget.ep_list),
		.max_speed	= USB_SPEED_HIGH,
		.name		= "atmel_usba_udc",
		.dev	= {
			.init_name	= "gadget",
			.release	= nop_release,
		},
	},
};
1044
1045
1046
1047
/*
 * Reset every endpoint after bus reset/disconnect: hardware-reset all
 * FIFOs, fail ep0's pending requests with -ECONNRESET, then disable any
 * still-enabled non-control endpoint.
 *
 * Caller context: UDC lock held.  The lock is dropped around
 * usba_ep_disable(), which takes it itself and may call completion
 * callbacks.
 */
static void reset_all_endpoints(struct usba_udc *udc)
{
	struct usba_ep *ep;
	struct usba_request *req, *tmp_req;

	usba_writel(udc, EPT_RST, ~0UL);

	ep = to_usba_ep(udc->gadget.ep0);
	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, -ECONNRESET);
	}

	/* NOTE(review): iterating ep_list while dropping the lock assumes
	 * the list itself is not modified concurrently -- confirm. */
	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		if (ep->ep.desc) {
			spin_unlock(&udc->lock);
			usba_ep_disable(&ep->ep);
			spin_lock(&udc->lock);
		}
	}
}
1075
/*
 * Resolve a wIndex value from a control request to the matching enabled
 * endpoint.  Endpoint 0 always resolves to ep0 regardless of direction;
 * for others both the number and the IN/OUT direction bit must match.
 * Returns NULL if no enabled endpoint matches.
 */
static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
{
	struct usba_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return to_usba_ep(udc->gadget.ep0);

	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
		u8 bEndpointAddress;

		if (!ep->ep.desc)
			continue;
		bEndpointAddress = ep->ep.desc->bEndpointAddress;
		/* XOR isolates a direction-bit mismatch. */
		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
			continue;
		if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
				== (wIndex & USB_ENDPOINT_NUMBER_MASK))
			return ep;
	}

	return NULL;
}
1098
1099
/* Stall ep0 to reject a control request, and rearm for the next SETUP. */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}
1105
1106static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
1107{
1108 if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
1109 return 1;
1110 return 0;
1111}
1112
/* Program the device address field of CTRL (read-modify-write). */
static inline void set_address(struct usba_udc *udc, unsigned int addr)
{
	u32 regval;

	DBG(DBG_BUS, "setting address %u...\n", addr);
	regval = usba_readl(udc, CTRL);
	regval = USBA_BFINS(DEV_ADDR, addr, regval);
	usba_writel(udc, CTRL, regval);
}
1122
/*
 * Enter the USB 2.0 electrical test mode previously latched in
 * udc->test_mode by a SET_FEATURE(TEST_MODE) request.
 *
 * wIndex high byte selects the mode: 0x01 Test_J, 0x02 Test_K,
 * 0x03 Test_SE0_NAK, 0x04 Test_Packet.  SE0_NAK and Packet modes
 * reconfigure ep0 as a 64-byte bulk-IN endpoint first; Test_Packet then
 * transmits the standard compliance test pattern.
 *
 * Returns 0 on success, -EINVAL for an unknown mode.
 */
static int do_test_mode(struct usba_udc *udc)
{
	/* USB 2.0 spec section 7.1.20 compliance test packet. */
	static const char test_packet_buffer[] = {
		/* JKJKJKJK * 9 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* JJKKJJKK * 8 */
		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
		/* JJKKJJKK * 8 */
		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
		/* JJJJJJJK * 8 */
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		/* JJJJJJJK * 8 */
		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
		/* {JKKKKKKK * 10}, JK */
		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
	};
	struct usba_ep *ep;
	struct device *dev = &udc->pdev->dev;
	int test_mode;

	test_mode = udc->test_mode;

	/* Start from a clean slate before reprogramming endpoints. */
	reset_all_endpoints(udc);

	switch (test_mode) {
	case 0x0100:
		/* Test_J */
		usba_writel(udc, TST, USBA_TST_J_MODE);
		dev_info(dev, "Entering Test_J mode...\n");
		break;
	case 0x0200:
		/* Test_K */
		usba_writel(udc, TST, USBA_TST_K_MODE);
		dev_info(dev, "Entering Test_K mode...\n");
		break;
	case 0x0300:
		/* Test_SE0_NAK: force high speed and set ep0 up as a
		 * bulk-IN endpoint so the controller NAKs all IN tokens. */
		ep = &usba_ep[0];
		usba_writel(udc, TST,
				USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			dev_info(dev, "Entering Test_SE0_NAK mode...\n");
		}
		break;
	case 0x0400:
		/* Test_Packet */
		ep = &usba_ep[0];
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_Packet: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			usba_writel(udc, TST, USBA_TST_PKT_MODE);
			memcpy_toio(ep->fifo, test_packet_buffer,
					sizeof(test_packet_buffer));
			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
			dev_info(dev, "Entering Test_Packet mode...\n");
		}
		break;
	default:
		dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
		return -EINVAL;
	}

	return 0;
}
1208
1209
1210static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
1211{
1212 if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
1213 return true;
1214 return false;
1215}
1216
1217static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
1218{
1219 if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
1220 return true;
1221 return false;
1222}
1223
1224static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
1225{
1226 if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
1227 return true;
1228 return false;
1229}
1230
/*
 * Handle a SETUP packet on ep0.
 *
 * Standard requests the hardware/driver must answer itself (GET_STATUS,
 * CLEAR_FEATURE, SET_FEATURE, SET_ADDRESS) are processed here; anything
 * else is delegated to the bound gadget driver's setup() callback with
 * the UDC lock dropped.  Malformed requests stall ep0.
 *
 * Caller context: UDC lock held.  Returns the gadget driver's result for
 * delegated requests, 0 on success, -1 after stalling.
 */
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
		struct usb_ctrlrequest *crq)
{
	int retval = 0;

	switch (crq->bRequest) {
	case USB_REQ_GET_STATUS: {
		u16 status;

		if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
			status = cpu_to_le16(udc->devstatus);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_INTERFACE)) {
			/* Interface status is always zero. */
			status = cpu_to_le16(0);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
			struct usba_ep *target;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			status = 0;
			if (is_stalled(udc, target))
				status |= cpu_to_le16(1);
		} else
			goto delegate;

		/* GET_STATUS replies are exactly two bytes. */
		if (crq->wLength != cpu_to_le16(sizeof(status)))
			goto stall;
		ep->state = DATA_STAGE_IN;
		__raw_writew(status, ep->fifo);
		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
		break;
	}

	case USB_REQ_CLEAR_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_remote_wakeup(crq))
				udc->devstatus
					&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				/* No other device feature can be cleared. */
				goto stall;
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;
			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
			/* Un-halting resets the data toggle -- except ep0. */
			if (target->index != 0)
				usba_ep_writel(target, CLR_STA,
						USBA_TOGGLE_CLR);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_test_mode(crq)) {
				/* Test mode is entered after the status
				 * stage completes; latch it for later. */
				send_status(udc, ep);
				ep->state = STATUS_STAGE_TEST;
				udc->test_mode = le16_to_cpu(crq->wIndex);
				return 0;
			} else if (feature_is_dev_remote_wakeup(crq)) {
				udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			} else {
				goto stall;
			}
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
		} else
			goto delegate;

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_ADDRESS:
		if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
			goto delegate;

		set_address(udc, le16_to_cpu(crq->wValue));
		send_status(udc, ep);
		ep->state = STATUS_STAGE_ADDR;
		break;

	default:
delegate:
		/* Gadget driver callbacks run without the UDC lock. */
		spin_unlock(&udc->lock);
		retval = udc->driver->setup(&udc->gadget, crq);
		spin_lock(&udc->lock);
	}

	return retval;

stall:
	pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
		"halting endpoint...\n",
		ep->ep.name, crq->bRequestType, crq->bRequest,
		le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
		le16_to_cpu(crq->wLength));
	set_protocol_stall(udc, ep);
	return -1;
}
1356
/*
 * Interrupt handler for control endpoints (ep0).
 *
 * Drives the control-transfer state machine: IN/OUT data stages,
 * status stages, deferred SET_ADDRESS/test-mode completion, and SETUP
 * packet reception.  Loops via "restart" until no handled event bits
 * remain pending.
 *
 * Context: called with udc->lock held, from usba_udc_irq().
 */
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

restart:
	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
			ep->ep.name, ep->state, epstatus, epctrl);

	req = NULL;
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

	/* A TX bank is free: feed the next chunk of the current request.
	 * NOTE(review): req is dereferenced without a NULL check here;
	 * this relies on TX_PK_RDY only being enabled in CTL while a
	 * request is queued -- confirm against submit_request(). */
	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		}
		goto restart;
	}
	/* An IN transaction completed on the bus */
	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

		switch (ep->state) {
		case DATA_STAGE_IN:
			/* IN data done; expect a zero-length OUT status */
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = STATUS_STAGE_OUT;
			break;
		case STATUS_STAGE_ADDR:
			/* Activate the new function address now that
			 * the status stage has gone out */
			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
					| USBA_FADDR_EN));
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_IN:
			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
				submit_next_request(ep);
			}
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_TEST:
			/* Status stage of SET_FEATURE(TEST_MODE) done;
			 * now actually enter the test mode */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			if (do_test_mode(udc))
				set_protocol_stall(udc, ep);
			break;
		default:
			pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	/* An OUT packet was received */
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		switch (ep->state) {
		case STATUS_STAGE_OUT:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
			}
			ep->state = WAIT_FOR_SETUP;
			break;

		case DATA_STAGE_OUT:
			receive_data(ep);
			break;

		default:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if (epstatus & USBA_RX_SETUP) {
		union {
			struct usb_ctrlrequest crq;
			unsigned long data[2];
		} crq;
		unsigned int pkt_len;
		int ret;

		if (ep->state != WAIT_FOR_SETUP) {
			/*
			 * Didn't expect a SETUP packet at this point;
			 * terminate whatever transfer was in flight.
			 */
			int status = -EPROTO;

			/*
			 * If we were in a status stage, the arrival of
			 * a new SETUP implies the status stage finished
			 * on the bus, so complete the pending request
			 * successfully instead of with -EPROTO.
			 */
			if (ep->state == STATUS_STAGE_OUT
					|| ep->state == STATUS_STAGE_IN) {
				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
				status = 0;
			}

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, status);
			}
		}

		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
		if (pkt_len != sizeof(crq)) {
			pr_warning("udc: Invalid packet length %u "
				"(expected %zu)\n", pkt_len, sizeof(crq));
			set_protocol_stall(udc, ep);
			return;
		}

		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
		memcpy_fromio(crq.data, ep->fifo, sizeof(crq));

		/* Free up one bank in the FIFO so that we can generate
		 * or receive a reply right away. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

		if (crq.crq.bRequestType & USB_DIR_IN) {
			/*
			 * IN requests always get a data stage here,
			 * even when wLength == 0.
			 * NOTE(review): presumably deliberate for host
			 * compatibility -- confirm before changing.
			 */
			ep->state = DATA_STAGE_IN;
		} else {
			if (crq.crq.wLength != cpu_to_le16(0))
				ep->state = DATA_STAGE_OUT;
			else
				ep->state = STATUS_STAGE_IN;
		}

		ret = -1;
		if (ep->index == 0)
			ret = handle_ep0_setup(udc, ep, &crq.crq);
		else {
			/* Non-ep0 control endpoints go straight to the
			 * gadget driver, with the lock dropped */
			spin_unlock(&udc->lock);
			ret = udc->driver->setup(&udc->gadget, &crq.crq);
			spin_lock(&udc->lock);
		}

		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
			crq.crq.bRequestType, crq.crq.bRequest,
			le16_to_cpu(crq.crq.wLength), ep->state, ret);

		if (ret < 0) {
			/* Let the host know that we failed */
			set_protocol_stall(udc, ep);
		}
	}
}
1544
/*
 * Interrupt handler for non-control endpoints using PIO (and the
 * zero-length tail of DMA transfers).
 *
 * Drains TX_PK_RDY events by pushing queued requests into the FIFO
 * and completing them, then handles any received OUT data.
 *
 * Context: called with udc->lock held, from usba_udc_irq().
 */
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

		if (list_empty(&ep->queue)) {
			/* TX ready with nothing queued: mask the
			 * interrupt so it cannot storm */
			dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			return;
		}

		req = list_entry(ep->queue.next, struct usba_request, queue);

		if (req->using_dma) {
			/* Send a zero-length packet to terminate the
			 * DMA transfer, then complete the request */
			usba_ep_writel(ep, SET_STA,
					USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_DIS,
					USBA_TX_PK_RDY);
			list_del_init(&req->queue);
			submit_next_request(ep);
			request_complete(ep, req, 0);
		} else {
			if (req->submitted)
				next_fifo_transaction(ep, req);
			else
				submit_request(ep, req);

			if (req->last_transaction) {
				list_del_init(&req->queue);
				submit_next_request(ep);
				request_complete(ep, req, 0);
			}
		}

		/* Re-sample hardware state for the next loop pass */
		epstatus = usba_ep_readl(ep, STA);
		epctrl = usba_ep_readl(ep, CTL);
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
		receive_data(ep);
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
	}
}
1598
/*
 * DMA completion interrupt for one endpoint's DMA channel.
 *
 * Completes the request at the head of the queue when the controller
 * reports end-of-transfer or end-of-buffer.
 *
 * Context: called with udc->lock held, from usba_udc_irq().
 */
static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 status, control, pending;

	status = usba_dma_readl(ep, STATUS);
	control = usba_dma_readl(ep, CONTROL);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
	/* Keep a copy for the debugfs "dma_status" style reporting */
	ep->last_dma_status = status;
#endif
	pending = status & control;
	DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);

	if (status & USBA_DMA_CH_EN) {
		dev_err(&udc->pdev->dev,
			"DMA_CH_EN is set after transfer is finished!\n");
		dev_err(&udc->pdev->dev,
			"status=%#08x, pending=%#08x, control=%#08x\n",
			status, pending, control);
		/*
		 * The channel is unexpectedly still enabled; nothing
		 * is done beyond logging.  NOTE(review): unclear
		 * whether recovery is possible here -- check the USBA
		 * hardware documentation before adding handling.
		 */
	}

	if (list_empty(&ep->queue))
		/* Might happen if a reset or dequeue raced with us */
		return;

	if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
		req = list_entry(ep->queue.next, struct usba_request, queue);
		usba_update_req(ep, req, status);

		list_del_init(&req->queue);
		submit_next_request(ep);
		request_complete(ep, req, 0);
	}
}
1638
/*
 * Top-level USBA controller interrupt handler.
 *
 * Handles global bus events (suspend, wakeup, resume, end-of-reset)
 * and fans per-DMA-channel and per-endpoint interrupt bits out to
 * usba_dma_irq()/usba_control_irq()/usba_ep_irq().
 *
 * udc->lock is held throughout, except across calls into the gadget
 * driver's suspend/resume/disconnect callbacks.
 */
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	u32 status;
	u32 dma_status;
	u32 ep_status;

	spin_lock(&udc->lock);

	status = usba_readl(udc, INT_STA);
	DBG(DBG_INT, "irq, status=%#08x\n", status);

	if (status & USBA_DET_SUSPEND) {
		/* Drop the bias while suspended to save power */
		toggle_bias(0);
		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
		DBG(DBG_BUS, "Suspend detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (status & USBA_WAKE_UP) {
		toggle_bias(1);
		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
		DBG(DBG_BUS, "Wake Up CPU detected\n");
	}

	if (status & USBA_END_OF_RESUME) {
		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
		DBG(DBG_BUS, "Resume detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* Per-channel DMA bits; the loop deliberately starts at 1 --
	 * bit 0 carries no DMA channel */
	dma_status = USBA_BFEXT(DMA_INT, status);
	if (dma_status) {
		int i;

		for (i = 1; i < USBA_NR_ENDPOINTS; i++)
			if (dma_status & (1 << i))
				usba_dma_irq(udc, &usba_ep[i]);
	}

	ep_status = USBA_BFEXT(EPT_INT, status);
	if (ep_status) {
		int i;

		for (i = 0; i < USBA_NR_ENDPOINTS; i++)
			if (ep_status & (1 << i)) {
				if (ep_is_control(&usba_ep[i]))
					usba_control_irq(udc, &usba_ep[i]);
				else
					usba_ep_irq(udc, &usba_ep[i]);
			}
	}

	if (status & USBA_END_OF_RESET) {
		struct usba_ep *ep0;

		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
		reset_all_endpoints(udc);

		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver->disconnect) {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			spin_unlock(&udc->lock);
			udc->driver->disconnect(&udc->gadget);
			spin_lock(&udc->lock);
		}

		if (status & USBA_HIGH_SPEED)
			udc->gadget.speed = USB_SPEED_HIGH;
		else
			udc->gadget.speed = USB_SPEED_FULL;
		DBG(DBG_BUS, "%s bus reset detected\n",
			usb_speed_string(udc->gadget.speed));

		/* Reconfigure and re-enable ep0 from scratch after the
		 * bus reset, and re-arm the global interrupt sources */
		ep0 = &usba_ep[0];
		ep0->ep.desc = &usba_ep0_desc;
		ep0->state = WAIT_FOR_SETUP;
		usba_ep_writel(ep0, CFG,
				(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
				| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
		usba_ep_writel(ep0, CTL_ENB,
				USBA_EPT_ENABLE | USBA_RX_SETUP);
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
				| USBA_BF(EPT_INT, 1)
				| USBA_DET_SUSPEND
				| USBA_END_OF_RESUME));

		/*
		 * NOTE(review): EPT_MAPPED is presumably set by hardware
		 * after a valid CFG write; a missing flag is only logged
		 * here, not handled -- confirm against the datasheet.
		 */
		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
			dev_dbg(&udc->pdev->dev,
				"ODD: EP0 configuration is invalid!\n");
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
1751
/*
 * Vbus GPIO interrupt: enable the controller when Vbus appears,
 * disable it (and notify the gadget driver) when Vbus goes away.
 */
static irqreturn_t usba_vbus_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	int vbus;

	/* debounce */
	udelay(10);

	spin_lock(&udc->lock);

	/* May happen if Vbus toggles before a gadget driver is bound */
	if (!udc->driver)
		goto out;

	vbus = vbus_is_present(udc);
	if (vbus != udc->vbus_prev) {
		if (vbus) {
			toggle_bias(1);
			usba_writel(udc, CTRL, USBA_ENABLE_MASK);
			usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
		} else {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			reset_all_endpoints(udc);
			toggle_bias(0);
			usba_writel(udc, CTRL, USBA_DISABLE_MASK);
			if (udc->driver->disconnect) {
				/* Callback runs without our lock held */
				spin_unlock(&udc->lock);
				udc->driver->disconnect(&udc->gadget);
				spin_lock(&udc->lock);
			}
		}
		udc->vbus_prev = vbus;
	}

out:
	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
1791
1792static int atmel_usba_start(struct usb_gadget *gadget,
1793 struct usb_gadget_driver *driver)
1794{
1795 struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
1796 unsigned long flags;
1797
1798 spin_lock_irqsave(&udc->lock, flags);
1799
1800 udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
1801 udc->driver = driver;
1802 udc->gadget.dev.driver = &driver->driver;
1803 spin_unlock_irqrestore(&udc->lock, flags);
1804
1805 clk_enable(udc->pclk);
1806 clk_enable(udc->hclk);
1807
1808 DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
1809
1810 udc->vbus_prev = 0;
1811 if (gpio_is_valid(udc->vbus_pin))
1812 enable_irq(gpio_to_irq(udc->vbus_pin));
1813
1814
1815 spin_lock_irqsave(&udc->lock, flags);
1816 if (vbus_is_present(udc) && udc->vbus_prev == 0) {
1817 toggle_bias(1);
1818 usba_writel(udc, CTRL, USBA_ENABLE_MASK);
1819 usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
1820 }
1821 spin_unlock_irqrestore(&udc->lock, flags);
1822
1823 return 0;
1824}
1825
1826static int atmel_usba_stop(struct usb_gadget *gadget,
1827 struct usb_gadget_driver *driver)
1828{
1829 struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
1830 unsigned long flags;
1831
1832 if (gpio_is_valid(udc->vbus_pin))
1833 disable_irq(gpio_to_irq(udc->vbus_pin));
1834
1835 spin_lock_irqsave(&udc->lock, flags);
1836 udc->gadget.speed = USB_SPEED_UNKNOWN;
1837 reset_all_endpoints(udc);
1838 spin_unlock_irqrestore(&udc->lock, flags);
1839
1840
1841 toggle_bias(0);
1842 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1843
1844 udc->gadget.dev.driver = NULL;
1845 udc->driver = NULL;
1846
1847 clk_disable(udc->hclk);
1848 clk_disable(udc->pclk);
1849
1850 DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
1851
1852 return 0;
1853}
1854
1855static int __init usba_udc_probe(struct platform_device *pdev)
1856{
1857 struct usba_platform_data *pdata = pdev->dev.platform_data;
1858 struct resource *regs, *fifo;
1859 struct clk *pclk, *hclk;
1860 struct usba_udc *udc = &the_udc;
1861 int irq, ret, i;
1862
1863 regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
1864 fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
1865 if (!regs || !fifo || !pdata)
1866 return -ENXIO;
1867
1868 irq = platform_get_irq(pdev, 0);
1869 if (irq < 0)
1870 return irq;
1871
1872 pclk = clk_get(&pdev->dev, "pclk");
1873 if (IS_ERR(pclk))
1874 return PTR_ERR(pclk);
1875 hclk = clk_get(&pdev->dev, "hclk");
1876 if (IS_ERR(hclk)) {
1877 ret = PTR_ERR(hclk);
1878 goto err_get_hclk;
1879 }
1880
1881 spin_lock_init(&udc->lock);
1882 udc->pdev = pdev;
1883 udc->pclk = pclk;
1884 udc->hclk = hclk;
1885 udc->vbus_pin = -ENODEV;
1886
1887 ret = -ENOMEM;
1888 udc->regs = ioremap(regs->start, resource_size(regs));
1889 if (!udc->regs) {
1890 dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
1891 goto err_map_regs;
1892 }
1893 dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
1894 (unsigned long)regs->start, udc->regs);
1895 udc->fifo = ioremap(fifo->start, resource_size(fifo));
1896 if (!udc->fifo) {
1897 dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
1898 goto err_map_fifo;
1899 }
1900 dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
1901 (unsigned long)fifo->start, udc->fifo);
1902
1903 device_initialize(&udc->gadget.dev);
1904 udc->gadget.dev.parent = &pdev->dev;
1905 udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
1906
1907 platform_set_drvdata(pdev, udc);
1908
1909
1910 clk_enable(pclk);
1911 toggle_bias(0);
1912 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1913 clk_disable(pclk);
1914
1915 usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
1916 GFP_KERNEL);
1917 if (!usba_ep)
1918 goto err_alloc_ep;
1919
1920 the_udc.gadget.ep0 = &usba_ep[0].ep;
1921
1922 INIT_LIST_HEAD(&usba_ep[0].ep.ep_list);
1923 usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0);
1924 usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0);
1925 usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0);
1926 usba_ep[0].ep.ops = &usba_ep_ops;
1927 usba_ep[0].ep.name = pdata->ep[0].name;
1928 usba_ep[0].ep.maxpacket = pdata->ep[0].fifo_size;
1929 usba_ep[0].udc = &the_udc;
1930 INIT_LIST_HEAD(&usba_ep[0].queue);
1931 usba_ep[0].fifo_size = pdata->ep[0].fifo_size;
1932 usba_ep[0].nr_banks = pdata->ep[0].nr_banks;
1933 usba_ep[0].index = pdata->ep[0].index;
1934 usba_ep[0].can_dma = pdata->ep[0].can_dma;
1935 usba_ep[0].can_isoc = pdata->ep[0].can_isoc;
1936
1937 for (i = 1; i < pdata->num_ep; i++) {
1938 struct usba_ep *ep = &usba_ep[i];
1939
1940 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1941 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
1942 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
1943 ep->ep.ops = &usba_ep_ops;
1944 ep->ep.name = pdata->ep[i].name;
1945 ep->ep.maxpacket = pdata->ep[i].fifo_size;
1946 ep->udc = &the_udc;
1947 INIT_LIST_HEAD(&ep->queue);
1948 ep->fifo_size = pdata->ep[i].fifo_size;
1949 ep->nr_banks = pdata->ep[i].nr_banks;
1950 ep->index = pdata->ep[i].index;
1951 ep->can_dma = pdata->ep[i].can_dma;
1952 ep->can_isoc = pdata->ep[i].can_isoc;
1953
1954 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1955 }
1956
1957 ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
1958 if (ret) {
1959 dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
1960 irq, ret);
1961 goto err_request_irq;
1962 }
1963 udc->irq = irq;
1964
1965 ret = device_add(&udc->gadget.dev);
1966 if (ret) {
1967 dev_dbg(&pdev->dev, "Could not add gadget: %d\n", ret);
1968 goto err_device_add;
1969 }
1970
1971 if (gpio_is_valid(pdata->vbus_pin)) {
1972 if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
1973 udc->vbus_pin = pdata->vbus_pin;
1974 udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
1975
1976 ret = request_irq(gpio_to_irq(udc->vbus_pin),
1977 usba_vbus_irq, 0,
1978 "atmel_usba_udc", udc);
1979 if (ret) {
1980 gpio_free(udc->vbus_pin);
1981 udc->vbus_pin = -ENODEV;
1982 dev_warn(&udc->pdev->dev,
1983 "failed to request vbus irq; "
1984 "assuming always on\n");
1985 } else {
1986 disable_irq(gpio_to_irq(udc->vbus_pin));
1987 }
1988 } else {
1989
1990 udc->vbus_pin = -EINVAL;
1991 }
1992 }
1993
1994 ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
1995 if (ret)
1996 goto err_add_udc;
1997
1998 usba_init_debugfs(udc);
1999 for (i = 1; i < pdata->num_ep; i++)
2000 usba_ep_init_debugfs(udc, &usba_ep[i]);
2001
2002 return 0;
2003
2004err_add_udc:
2005 if (gpio_is_valid(pdata->vbus_pin)) {
2006 free_irq(gpio_to_irq(udc->vbus_pin), udc);
2007 gpio_free(udc->vbus_pin);
2008 }
2009
2010 device_unregister(&udc->gadget.dev);
2011
2012err_device_add:
2013 free_irq(irq, udc);
2014err_request_irq:
2015 kfree(usba_ep);
2016err_alloc_ep:
2017 iounmap(udc->fifo);
2018err_map_fifo:
2019 iounmap(udc->regs);
2020err_map_regs:
2021 clk_put(hclk);
2022err_get_hclk:
2023 clk_put(pclk);
2024
2025 platform_set_drvdata(pdev, NULL);
2026
2027 return ret;
2028}
2029
/*
 * Remove: unregister the UDC and release everything probe acquired,
 * in reverse order (debugfs, Vbus gpio/irq, controller irq,
 * per-endpoint array, mappings, clocks, gadget device).
 */
static int __exit usba_udc_remove(struct platform_device *pdev)
{
	struct usba_udc *udc;
	int i;
	struct usba_platform_data *pdata = pdev->dev.platform_data;

	udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);

	/* ep0 has no per-endpoint debugfs entries (see probe) */
	for (i = 1; i < pdata->num_ep; i++)
		usba_ep_cleanup_debugfs(&usba_ep[i]);
	usba_cleanup_debugfs(udc);

	/* vbus_pin is only valid if probe acquired the GPIO and irq */
	if (gpio_is_valid(udc->vbus_pin)) {
		free_irq(gpio_to_irq(udc->vbus_pin), udc);
		gpio_free(udc->vbus_pin);
	}

	free_irq(udc->irq, udc);
	kfree(usba_ep);
	iounmap(udc->fifo);
	iounmap(udc->regs);
	clk_put(udc->hclk);
	clk_put(udc->pclk);

	device_unregister(&udc->gadget.dev);

	return 0;
}
2060
/*
 * No .probe member: the driver is registered via
 * platform_driver_probe() so the __init probe function can be
 * discarded after boot.
 */
static struct platform_driver udc_driver = {
	.remove		= __exit_p(usba_udc_remove),
	.driver		= {
		.name		= "atmel_usba_udc",
		.owner		= THIS_MODULE,
	},
};
2068
/* Module init: bind the driver; probe runs at most once (__init). */
static int __init udc_init(void)
{
	return platform_driver_probe(&udc_driver, usba_udc_probe);
}
module_init(udc_init);
2074
/* Module exit: unregister the platform driver. */
static void __exit udc_exit(void)
{
	platform_driver_unregister(&udc_driver);
}
module_exit(udc_exit);
2080
2081MODULE_DESCRIPTION("Atmel USBA UDC driver");
2082MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2083MODULE_LICENSE("GPL");
2084MODULE_ALIAS("platform:atmel_usba_udc");
2085