// SPDX-License-Identifier: GPL-2.0+
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Each RX buffer descriptor
 * covers at most one max-size packet, so the driver sees every packet as
 * it arrives.  Slower, but makes it easier to tell exactly how much data
 * was transferred when a transfer is cut short.
 *
 * true - one IRQ per transfer.  RX descriptors may cover up to
 * IUDMA_MAX_FRAGMENT (2048) bytes, which generates far fewer interrupts
 * at the cost of coarser-grained error recovery.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");

#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

#define IUDMA_RESET_TIMEOUT_US		10000

#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};

/**
 * struct iudma_ch_cfg - Static configuration for one IUDMA channel.
 * @ep_num: USB endpoint number (-1 if the channel is not bound to an ep).
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (BCMEP_CTRL, BCMEP_BULK, BCMEP_INTR, ...).
 * @dir: Direction (BCMEP_IN or BCMEP_OUT).
 * @n_fifo_slots: Number of FIFO slots reserved for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {
	/*
	 * Even channels are RX (OUT), odd channels are TX (IN); channels
	 * 0 and 1 are dedicated to ep0.  Fields, in order:
	 *   ep_num, n_bds, ep_type, dir, n_fifo_slots, max_pkt_hs, max_pkt_fs
	 */
	[0] = { -1,  4, BCMEP_CTRL, BCMEP_OUT,  32,  64, 64 },
	[1] = {  0,  4, BCMEP_CTRL, BCMEP_OUT,  32,  64, 64 },
	[2] = {  2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
	[3] = {  1, 16, BCMEP_BULK, BCMEP_IN,  128, 512, 64 },
	[4] = {  4,  4, BCMEP_INTR, BCMEP_OUT,  32,  64, 64 },
	[5] = {  3,  4, BCMEP_INTR, BCMEP_IN,   32,  64, 64 },
};

struct bcm63xx_udc;

/**
 * struct iudma_ch - Runtime state of one IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA - 1).
 * @ep_num: USB endpoint number, or -1 if the channel has no endpoint.
 * @enabled: Whether the endpoint is enabled (always set for ep0 channels).
 * @max_pkt: Maximum packet size for the current link speed.
 * @is_tx: true for TX (IN) channels, false for RX (OUT) channels.
 * @bep: Endpoint backed by this channel, or NULL.
 * @udc: Reference back to the device-level structure.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next buffer descriptor available for a new fragment.
 * @end_bd: Last buffer descriptor in the ring (used for wrapping).
 * @n_bds_used: Number of BDs queued for the transaction in flight.
 * @bd_ring: Virtual address of the buffer descriptor ring.
 * @bd_ring_dma: DMA (physical) address of the buffer descriptor ring.
 * @n_bds: Total number of BDs in the ring.
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};

/**
 * struct bcm63xx_ep - Driver-private state for each USB endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: IUDMA channel backing this endpoint.
 * @ep: usb_ep handed to the gadget layer.
 * @udc: Reference back to the device-level structure.
 * @queue: Linked list of outstanding requests for this endpoint.
 * @halted: Whether the endpoint is currently stalled.
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};

/**
 * struct bcm63xx_req - Driver-private state for each request.
 * @queue: Links this request into the owning endpoint's queue.
 * @req: usb_request passed in by the gadget layer.
 * @offset: Byte offset of the next fragment to be queued on the IUDMA ring.
 * @bd_bytes: Number of bytes covered by the BDs of the current transaction.
 * @iudma: IUDMA channel carrying the transfer.
 */
struct bcm63xx_req {
	struct list_head queue;
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock protecting the device state.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port information).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: Device-mode representation handed to the gadget framework.
 * @driver: Bound gadget driver, if any.
 * @usbd_regs: ioremapped base of the USBD register block.
 * @iudma_regs: ioremapped base of the USBD's IUDMA register block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: Current USB configuration number, from SET_CONFIGURATION.
 * @iface: Current USB interface number, from SET_INTERFACE.
 * @alt_iface: Current USB alternate setting, from SET_INTERFACE.
 * @ep0_ctrl_req: Request object for internally generated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer used by ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine (enum bcm63xx_ep0_state).
 * @ep0_wq: Work item used to run the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: A USB bus reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; ep0 must halt activity.
 * @ep0_req_completed: An ep0 transaction finished but has not been reaped.
 * @ep0_reply: Pending reply queued by the gadget driver.
 * @ep0_request: ep0 request currently on the hardware (internal or reply).
 * @debugfs_root: debugfs directory for this controller instance.
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
};

333static const struct usb_ep_ops bcm63xx_udc_ep_ops;
334
/* convenience wrappers: container_of helpers and register accessors */

339static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
340{
341 return container_of(g, struct bcm63xx_udc, gadget);
342}
343
344static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
345{
346 return container_of(ep, struct bcm63xx_ep, ep);
347}
348
349static inline struct bcm63xx_req *our_req(struct usb_request *req)
350{
351 return container_of(req, struct bcm63xx_req, req);
352}
353
354static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
355{
356 return bcm_readl(udc->usbd_regs + off);
357}
358
359static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
360{
361 bcm_writel(val, udc->usbd_regs + off);
362}
363
364static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
365{
366 return bcm_readl(udc->iudma_regs + off);
367}
368
369static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
370{
371 bcm_writel(val, udc->iudma_regs + off);
372}
373
374static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
375{
376 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
377 (ENETDMA_CHAN_WIDTH * chan));
378}
379
380static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
381 int chan)
382{
383 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
384 (ENETDMA_CHAN_WIDTH * chan));
385}
386
387static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
388{
389 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
390 (ENETDMA_CHAN_WIDTH * chan));
391}
392
393static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
394 int chan)
395{
396 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
397 (ENETDMA_CHAN_WIDTH * chan));
398}
399
400static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
401{
402 if (is_enabled) {
403 clk_enable(udc->usbh_clk);
404 clk_enable(udc->usbd_clk);
405 udelay(10);
406 } else {
407 clk_disable(udc->usbd_clk);
408 clk_disable(udc->usbh_clk);
409 }
410}
411
/**
 * bcm63xx_ep_dma_select - Program the "init_sel" index register.
 * @udc: Reference to the device controller.
 * @idx: Endpoint or FIFO-pair index to select.
 *
 * Several USBD registers (FIFO config, EP size, typemap, FIFO reset) are
 * banked; this selects which endpoint/channel the next access applies to.
 */
425static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
426{
427 u32 val = usbd_readl(udc, USBD_CONTROL_REG);
428
429 val &= ~USBD_CONTROL_INIT_SEL_MASK;
430 val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
431 usbd_writel(udc, val, USBD_CONTROL_REG);
432}
433
/**
 * bcm63xx_set_stall - Enable/disable the STALL condition on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 */
443static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
444 bool is_stalled)
445{
446 u32 val;
447
448 val = USBD_STALL_UPDATE_MASK |
449 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
450 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
451 usbd_writel(udc, val, USBD_STALL_REG);
452}
453
/**
 * bcm63xx_fifo_setup - Configure the hardware FIFO allocation.
 * @udc: Reference to the device controller.
 *
 * FIFO slots and per-endpoint packet sizes depend on the negotiated link
 * speed, so this is rerun whenever the speed changes.  Slots are carved
 * up per IUDMA channel pair according to iudma_defaults[].
 */
461static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
462{
463 int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
464 u32 i, val, rx_fifo_slot, tx_fifo_slot;
465
466
467 rx_fifo_slot = tx_fifo_slot = 0;
468 for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
469 const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
470 const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
471
472 bcm63xx_ep_dma_select(udc, i >> 1);
473
474 val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
475 ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
476 USBD_RXFIFO_CONFIG_END_SHIFT);
477 rx_fifo_slot += rx_cfg->n_fifo_slots;
478 usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
479 usbd_writel(udc,
480 is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
481 USBD_RXFIFO_EPSIZE_REG);
482
483 val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
484 ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
485 USBD_TXFIFO_CONFIG_END_SHIFT);
486 tx_fifo_slot += tx_cfg->n_fifo_slots;
487 usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
488 usbd_writel(udc,
489 is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
490 USBD_TXFIFO_EPSIZE_REG);
491
492 usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
493 }
494}
495
/**
 * bcm63xx_fifo_reset_ep - Flush the FIFOs of a single endpoint.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint (FIFO pair) to flush.
 */
501static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
502{
503 u32 val;
504
505 bcm63xx_ep_dma_select(udc, ep_num);
506
507 val = usbd_readl(udc, USBD_CONTROL_REG);
508 val |= USBD_CONTROL_FIFO_RESET_MASK;
509 usbd_writel(udc, val, USBD_CONTROL_REG);
510 usbd_readl(udc, USBD_CONTROL_REG);
511}
512
513
514
515
516
517static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
518{
519 int i;
520
521 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
522 bcm63xx_fifo_reset_ep(udc, i);
523}
524
/**
 * bcm63xx_ep_init - One-time endpoint-to-channel mapping setup.
 * @udc: Reference to the device controller.
 *
 * Programs the type and IUDMA channel pair for each hardware endpoint.
 */
529static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
530{
531 u32 i, val;
532
533 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
534 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
535
536 if (cfg->ep_num < 0)
537 continue;
538
539 bcm63xx_ep_dma_select(udc, cfg->ep_num);
540 val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
541 ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
542 usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
543 }
544}
545
/**
 * bcm63xx_ep_setup - Program the per-endpoint CSRs.
 * @udc: Reference to the device controller.
 *
 * Rerun whenever the link speed, configuration, interface, or altsetting
 * changes, since all of these feed into USBD_CSR_EP_REG and the max
 * packet size.
 */
552static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
553{
554 u32 val, i;
555
556 usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
557
558 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
559 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
560 int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
561 cfg->max_pkt_hs : cfg->max_pkt_fs;
562 int idx = cfg->ep_num;
563
564 udc->iudma[i].max_pkt = max_pkt;
565
566 if (idx < 0)
567 continue;
568 usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
569
570 val = (idx << USBD_CSR_EP_LOG_SHIFT) |
571 (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
572 (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
573 (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
574 (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
575 (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
576 (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
577 usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
578 }
579}
580
/**
 * iudma_write - Queue buffer descriptors for a transaction on a channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request driving the transaction.
 *
 * Splits the request into fragments of at most max_bd_bytes, fills in
 * one BD per fragment (plus a zero-length BD when req->zero requires a
 * terminating short packet), then enables the channel.
 */
594static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
595 struct bcm63xx_req *breq)
596{
597 int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
598 unsigned int bytes_left = breq->req.length - breq->offset;
599 const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
600 iudma->max_pkt : IUDMA_MAX_FRAGMENT;
601
602 iudma->n_bds_used = 0;
603 breq->bd_bytes = 0;
604 breq->iudma = iudma;
605
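	/*
	 * A transfer that is a non-zero multiple of max_pkt needs a trailing
	 * zero-length packet when req->zero is set, so the host can tell
	 * that the transfer is over.
	 */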
606 if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
607 extra_zero_pkt = 1;
608
609 do {
610 struct bcm_enet_desc *d = iudma->write_bd;
611 u32 dmaflags = 0;
612 unsigned int n_bytes;
613
614 if (d == iudma->end_bd) {
615 dmaflags |= DMADESC_WRAP_MASK;
616 iudma->write_bd = iudma->bd_ring;
617 } else {
618 iudma->write_bd++;
619 }
620 iudma->n_bds_used++;
621
622 n_bytes = min_t(int, bytes_left, max_bd_bytes);
623 if (n_bytes)
624 dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
625 else
626 dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
627 DMADESC_USB_ZERO_MASK;
628
629 dmaflags |= DMADESC_OWNER_MASK;
630 if (first_bd) {
631 dmaflags |= DMADESC_SOP_MASK;
632 first_bd = 0;
633 }
634
		/*
		 * extra_zero_pkt forces one more pass through the loop after
		 * all of the data has been queued, so that the terminating
		 * zero-length packet gets sent; clear it on that final pass.
		 */
639 if (extra_zero_pkt && !bytes_left)
640 extra_zero_pkt = 0;
641
642 if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
643 (n_bytes == bytes_left && !extra_zero_pkt)) {
644 last_bd = 1;
645 dmaflags |= DMADESC_EOP_MASK;
646 }
647
648 d->address = breq->req.dma + breq->offset;
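		/*
		 * Make sure the buffer address is written before len_stat
		 * hands ownership of the descriptor to the DMA engine.
		 */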
649 mb();
650 d->len_stat = dmaflags;
651
652 breq->offset += n_bytes;
653 breq->bd_bytes += n_bytes;
654 bytes_left -= n_bytes;
655 } while (!last_bd);
656
657 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
658 ENETDMAC_CHANCFG_REG, iudma->ch_idx);
659}
660
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to check.
 *
 * Returns the number of bytes completed for the current transaction,
 * -EBUSY if the hardware still owns one of its BDs, or -EINVAL if no
 * transaction is outstanding.
 */
670static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
671{
672 int i, actual_len = 0;
673 struct bcm_enet_desc *d = iudma->read_bd;
674
675 if (!iudma->n_bds_used)
676 return -EINVAL;
677
678 for (i = 0; i < iudma->n_bds_used; i++) {
679 u32 dmaflags;
680
681 dmaflags = d->len_stat;
682
683 if (dmaflags & DMADESC_OWNER_MASK)
684 return -EBUSY;
685
686 actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
687 DMADESC_LENGTH_SHIFT;
688 if (d == iudma->end_bd)
689 d = iudma->bd_ring;
690 else
691 d++;
692 }
693
694 iudma->read_bd = d;
695 iudma->n_bds_used = 0;
696 return actual_len;
697}
698
/**
 * iudma_reset_channel - Stop one IUDMA channel and reinitialize its ring.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
704static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
705{
706 int timeout = IUDMA_RESET_TIMEOUT_US;
707 struct bcm_enet_desc *d;
708 int ch_idx = iudma->ch_idx;
709
710 if (!iudma->is_tx)
711 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
712
	/* stop the channel, then wait for the hardware to finish up */
714 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
715
716 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
717 ENETDMAC_CHANCFG_EN_MASK) {
718 udelay(1);
		/*
		 * keep flushing the TX FIFO so any pending BD can drain
		 * and the channel can actually stop
		 */
721 if (iudma->is_tx && iudma->ep_num >= 0)
722 bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
723
724 if (!timeout--) {
725 dev_err(udc->dev, "can't reset IUDMA channel %d\n",
726 ch_idx);
727 break;
728 }
729 if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
730 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
731 ch_idx);
732 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
733 ENETDMAC_CHANCFG_REG, ch_idx);
734 }
735 }
736 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
737
738
739 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
740 d->len_stat = 0;
741 mb();
742
743 iudma->read_bd = iudma->write_bd = iudma->bd_ring;
744 iudma->n_bds_used = 0;
745
746
747 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
748 ENETDMAC_IRMASK_REG, ch_idx);
749 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
750
751 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
752 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
753}
754
/**
 * iudma_init_channel - One-time initialization of an IUDMA channel.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 *
 * Allocates the BD ring and links the channel to its endpoint.
 */
760static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
761{
762 struct iudma_ch *iudma = &udc->iudma[ch_idx];
763 const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
764 unsigned int n_bds = cfg->n_bds;
765 struct bcm63xx_ep *bep = NULL;
766
767 iudma->ep_num = cfg->ep_num;
768 iudma->ch_idx = ch_idx;
769 iudma->is_tx = !!(ch_idx & 0x01);
770 if (iudma->ep_num >= 0) {
771 bep = &udc->bep[iudma->ep_num];
772 bep->iudma = iudma;
773 INIT_LIST_HEAD(&bep->queue);
774 }
775
776 iudma->bep = bep;
777 iudma->udc = udc;
778
	/*
	 * The two ep0 channels are always enabled; data endpoints are
	 * enabled by the gadget driver through bcm63xx_ep_enable().
	 */
780 if (iudma->ep_num <= 0)
781 iudma->enabled = true;
782
783 iudma->n_bds = n_bds;
784 iudma->bd_ring = dmam_alloc_coherent(udc->dev,
785 n_bds * sizeof(struct bcm_enet_desc),
786 &iudma->bd_ring_dma, GFP_KERNEL);
787 if (!iudma->bd_ring)
788 return -ENOMEM;
789 iudma->end_bd = &iudma->bd_ring[n_bds - 1];
790
791 return 0;
792}
793
/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enables the DMA block, sets up every channel, and unmasks the global
 * per-channel interrupts.
 */
800static int iudma_init(struct bcm63xx_udc *udc)
801{
802 int i, rc;
803
804 usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
805
806 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
807 rc = iudma_init_channel(udc, i);
808 if (rc)
809 return rc;
810 iudma_reset_channel(udc, &udc->iudma[i]);
811 }
812
813 usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
814 return 0;
815}
816
/**
 * iudma_uninit - Shut down the IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Reverse of iudma_init(): mask interrupts, reset every channel, and
 * disable the DMA block.
 */
823static void iudma_uninit(struct bcm63xx_udc *udc)
824{
825 int i;
826
827 usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
828
829 for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
830 iudma_reset_channel(udc, &udc->iudma[i]);
831
832 usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
833}
834
/**
 * bcm63xx_set_ctrl_irqs - Enable/disable control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 *
 * Also clears any interrupts that are currently pending.
 */
844static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
845{
846 u32 val;
847
848 usbd_writel(udc, 0, USBD_STATUS_REG);
849
850 val = BIT(USBD_EVENT_IRQ_USB_RESET) |
851 BIT(USBD_EVENT_IRQ_SETUP) |
852 BIT(USBD_EVENT_IRQ_SETCFG) |
853 BIT(USBD_EVENT_IRQ_SETINTF) |
854 BIT(USBD_EVENT_IRQ_USB_LINK);
855 usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
856 usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
857}
858
/**
 * bcm63xx_select_phy_mode - Select device or host mode for the shared port.
 * @udc: Reference to the device controller.
 * @is_device: true for device mode, false for host mode.
 *
 * Switches the UTMI port named by pd->port_no between the USBD (device)
 * and USBH (host) blocks; on BCM6328 it also updates the USB pin mux.
 */
870static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
871{
872 u32 val, portmask = BIT(udc->pd->port_no);
873
874 if (BCMCPU_IS_6328()) {
875
876 val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
877 val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
878 val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
879 GPIO_PINMUX_OTHR_6328_USB_HOST;
880 bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
881 }
882
883 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
884 if (is_device) {
885 val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
886 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
887 } else {
888 val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
889 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
890 }
891 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
892
893 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
894 if (is_device)
895 val |= USBH_PRIV_SWAP_USBD_MASK;
896 else
897 val &= ~USBH_PRIV_SWAP_USBD_MASK;
898 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
899}
900
/**
 * bcm63xx_select_pullup - Enable/disable the D+ pullup.
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * The pullup is controlled through the port's UTMI NODRIV bit; clearing
 * it makes the device visible to the host.
 */
910static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
911{
912 u32 val, portmask = BIT(udc->pd->port_no);
913
914 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
915 if (is_on)
916 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
917 else
918 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
919 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
920}
921
/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * Resets the IUDMA channels (with the clocks temporarily enabled), then
 * releases the clocks.
 */
929static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
930{
931 set_clocks(udc, true);
932 iudma_uninit(udc);
933 set_clocks(udc, false);
934
935 clk_put(udc->usbd_clk);
936 clk_put(udc->usbh_clk);
937}
938
/**
 * bcm63xx_init_udc_hw - One-time hardware and data structure initialization.
 * @udc: Reference to the device controller.
 *
 * Sets up the endpoint array, clocks, USBD straps/control registers, and
 * the IUDMA channels.
 */
943static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
944{
945 int i, rc = 0;
946 u32 val;
947
948 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
949 GFP_KERNEL);
950 if (!udc->ep0_ctrl_buf)
951 return -ENOMEM;
952
953 INIT_LIST_HEAD(&udc->gadget.ep_list);
954 for (i = 0; i < BCM63XX_NUM_EP; i++) {
955 struct bcm63xx_ep *bep = &udc->bep[i];
956
957 bep->ep.name = bcm63xx_ep_info[i].name;
958 bep->ep.caps = bcm63xx_ep_info[i].caps;
959 bep->ep_num = i;
960 bep->ep.ops = &bcm63xx_udc_ep_ops;
961 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
962 bep->halted = 0;
963 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
964 bep->udc = udc;
965 bep->ep.desc = NULL;
966 INIT_LIST_HEAD(&bep->queue);
967 }
968
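	/*
	 * ep0 is exposed to the gadget layer only through gadget.ep0, so it
	 * does not stay on the general ep_list.
	 */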
969 udc->gadget.ep0 = &udc->bep[0].ep;
970 list_del(&udc->bep[0].ep.ep_list);
971
972 udc->gadget.speed = USB_SPEED_UNKNOWN;
973 udc->ep0state = EP0_SHUTDOWN;
974
975 udc->usbh_clk = clk_get(udc->dev, "usbh");
976 if (IS_ERR(udc->usbh_clk))
977 return -EIO;
978
979 udc->usbd_clk = clk_get(udc->dev, "usbd");
980 if (IS_ERR(udc->usbd_clk)) {
981 clk_put(udc->usbh_clk);
982 return -EIO;
983 }
984
985 set_clocks(udc, true);
986
987 val = USBD_CONTROL_AUTO_CSRS_MASK |
988 USBD_CONTROL_DONE_CSRS_MASK |
989 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
990 usbd_writel(udc, val, USBD_CONTROL_REG);
991
992 val = USBD_STRAPS_APP_SELF_PWR_MASK |
993 USBD_STRAPS_APP_RAM_IF_MASK |
994 USBD_STRAPS_APP_CSRPRGSUP_MASK |
995 USBD_STRAPS_APP_8BITPHY_MASK |
996 USBD_STRAPS_APP_RMTWKUP_MASK;
997
998 if (udc->gadget.max_speed == USB_SPEED_HIGH)
999 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1000 else
1001 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1002 usbd_writel(udc, val, USBD_STRAPS_REG);
1003
1004 bcm63xx_set_ctrl_irqs(udc, false);
1005
1006 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1007
1008 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1009 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1010 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1011
1012 rc = iudma_init(udc);
1013 set_clocks(udc, false);
1014 if (rc)
1015 bcm63xx_uninit_udc_hw(udc);
1016
1017 return 0;
1018}
1019
/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Endpoint descriptor (maxpacket, direction, ...).
 *
 * Most endpoint parameters are fixed in this controller, so this mainly
 * resets the backing IUDMA channel and clears any stall/wedge state.
 */
1032static int bcm63xx_ep_enable(struct usb_ep *ep,
1033 const struct usb_endpoint_descriptor *desc)
1034{
1035 struct bcm63xx_ep *bep = our_ep(ep);
1036 struct bcm63xx_udc *udc = bep->udc;
1037 struct iudma_ch *iudma = bep->iudma;
1038 unsigned long flags;
1039
1040 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1041 return -EINVAL;
1042
1043 if (!udc->driver)
1044 return -ESHUTDOWN;
1045
1046 spin_lock_irqsave(&udc->lock, flags);
1047 if (iudma->enabled) {
1048 spin_unlock_irqrestore(&udc->lock, flags);
1049 return -EINVAL;
1050 }
1051
1052 iudma->enabled = true;
1053 BUG_ON(!list_empty(&bep->queue));
1054
1055 iudma_reset_channel(udc, iudma);
1056
1057 bep->halted = 0;
1058 bcm63xx_set_stall(udc, bep, false);
1059 clear_bit(bep->ep_num, &udc->wedgemap);
1060
1061 ep->desc = desc;
1062 ep->maxpacket = usb_endpoint_maxp(desc);
1063
1064 spin_unlock_irqrestore(&udc->lock, flags);
1065 return 0;
1066}
1067
/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 *
 * Resets the backing IUDMA channel and completes all queued requests
 * with -ESHUTDOWN.
 */
1072static int bcm63xx_ep_disable(struct usb_ep *ep)
1073{
1074 struct bcm63xx_ep *bep = our_ep(ep);
1075 struct bcm63xx_udc *udc = bep->udc;
1076 struct iudma_ch *iudma = bep->iudma;
1077 struct bcm63xx_req *breq, *n;
1078 unsigned long flags;
1079
1080 if (!ep || !ep->desc)
1081 return -EINVAL;
1082
1083 spin_lock_irqsave(&udc->lock, flags);
1084 if (!iudma->enabled) {
1085 spin_unlock_irqrestore(&udc->lock, flags);
1086 return -EINVAL;
1087 }
1088 iudma->enabled = false;
1089
1090 iudma_reset_channel(udc, iudma);
1091
1092 if (!list_empty(&bep->queue)) {
1093 list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1094 usb_gadget_unmap_request(&udc->gadget, &breq->req,
1095 iudma->is_tx);
1096 list_del(&breq->queue);
1097 breq->req.status = -ESHUTDOWN;
1098
1099 spin_unlock_irqrestore(&udc->lock, flags);
1100 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1101 spin_lock_irqsave(&udc->lock, flags);
1102 }
1103 }
1104 ep->desc = NULL;
1105
1106 spin_unlock_irqrestore(&udc->lock, flags);
1107 return 0;
1108}
1109
1110
1111
1112
1113
1114
1115static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1116 gfp_t mem_flags)
1117{
1118 struct bcm63xx_req *breq;
1119
1120 breq = kzalloc(sizeof(*breq), mem_flags);
1121 if (!breq)
1122 return NULL;
1123 return &breq->req;
1124}
1125
1126
1127
1128
1129
1130
1131static void bcm63xx_udc_free_request(struct usb_ep *ep,
1132 struct usb_request *req)
1133{
1134 struct bcm63xx_req *breq = our_req(req);
1135 kfree(breq);
1136}
1137
/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the endpoint queue is empty, the request is started immediately;
 * otherwise it is started from the completion handler once earlier
 * requests finish.  ep0 requests are handed off to the ep0 worker
 * instead, since the hardware handles parts of the control protocol
 * itself.
 */
1152static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1153 gfp_t mem_flags)
1154{
1155 struct bcm63xx_ep *bep = our_ep(ep);
1156 struct bcm63xx_udc *udc = bep->udc;
1157 struct bcm63xx_req *breq = our_req(req);
1158 unsigned long flags;
1159 int rc = 0;
1160
1161 if (unlikely(!req || !req->complete || !req->buf || !ep))
1162 return -EINVAL;
1163
1164 req->actual = 0;
1165 req->status = 0;
1166 breq->offset = 0;
1167
1168 if (bep == &udc->bep[0]) {
		/* only one ep0 reply may be outstanding at a time */
1170 if (udc->ep0_reply)
1171 return -EINVAL;
1172
1173 udc->ep0_reply = req;
1174 schedule_work(&udc->ep0_wq);
1175 return 0;
1176 }
1177
1178 spin_lock_irqsave(&udc->lock, flags);
1179 if (!bep->iudma->enabled) {
1180 rc = -ESHUTDOWN;
1181 goto out;
1182 }
1183
1184 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1185 if (rc == 0) {
1186 list_add_tail(&breq->queue, &bep->queue);
1187 if (list_is_singular(&bep->queue))
1188 iudma_write(udc, bep->iudma, breq);
1189 }
1190
1191out:
1192 spin_unlock_irqrestore(&udc->lock, flags);
1193 return rc;
1194}
1195
/**
 * bcm63xx_udc_dequeue - Remove a pending request from the endpoint queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is currently in flight, the IUDMA channel is reset and
 * the next queued request (if any) is restarted.  The removed request is
 * completed with -ESHUTDOWN.
 */
1205static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1206{
1207 struct bcm63xx_ep *bep = our_ep(ep);
1208 struct bcm63xx_udc *udc = bep->udc;
1209 struct bcm63xx_req *breq = our_req(req), *cur;
1210 unsigned long flags;
1211 int rc = 0;
1212
1213 spin_lock_irqsave(&udc->lock, flags);
1214 if (list_empty(&bep->queue)) {
1215 rc = -EINVAL;
1216 goto out;
1217 }
1218
1219 cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1220 usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1221
1222 if (breq == cur) {
1223 iudma_reset_channel(udc, bep->iudma);
1224 list_del(&breq->queue);
1225
1226 if (!list_empty(&bep->queue)) {
1227 struct bcm63xx_req *next;
1228
1229 next = list_first_entry(&bep->queue,
1230 struct bcm63xx_req, queue);
1231 iudma_write(udc, bep->iudma, next);
1232 }
1233 } else {
1234 list_del(&breq->queue);
1235 }
1236
1237out:
1238 spin_unlock_irqrestore(&udc->lock, flags);
1239
1240 req->status = -ESHUTDOWN;
1241 req->complete(ep, req);
1242
1243 return rc;
1244}
1245
/**
 * bcm63xx_udc_set_halt - Enable/disable the STALL condition on an endpoint.
 * @ep: Endpoint to update.
 * @value: Nonzero to stall, zero to clear the stall.
 *
 * See bcm63xx_udc_set_wedge() for a stall that persists until the next
 * bus reset.
 */
1253static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1254{
1255 struct bcm63xx_ep *bep = our_ep(ep);
1256 struct bcm63xx_udc *udc = bep->udc;
1257 unsigned long flags;
1258
1259 spin_lock_irqsave(&udc->lock, flags);
1260 bcm63xx_set_stall(udc, bep, !!value);
1261 bep->halted = value;
1262 spin_unlock_irqrestore(&udc->lock, flags);
1263
1264 return 0;
1265}
1266
/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next bus reset.
 * @ep: Endpoint to wedge.
 *
 * The wedge is recorded in udc->wedgemap and re-asserted from the SETUP
 * interrupt path, so it survives until a bus reset clears it.
 */
1273static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1274{
1275 struct bcm63xx_ep *bep = our_ep(ep);
1276 struct bcm63xx_udc *udc = bep->udc;
1277 unsigned long flags;
1278
1279 spin_lock_irqsave(&udc->lock, flags);
1280 set_bit(bep->ep_num, &udc->wedgemap);
1281 bcm63xx_set_stall(udc, bep, true);
1282 spin_unlock_irqrestore(&udc->lock, flags);
1283
1284 return 0;
1285}
1286
1287static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1288 .enable = bcm63xx_ep_enable,
1289 .disable = bcm63xx_ep_disable,
1290
1291 .alloc_request = bcm63xx_udc_alloc_request,
1292 .free_request = bcm63xx_udc_free_request,
1293
1294 .queue = bcm63xx_udc_queue,
1295 .dequeue = bcm63xx_udc_dequeue,
1296
1297 .set_halt = bcm63xx_udc_set_halt,
1298 .set_wedge = bcm63xx_udc_set_wedge,
1299};
1300
/**
 * bcm63xx_ep0_setup_callback - Invoke the gadget driver's setup() callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 *
 * The lock is dropped across the callback, since the gadget driver may
 * immediately queue ep0 requests from within it.
 */
1310static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1311 struct usb_ctrlrequest *ctrl)
1312{
1313 int rc;
1314
1315 spin_unlock_irq(&udc->lock);
1316 rc = udc->driver->setup(&udc->gadget, ctrl);
1317 spin_lock_irq(&udc->lock);
1318 return rc;
1319}
1320
/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * The hardware auto-acks SET_CONFIGURATION, so the gadget driver never
 * sees the real packet.  Rebuild an equivalent usb_ctrlrequest from the
 * cfg value latched in the status register and pass it up through the
 * normal setup() path.
 */
1333static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1334{
1335 struct usb_ctrlrequest ctrl;
1336 int rc;
1337
1338 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1339 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1340 ctrl.wValue = cpu_to_le16(udc->cfg);
1341 ctrl.wIndex = 0;
1342 ctrl.wLength = 0;
1343
1344 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1345 if (rc < 0) {
1346 dev_warn_ratelimited(udc->dev,
1347 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1348 udc->cfg);
1349 }
1350 return rc;
1351}
1352
/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 *
 * Same idea as bcm63xx_ep0_spoof_set_cfg(), but for SET_INTERFACE.
 */
1357static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1358{
1359 struct usb_ctrlrequest ctrl;
1360 int rc;
1361
1362 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1363 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1364 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1365 ctrl.wIndex = cpu_to_le16(udc->iface);
1366 ctrl.wLength = 0;
1367
1368 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1369 if (rc < 0) {
1370 dev_warn_ratelimited(udc->dev,
1371 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1372 udc->iface, udc->alt_iface);
1373 }
1374 return rc;
1375}
1376
/**
 * bcm63xx_ep0_map_write - DMA-map a request and queue it on an ep0 channel.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel (IUDMA_EP0_RXCHAN or IUDMA_EP0_TXCHAN).
 * @req: Request to start.
 */
1383static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1384 struct usb_request *req)
1385{
1386 struct bcm63xx_req *breq = our_req(req);
1387 struct iudma_ch *iudma = &udc->iudma[ch_idx];
1388
1389 BUG_ON(udc->ep0_request);
1390 udc->ep0_request = req;
1391
1392 req->actual = 0;
1393 breq->offset = 0;
1394 usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1395 iudma_write(udc, iudma, breq);
1396}
1397
/**
 * bcm63xx_ep0_complete - Set a request's status and run its completion.
 * @udc: Reference to the device controller.
 * @req: Request to complete.
 * @status: Status to report to the gadget driver.
 *
 * The lock is dropped around the completion callback.
 */
1404static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1405 struct usb_request *req, int status)
1406{
1407 req->status = status;
1408 if (status)
1409 req->actual = 0;
1410 if (req->complete) {
1411 spin_unlock_irq(&udc->lock);
1412 req->complete(&udc->bep[0].ep, req);
1413 spin_lock_irq(&udc->lock);
1414 }
1415}
1416
/**
 * bcm63xx_ep0_nuke_reply - Abort the pending ep0 data-phase reply.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for IN (TX) replies, zero for OUT (RX) replies.
 *
 * The reply is unmapped and completed with -ESHUTDOWN.
 */
1423static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1424{
1425 struct usb_request *req = udc->ep0_reply;
1426
1427 udc->ep0_reply = NULL;
1428 usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1429 if (udc->ep0_request == req) {
1430 udc->ep0_req_completed = 0;
1431 udc->ep0_request = NULL;
1432 }
1433 bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1434}
1435
/**
 * bcm63xx_ep0_read_complete - Close out the current ep0 transaction.
 * @udc: Reference to the device controller.
 *
 * Returns the number of bytes actually transferred.
 */
1441static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1442{
1443 struct usb_request *req = udc->ep0_request;
1444
1445 udc->ep0_req_completed = 0;
1446 udc->ep0_request = NULL;
1447
1448 return req->actual;
1449}
1450
/**
 * bcm63xx_ep0_internal_request - Start an ep0 transaction from the driver.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel (IUDMA_EP0_RXCHAN or IUDMA_EP0_TXCHAN).
 * @length: Number of bytes to transfer; 0 queues a zero-length packet.
 *
 * Uses the driver's own ep0_ctrl_buf rather than a gadget request, e.g.
 * for receiving SETUP packets and sending status-stage ZLPs.
 */
1460static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1461 int length)
1462{
1463 struct usb_request *req = &udc->ep0_ctrl_req.req;
1464
1465 req->buf = udc->ep0_ctrl_buf;
1466 req->length = length;
1467 req->complete = NULL;
1468
1469 bcm63xx_ep0_map_write(udc, ch_idx, req);
1470}
1471
/**
 * bcm63xx_ep0_do_setup - Parse a new SETUP packet that arrived on ep0.
 * @udc: Reference to the device controller.
 *
 * Validates the packet, hands it to the gadget driver, and returns the
 * next ep0 state based on wLength and the transfer direction.
 */
1480static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1481{
1482 int rc;
1483 struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1484
1485 rc = bcm63xx_ep0_read_complete(udc);
1486
1487 if (rc < 0) {
1488 dev_err(udc->dev, "missing SETUP packet\n");
1489 return EP0_IDLE;
1490 }
1491
1492
1493
1494
1495
1496
1497 if (rc == 0)
1498 return EP0_REQUEUE;
1499
1500
1501 if (rc != sizeof(*ctrl)) {
1502 dev_warn_ratelimited(udc->dev,
1503 "malformed SETUP packet (%d bytes)\n", rc);
1504 return EP0_REQUEUE;
1505 }
1506
1507
1508 rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1509 if (rc < 0) {
1510 bcm63xx_set_stall(udc, &udc->bep[0], true);
1511 return EP0_REQUEUE;
1512 }
1513
1514 if (!ctrl->wLength)
1515 return EP0_REQUEUE;
1516 else if (ctrl->bRequestType & USB_DIR_IN)
1517 return EP0_IN_DATA_PHASE_SETUP;
1518 else
1519 return EP0_OUT_DATA_PHASE_SETUP;
1520}
1521
/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests while ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * Handles, in order: a pending bus reset, spoofed SET_CONFIGURATION /
 * SET_INTERFACE requests, a completed SETUP read, a shutdown request,
 * and stale gadget replies.  Returns 0 if something was handled, or
 * -EAGAIN if there is nothing to do.
 */
1533static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1534{
1535 if (udc->ep0_req_reset) {
1536 udc->ep0_req_reset = 0;
1537 } else if (udc->ep0_req_set_cfg) {
1538 udc->ep0_req_set_cfg = 0;
1539 if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1540 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1541 } else if (udc->ep0_req_set_iface) {
1542 udc->ep0_req_set_iface = 0;
1543 if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1544 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1545 } else if (udc->ep0_req_completed) {
1546 udc->ep0state = bcm63xx_ep0_do_setup(udc);
1547 return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1548 } else if (udc->ep0_req_shutdown) {
1549 udc->ep0_req_shutdown = 0;
1550 udc->ep0_req_completed = 0;
1551 udc->ep0_request = NULL;
1552 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1553 usb_gadget_unmap_request(&udc->gadget,
1554 &udc->ep0_ctrl_req.req, 0);
		/*
		 * bcm63xx_udc_pullup() polls ep0state without holding the
		 * lock, so make sure the teardown above is visible before
		 * EP0_SHUTDOWN is.
		 */
1557 mb();
1558 udc->ep0state = EP0_SHUTDOWN;
1559 } else if (udc->ep0_reply) {
		/*
		 * A gadget reply arrived while no control transfer is in
		 * progress; replies are only expected in response to
		 * setup().  Drop it so it can't be confused with the next
		 * transfer's data phase.
		 */
1565 dev_warn(udc->dev, "nuking unexpected reply\n");
1566 bcm63xx_ep0_nuke_reply(udc, 0);
1567 } else {
1568 return -EAGAIN;
1569 }
1570
1571 return 0;
1572}
1573
/**
 * bcm63xx_ep0_one_round - Run one iteration of the ep0 state machine.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if the state changed (run another round), or -EAGAIN if the
 * state machine has nothing more to do for now.
 */
1580static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1581{
1582 enum bcm63xx_ep0_state ep0state = udc->ep0state;
1583 bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1584
1585 switch (udc->ep0state) {
1586 case EP0_REQUEUE:
		/* set up the RX channel to receive the next SETUP packet */
1588 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1589 BCM63XX_MAX_CTRL_PKT);
1590 ep0state = EP0_IDLE;
1591 break;
1592 case EP0_IDLE:
1593 return bcm63xx_ep0_do_idle(udc);
1594 case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * The gadget driver is expected to answer the setup()
		 * callback by queueing an IN reply on ep0.  Wait here until
		 * that reply shows up, then stream it out on the ep0 TX
		 * channel.  A reset or shutdown aborts the wait and re-arms
		 * ep0 for the next SETUP packet.
		 */
1604 if (udc->ep0_reply) {
1605 bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1606 udc->ep0_reply);
1607 ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1608 } else if (shutdown) {
1609 ep0state = EP0_REQUEUE;
1610 }
1611 break;
1612 case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Wait for the IN data to finish going out on the TX
		 * channel, then drop the reply and re-arm ep0 for the next
		 * SETUP packet.  On a reset/shutdown, reset the TX channel
		 * and complete the reply with -ESHUTDOWN instead.
		 */
1620 if (udc->ep0_req_completed) {
1621 udc->ep0_reply = NULL;
1622 bcm63xx_ep0_read_complete(udc);
1623
1624
1625
1626
1627 ep0state = EP0_REQUEUE;
1628 } else if (shutdown) {
1629 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1630 bcm63xx_ep0_nuke_reply(udc, 1);
1631 ep0state = EP0_REQUEUE;
1632 }
1633 break;
1634 }
1635 case EP0_OUT_DATA_PHASE_SETUP:
1636
1637 if (udc->ep0_reply) {
1638 bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1639 udc->ep0_reply);
1640 ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1641 } else if (shutdown) {
1642 ep0state = EP0_REQUEUE;
1643 }
1644 break;
1645 case EP0_OUT_DATA_PHASE_COMPLETE: {
1646
1647 if (udc->ep0_req_completed) {
1648 udc->ep0_reply = NULL;
1649 bcm63xx_ep0_read_complete(udc);
1650
1651
1652 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1653 ep0state = EP0_OUT_STATUS_PHASE;
1654 } else if (shutdown) {
1655 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1656 bcm63xx_ep0_nuke_reply(udc, 0);
1657 ep0state = EP0_REQUEUE;
1658 }
1659 break;
1660 }
1661 case EP0_OUT_STATUS_PHASE:
		/*
		 * The zero-length packet queued at the end of the OUT data
		 * phase serves as the status stage.  Wait for it to
		 * complete, then re-arm ep0 for the next SETUP packet.
		 */
1670 if (udc->ep0_req_completed) {
1671 bcm63xx_ep0_read_complete(udc);
1672 ep0state = EP0_REQUEUE;
1673 } else if (shutdown) {
1674 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1675 udc->ep0_request = NULL;
1676 ep0state = EP0_REQUEUE;
1677 }
1678 break;
1679 case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Hardware auto-acks SET_CONFIGURATION and SET_INTERFACE, so
		 * there is no real status stage left to run for the spoofed
		 * requests.  The gadget driver still queues a zero-length
		 * reply from its setup() handler; just complete that reply
		 * locally, without touching the hardware, and return to
		 * IDLE.
		 */
1694 struct usb_request *r = udc->ep0_reply;
1695
1696 if (!r) {
1697 if (shutdown)
1698 ep0state = EP0_IDLE;
1699 break;
1700 }
1701
1702 bcm63xx_ep0_complete(udc, r, 0);
1703 udc->ep0_reply = NULL;
1704 ep0state = EP0_IDLE;
1705 break;
1706 }
1707 case EP0_SHUTDOWN:
1708 break;
1709 }
1710
1711 if (udc->ep0state == ep0state)
1712 return -EAGAIN;
1713
1714 udc->ep0state = ep0state;
1715 return 0;
1716}
1717
/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * Triggered whenever an event occurs on ep0.  Running the state machine
 * from process context keeps it out of the interrupt handlers and avoids
 * deadlocks when calling back into the gadget driver, which may queue
 * new ep0 requests immediately.  Loops until bcm63xx_ep0_one_round()
 * reports nothing left to do.
 */
1732static void bcm63xx_ep0_process(struct work_struct *w)
1733{
1734 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1735 spin_lock_irq(&udc->lock);
1736 while (bcm63xx_ep0_one_round(udc) == 0)
1737 ;
1738 spin_unlock_irq(&udc->lock);
1739}
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1750{
1751 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1752
1753 return (usbd_readl(udc, USBD_STATUS_REG) &
1754 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1755}
1756
/**
 * bcm63xx_udc_pullup - Enable/disable the D+ pullup.
 * @gadget: USB device.
 * @is_on: 0 to disable the pullup, 1 to enable.
 *
 * Enabling brings the ep0 state machine out of EP0_SHUTDOWN and
 * reprograms the FIFOs and endpoint CSRs; disabling requests an ep0
 * shutdown and waits for the state machine to reach EP0_SHUTDOWN.
 */
1764static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1765{
1766 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1767 unsigned long flags;
1768 int i, rc = -EINVAL;
1769
1770 spin_lock_irqsave(&udc->lock, flags);
1771 if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1772 udc->gadget.speed = USB_SPEED_UNKNOWN;
1773 udc->ep0state = EP0_REQUEUE;
1774 bcm63xx_fifo_setup(udc);
1775 bcm63xx_fifo_reset(udc);
1776 bcm63xx_ep_setup(udc);
1777
1778 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1779 for (i = 0; i < BCM63XX_NUM_EP; i++)
1780 bcm63xx_set_stall(udc, &udc->bep[i], false);
1781
1782 bcm63xx_set_ctrl_irqs(udc, true);
1783 bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1784 rc = 0;
1785 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1786 bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1787
1788 udc->ep0_req_shutdown = 1;
1789 spin_unlock_irqrestore(&udc->lock, flags);
	/* poll until the ep0 worker has wound down to EP0_SHUTDOWN */
1791 while (1) {
1792 schedule_work(&udc->ep0_wq);
1793 if (udc->ep0state == EP0_SHUTDOWN)
1794 break;
1795 msleep(50);
1796 }
1797 bcm63xx_set_ctrl_irqs(udc, false);
1798 cancel_work_sync(&udc->ep0_wq);
1799 return 0;
1800 }
1801
1802 spin_unlock_irqrestore(&udc->lock, flags);
1803 return rc;
1804}
1805
/**
 * bcm63xx_udc_start - Bind a gadget driver and bring up the controller.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 */
1811static int bcm63xx_udc_start(struct usb_gadget *gadget,
1812 struct usb_gadget_driver *driver)
1813{
1814 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1815 unsigned long flags;
1816
1817 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1818 !driver->setup)
1819 return -EINVAL;
1820 if (!udc)
1821 return -ENODEV;
1822 if (udc->driver)
1823 return -EBUSY;
1824
1825 spin_lock_irqsave(&udc->lock, flags);
1826
1827 set_clocks(udc, true);
1828 bcm63xx_fifo_setup(udc);
1829 bcm63xx_ep_init(udc);
1830 bcm63xx_ep_setup(udc);
1831 bcm63xx_fifo_reset(udc);
1832 bcm63xx_select_phy_mode(udc, true);
1833
1834 udc->driver = driver;
1835 driver->driver.bus = NULL;
1836 udc->gadget.dev.of_node = udc->dev->of_node;
1837
1838 spin_unlock_irqrestore(&udc->lock, flags);
1839
1840 return 0;
1841}
1842
/**
 * bcm63xx_udc_stop - Unbind the gadget driver and shut the controller down.
 * @gadget: USB device.
 */
1848static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1849{
1850 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1851 unsigned long flags;
1852
1853 spin_lock_irqsave(&udc->lock, flags);
1854
1855 udc->driver = NULL;

	/* brief settling delay before the port is handed back to host mode */
1863 msleep(100);
1864
1865 bcm63xx_select_phy_mode(udc, false);
1866 set_clocks(udc, false);
1867
1868 spin_unlock_irqrestore(&udc->lock, flags);
1869
1870 return 0;
1871}
1872
1873static const struct usb_gadget_ops bcm63xx_udc_ops = {
1874 .get_frame = bcm63xx_udc_get_frame,
1875 .pullup = bcm63xx_udc_pullup,
1876 .udc_start = bcm63xx_udc_start,
1877 .udc_stop = bcm63xx_udc_stop,
1878};
1879
/**
 * bcm63xx_update_cfg_iface - Read the current cfg/interface/altsetting.
 * @udc: Reference to the device controller.
 *
 * The hardware intercepts SET_CONFIGURATION and SET_INTERFACE itself, so
 * the driver can only read the resulting values from the status register
 * and reprogram the endpoint CSRs to match.
 */
1893static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1894{
1895 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1896
1897 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1898 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1899 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1900 USBD_STATUS_ALTINTF_SHIFT;
1901 bcm63xx_ep_setup(udc);
1902}
1903
/**
 * bcm63xx_update_link_speed - Check whether the negotiated speed changed.
 * @udc: Reference to the device controller.
 *
 * Returns 1 if the speed changed (FIFOs and endpoint CSRs need to be
 * reprogrammed), 0 otherwise.
 */
1911static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1912{
1913 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1914 enum usb_device_speed oldspeed = udc->gadget.speed;
1915
1916 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1917 case BCM63XX_SPD_HIGH:
1918 udc->gadget.speed = USB_SPEED_HIGH;
1919 break;
1920 case BCM63XX_SPD_FULL:
1921 udc->gadget.speed = USB_SPEED_FULL;
1922 break;
1923 default:
1924
1925 udc->gadget.speed = USB_SPEED_UNKNOWN;
1926 dev_err(udc->dev,
1927 "received SETUP packet with invalid link speed\n");
1928 return 0;
1929 }
1930
1931 if (udc->gadget.speed != oldspeed) {
1932 dev_info(udc->dev, "link up, %s-speed mode\n",
1933 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1934 return 1;
1935 } else {
1936 return 0;
1937 }
1938}
1939
/**
 * bcm63xx_update_wedge - Iterate over all wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to re-assert the stall, false to clear the wedge.
 *
 * Called with true from the SETUP interrupt path (the hardware may have
 * silently cleared stalls) and with false on a bus reset.
 */
1951static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1952{
1953 int i;
1954
1955 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1956 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1957 if (!new_status)
1958 clear_bit(i, &udc->wedgemap);
1959 }
1960}
1961
/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * Handles link changes, bus reset, SETUP arrival, and the hardware-acked
 * SET_CONFIGURATION / SET_INTERFACE events, scheduling the ep0 worker
 * where needed.
 */
1970static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1971{
1972 struct bcm63xx_udc *udc = dev_id;
1973 u32 stat;
1974 bool disconnected = false, bus_reset = false;
1975
1976 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1977 usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1978
1979 usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1980
1981 spin_lock(&udc->lock);
1982 if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1983
1984
1985 if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1986 USBD_EVENTS_USB_LINK_MASK) &&
1987 udc->gadget.speed != USB_SPEED_UNKNOWN)
1988 dev_info(udc->dev, "link down\n");
1989
1990 udc->gadget.speed = USB_SPEED_UNKNOWN;
1991 disconnected = true;
1992 }
1993 if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1994 bcm63xx_fifo_setup(udc);
1995 bcm63xx_fifo_reset(udc);
1996 bcm63xx_ep_setup(udc);
1997
1998 bcm63xx_update_wedge(udc, false);
1999
2000 udc->ep0_req_reset = 1;
2001 schedule_work(&udc->ep0_wq);
2002 bus_reset = true;
2003 }
2004 if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2005 if (bcm63xx_update_link_speed(udc)) {
2006 bcm63xx_fifo_setup(udc);
2007 bcm63xx_ep_setup(udc);
2008 }
2009 bcm63xx_update_wedge(udc, true);
2010 }
2011 if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2012 bcm63xx_update_cfg_iface(udc);
2013 udc->ep0_req_set_cfg = 1;
2014 schedule_work(&udc->ep0_wq);
2015 }
2016 if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2017 bcm63xx_update_cfg_iface(udc);
2018 udc->ep0_req_set_iface = 1;
2019 schedule_work(&udc->ep0_wq);
2020 }
2021 spin_unlock(&udc->lock);
2022
2023 if (disconnected && udc->driver)
2024 udc->driver->disconnect(&udc->gadget);
2025 else if (bus_reset && udc->driver)
2026 usb_gadget_udc_reset(&udc->gadget, udc->driver);
2027
2028 return IRQ_HANDLED;
2029}
2030
/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that raised the interrupt.
 *
 * Reaps the completed buffer descriptors, queues the next fragment or
 * the next pending request, and calls the request's completion callback
 * once the whole transfer is done.
 */
2041static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2042{
2043 struct iudma_ch *iudma = dev_id;
2044 struct bcm63xx_udc *udc = iudma->udc;
2045 struct bcm63xx_ep *bep;
2046 struct usb_request *req = NULL;
2047 struct bcm63xx_req *breq = NULL;
2048 int rc;
2049 bool is_done = false;
2050
2051 spin_lock(&udc->lock);
2052
2053 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2054 ENETDMAC_IR_REG, iudma->ch_idx);
2055 bep = iudma->bep;
2056 rc = iudma_read(udc, iudma);
2057
2058
2059 if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2060 iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2061 req = udc->ep0_request;
2062 breq = our_req(req);
2063
2064
2065 if (rc >= 0) {
2066 req->actual += rc;
2067
2068 if (req->actual >= req->length || breq->bd_bytes > rc) {
2069 udc->ep0_req_completed = 1;
2070 is_done = true;
2071 schedule_work(&udc->ep0_wq);
2072
2073
2074 req->actual = min(req->actual, req->length);
2075 } else {
2076
2077 iudma_write(udc, iudma, breq);
2078 }
2079 }
2080 } else if (!list_empty(&bep->queue)) {
2081 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2082 req = &breq->req;
2083
2084 if (rc >= 0) {
2085 req->actual += rc;
2086
2087 if (req->actual >= req->length || breq->bd_bytes > rc) {
2088 is_done = true;
2089 list_del(&breq->queue);
2090
2091 req->actual = min(req->actual, req->length);
2092
2093 if (!list_empty(&bep->queue)) {
2094 struct bcm63xx_req *next;
2095
2096 next = list_first_entry(&bep->queue,
2097 struct bcm63xx_req, queue);
2098 iudma_write(udc, iudma, next);
2099 }
2100 } else {
2101 iudma_write(udc, iudma, breq);
2102 }
2103 }
2104 }
2105 spin_unlock(&udc->lock);
2106
2107 if (is_done) {
2108 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2109 if (req->complete)
2110 req->complete(&bep->ep, req);
2111 }
2112
2113 return IRQ_HANDLED;
2114}
2115
/**
 * bcm63xx_usbd_dbg_show - Show USBD controller state in debugfs.
 * @s: seq_file to which the information is written.
 * @p: Unused.
 */
2127static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2128{
2129 struct bcm63xx_udc *udc = s->private;
2130
2131 if (!udc->driver)
2132 return -ENODEV;
2133
2134 seq_printf(s, "ep0 state: %s\n",
2135 bcm63xx_ep0_state_names[udc->ep0state]);
2136 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2137 udc->ep0_req_reset ? "reset " : "",
2138 udc->ep0_req_set_cfg ? "set_cfg " : "",
2139 udc->ep0_req_set_iface ? "set_iface " : "",
2140 udc->ep0_req_shutdown ? "shutdown " : "",
2141 udc->ep0_request ? "pending " : "",
2142 udc->ep0_req_completed ? "completed " : "",
2143 udc->ep0_reply ? "reply " : "");
2144 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2145 udc->cfg, udc->iface, udc->alt_iface);
2146 seq_printf(s, "regs:\n");
2147 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2148 usbd_readl(udc, USBD_CONTROL_REG),
2149 usbd_readl(udc, USBD_STRAPS_REG),
2150 usbd_readl(udc, USBD_STATUS_REG));
2151 seq_printf(s, " events: %08x; stall: %08x\n",
2152 usbd_readl(udc, USBD_EVENTS_REG),
2153 usbd_readl(udc, USBD_STALL_REG));
2154
2155 return 0;
2156}
2157DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
2158
/**
 * bcm63xx_iudma_dbg_show - Show IUDMA channel state and descriptors.
 * @s: seq_file to which the information is written.
 * @p: Unused.
 */
2166static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2167{
2168 struct bcm63xx_udc *udc = s->private;
2169 int ch_idx, i;
2170 u32 sram2, sram3;
2171
2172 if (!udc->driver)
2173 return -ENODEV;
2174
2175 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2176 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2177 struct list_head *pos;
2178
2179 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2180 switch (iudma_defaults[ch_idx].ep_type) {
2181 case BCMEP_CTRL:
2182 seq_printf(s, "control");
2183 break;
2184 case BCMEP_BULK:
2185 seq_printf(s, "bulk");
2186 break;
2187 case BCMEP_INTR:
2188 seq_printf(s, "interrupt");
2189 break;
2190 }
2191 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2192 seq_printf(s, " [ep%d]:\n",
2193 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2194 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2195 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2196 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2197 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2198 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2199
2200 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2201 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2202 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2203 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2204 sram2 >> 16, sram2 & 0xffff,
2205 sram3 >> 16, sram3 & 0xffff,
2206 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2207 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2208 iudma->n_bds);
2209
2210 if (iudma->bep) {
2211 i = 0;
2212 list_for_each(pos, &iudma->bep->queue)
2213 i++;
2214 seq_printf(s, "; %d queued\n", i);
2215 } else {
2216 seq_printf(s, "\n");
2217 }
2218
2219 for (i = 0; i < iudma->n_bds; i++) {
2220 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2221
2222 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2223 i * sizeof(*d), i,
2224 d->len_stat >> 16, d->len_stat & 0xffff,
2225 d->address);
2226 if (d == iudma->read_bd)
2227 seq_printf(s, " <<RD");
2228 if (d == iudma->write_bd)
2229 seq_printf(s, " <<WR");
2230 seq_printf(s, "\n");
2231 }
2232
2233 seq_printf(s, "\n");
2234 }
2235
2236 return 0;
2237}
2238DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
2239
2240
2241
2242
2243
2244static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2245{
2246 struct dentry *root;
2247
2248 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2249 return;
2250
2251 root = debugfs_create_dir(udc->gadget.name, NULL);
2252 udc->debugfs_root = root;
2253
2254 debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
2255 debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
2256}
2257
/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove_recursive() copes with a NULL dentry, so this is safe
 * even if the entries were never created.
 */
2264static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2265{
2266 debugfs_remove_recursive(udc->debugfs_root);
2267}
2268
/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx board support code.
 *
 * Platform data is mandatory: pd->port_no identifies which USB port must
 * be switched into device mode.
 */
2280static int bcm63xx_udc_probe(struct platform_device *pdev)
2281{
2282 struct device *dev = &pdev->dev;
2283 struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2284 struct bcm63xx_udc *udc;
2285 struct resource *res;
2286 int rc = -ENOMEM, i, irq;
2287
2288 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2289 if (!udc)
2290 return -ENOMEM;
2291
2292 platform_set_drvdata(pdev, udc);
2293 udc->dev = dev;
2294 udc->pd = pd;
2295
2296 if (!pd) {
2297 dev_err(dev, "missing platform data\n");
2298 return -EINVAL;
2299 }
2300
2301 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2302 udc->usbd_regs = devm_ioremap_resource(dev, res);
2303 if (IS_ERR(udc->usbd_regs))
2304 return PTR_ERR(udc->usbd_regs);
2305
2306 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2307 udc->iudma_regs = devm_ioremap_resource(dev, res);
2308 if (IS_ERR(udc->iudma_regs))
2309 return PTR_ERR(udc->iudma_regs);
2310
2311 spin_lock_init(&udc->lock);
2312 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2313
2314 udc->gadget.ops = &bcm63xx_udc_ops;
2315 udc->gadget.name = dev_name(dev);
2316
2317 if (!pd->use_fullspeed && !use_fullspeed)
2318 udc->gadget.max_speed = USB_SPEED_HIGH;
2319 else
2320 udc->gadget.max_speed = USB_SPEED_FULL;
2321
2322
2323 rc = bcm63xx_init_udc_hw(udc);
2324 if (rc)
2325 return rc;
2326
2327 rc = -ENXIO;
2328
	/* IRQ resource #0: USBD control interrupt */
2330 irq = platform_get_irq(pdev, 0);
2331 if (irq < 0) {
2332 dev_err(dev, "missing IRQ resource #0\n");
2333 goto out_uninit;
2334 }
2335 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2336 dev_name(dev), udc) < 0)
2337 goto report_request_failure;
2338
	/* IRQ resources #1-6: per-channel IUDMA interrupts */
2340 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2341 irq = platform_get_irq(pdev, i + 1);
2342 if (irq < 0) {
2343 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2344 goto out_uninit;
2345 }
2346 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2347 dev_name(dev), &udc->iudma[i]) < 0)
2348 goto report_request_failure;
2349 }
2350
2351 bcm63xx_udc_init_debugfs(udc);
2352 rc = usb_add_gadget_udc(dev, &udc->gadget);
2353 if (!rc)
2354 return 0;
2355
2356 bcm63xx_udc_cleanup_debugfs(udc);
2357out_uninit:
2358 bcm63xx_uninit_udc_hw(udc);
2359 return rc;
2360
2361report_request_failure:
2362 dev_err(dev, "error requesting IRQ #%d\n", irq);
2363 goto out_uninit;
2364}
2365
/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx board support code.
 */
2370static int bcm63xx_udc_remove(struct platform_device *pdev)
2371{
2372 struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2373
2374 bcm63xx_udc_cleanup_debugfs(udc);
2375 usb_del_gadget_udc(&udc->gadget);
2376 BUG_ON(udc->driver);
2377
2378 bcm63xx_uninit_udc_hw(udc);
2379
2380 return 0;
2381}
2382
2383static struct platform_driver bcm63xx_udc_driver = {
2384 .probe = bcm63xx_udc_probe,
2385 .remove = bcm63xx_udc_remove,
2386 .driver = {
2387 .name = DRV_MODULE_NAME,
2388 },
2389};
2390module_platform_driver(bcm63xx_udc_driver);
2391
2392MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2393MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2394MODULE_LICENSE("GPL");
2395MODULE_ALIAS("platform:" DRV_MODULE_NAME);