1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/bitops.h>
14#include <linux/bug.h>
15#include <linux/clk.h>
16#include <linux/compiler.h>
17#include <linux/debugfs.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/dma-mapping.h>
21#include <linux/errno.h>
22#include <linux/interrupt.h>
23#include <linux/ioport.h>
24#include <linux/kconfig.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/platform_device.h>
30#include <linux/sched.h>
31#include <linux/seq_file.h>
32#include <linux/slab.h>
33#include <linux/timer.h>
34#include <linux/usb/ch9.h>
35#include <linux/usb/gadget.h>
36#include <linux/workqueue.h>
37
38#include <bcm63xx_cpu.h>
39#include <bcm63xx_iudma.h>
40#include <bcm63xx_dev_usb_usbd.h>
41#include <bcm63xx_io.h>
42#include <bcm63xx_regs.h>
43
44#define DRV_MODULE_NAME "bcm63xx_udc"
45
/* Endpoint names exposed through the gadget API; index matches ep_num. */
static const char bcm63xx_ep0name[] = "ep0";
static const char *const bcm63xx_ep_name[] = {
	bcm63xx_ep0name,
	"ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
};

/* Module parameter: restrict the controller to full-speed operation. */
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/*
 * Module parameter: when set, RX transfers raise a single IRQ per transfer
 * instead of one per packet (trades latency for fewer interrupts).
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
78
79#define BCM63XX_NUM_EP 5
80#define BCM63XX_NUM_IUDMA 6
81#define BCM63XX_NUM_FIFO_PAIRS 3
82
83#define IUDMA_RESET_TIMEOUT_US 10000
84
85#define IUDMA_EP0_RXCHAN 0
86#define IUDMA_EP0_TXCHAN 1
87
88#define IUDMA_MAX_FRAGMENT 2048
89#define BCM63XX_MAX_CTRL_PKT 64
90
91#define BCMEP_CTRL 0x00
92#define BCMEP_ISOC 0x01
93#define BCMEP_BULK 0x02
94#define BCMEP_INTR 0x03
95
96#define BCMEP_OUT 0x00
97#define BCMEP_IN 0x01
98
99#define BCM63XX_SPD_FULL 1
100#define BCM63XX_SPD_HIGH 0
101
102#define IUDMA_DMAC_OFFSET 0x200
103#define IUDMA_DMAS_OFFSET 0x400
104
/*
 * States of the ep0 control-transfer state machine, driven from the
 * ep0 workqueue (ep0_wq).  Names correspond one-to-one with the
 * bcm63xx_ep0_state_names[] table below.
 */
enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};
116
/* Human-readable names for enum bcm63xx_ep0_state (debug output only). */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
128
129
130
131
132
133
134
135
136
137
138
/*
 * struct iudma_ch_cfg - static configuration for one IUDMA channel.
 * @ep_num:       USB endpoint number this channel serves, or -1 for the
 *                dedicated ep0 RX "fake" channel (see iudma_defaults[0])
 * @n_bds:        number of buffer descriptors in the channel's ring
 * @ep_type:      BCMEP_CTRL / BCMEP_ISOC / BCMEP_BULK / BCMEP_INTR
 * @dir:          BCMEP_OUT or BCMEP_IN
 * @n_fifo_slots: FIFO slots allocated to this channel in bcm63xx_fifo_setup()
 * @max_pkt_hs:   max packet size when enumerated at high speed
 * @max_pkt_fs:   max packet size when enumerated at full speed
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};
148
/*
 * Per-channel defaults for the six IUDMA channels.  Even channel indices
 * are RX (OUT), odd indices are TX (IN); channels 0/1 carry ep0 control
 * traffic (IUDMA_EP0_RXCHAN / IUDMA_EP0_TXCHAN).
 *
 *        ep_num  n_bds  type        dir        fifo  hs   fs
 */
static const struct iudma_ch_cfg iudma_defaults[] = {
	[0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
	[1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
	[2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
	[3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
	[4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
	[5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
};
167
168struct bcm63xx_udc;
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
/*
 * struct iudma_ch - runtime state for one IUDMA channel.
 * @ch_idx:     index into udc->iudma[] / iudma_defaults[]
 * @ep_num:     endpoint number served, or -1 (ep0 fake RX channel)
 * @enabled:    channel accepts new requests (set by ep_enable, or at init
 *              for the ep0 channels)
 * @max_pkt:    current max packet size (speed dependent, see bcm63xx_ep_setup)
 * @is_tx:      true for odd (IN/TX) channels
 * @bep:        back-pointer to the endpoint, or NULL for the fake channel
 * @udc:        owning controller
 * @read_bd:    next descriptor the CPU will reap in iudma_read()
 * @write_bd:   next descriptor the CPU will fill in iudma_write()
 * @end_bd:     last descriptor of the ring (wrap point)
 * @n_bds_used: descriptors queued for the in-flight request
 * @bd_ring:    DMA-coherent descriptor ring
 * @bd_ring_dma: bus address of @bd_ring
 * @n_bds:      total descriptors in the ring
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};
212
213
214
215
216
217
218
219
220
221
/*
 * struct bcm63xx_ep - driver-private endpoint state.
 * @ep_num: USB endpoint number
 * @iudma:  IUDMA channel bound to this endpoint
 * @ep:     generic gadget-layer endpoint
 * @udc:    owning controller
 * @queue:  pending bcm63xx_req entries (protected by udc->lock)
 * @halted: endpoint is currently stalled
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};
230
231
232
233
234
235
236
237
238
/*
 * struct bcm63xx_req - driver-private wrapper around a usb_request.
 * @queue:    linkage on the owning endpoint's queue
 * @req:      embedded gadget-layer request
 * @offset:   bytes of req.buf already handed to the DMA engine
 * @bd_bytes: bytes covered by the descriptors of the current DMA round
 * @iudma:    channel the request was last written to
 */
struct bcm63xx_req {
	struct list_head queue;
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
/*
 * struct bcm63xx_udc - device state for one BCM63xx UDC instance.
 *
 * @lock protects everything reached from both process and IRQ context.
 * The ep0_req_* one-bit flags are latched by IRQ handlers and consumed
 * by the ep0 workqueue state machine (@ep0_wq / @ep0state).
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	/* MMIO register windows: USBD core and IUDMA engine */
	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	/* Current configuration / interface / alt setting from the host */
	int cfg;
	int iface;
	int alt_iface;

	/* Internal request + 64-byte buffer used for ep0 SETUP traffic */
	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	/* Bitmap of endpoints wedged via usb_ep_set_wedge() */
	unsigned long wedgemap;

	/* Event flags handed from IRQ context to the ep0 worker */
	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	/* Gadget driver's reply to the current SETUP, if any */
	struct usb_request *ep0_reply;
	/* Request currently in flight on an ep0 IUDMA channel */
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
	struct dentry *debugfs_usbd;
	struct dentry *debugfs_iudma;
};
321
322static const struct usb_ep_ops bcm63xx_udc_ep_ops;
323
324
325
326
327
328static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
329{
330 return container_of(g, struct bcm63xx_udc, gadget);
331}
332
333static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
334{
335 return container_of(ep, struct bcm63xx_ep, ep);
336}
337
338static inline struct bcm63xx_req *our_req(struct usb_request *req)
339{
340 return container_of(req, struct bcm63xx_req, req);
341}
342
343static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
344{
345 return bcm_readl(udc->usbd_regs + off);
346}
347
348static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
349{
350 bcm_writel(val, udc->usbd_regs + off);
351}
352
353static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
354{
355 return bcm_readl(udc->iudma_regs + off);
356}
357
358static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
359{
360 bcm_writel(val, udc->iudma_regs + off);
361}
362
363static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
364{
365 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
366 (ENETDMA_CHAN_WIDTH * chan));
367}
368
369static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
370 int chan)
371{
372 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
373 (ENETDMA_CHAN_WIDTH * chan));
374}
375
376static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
377{
378 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
379 (ENETDMA_CHAN_WIDTH * chan));
380}
381
382static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
383 int chan)
384{
385 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
386 (ENETDMA_CHAN_WIDTH * chan));
387}
388
/*
 * Gate the USBH and USBD clocks on or off together.
 *
 * Enable order is host clock first, then device clock, followed by a
 * short settle delay; disable reverses the order.  Callers must bracket
 * all register access with set_clocks(udc, true)/set_clocks(udc, false).
 */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);	/* let the clocks stabilize before MMIO */
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}
400
401
402
403
404
405
406
407
408
409
410
411
412
413
/*
 * Select which endpoint/FIFO pair subsequent USBD_*FIFO_* and
 * USBD_EPNUM_TYPEMAP accesses refer to, via the INIT_SEL field of the
 * control register (read-modify-write so other control bits survive).
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}
422
423
424
425
426
427
428
429
430
431
432static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
433 bool is_stalled)
434{
435 u32 val;
436
437 val = USBD_STALL_UPDATE_MASK |
438 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
439 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
440 usbd_writel(udc, val, USBD_STALL_REG);
441}
442
443
444
445
446
447
448
449
/*
 * Carve the hardware FIFO into per-endpoint-pair regions and program the
 * per-FIFO max packet sizes for the current connection speed.
 *
 * Channels are walked in RX/TX pairs; bcm63xx_ep_dma_select() must be
 * issued before each pair so the FIFO_CONFIG/EPSIZE writes land on the
 * right endpoint.  Slot ranges are allocated contiguously from slot 0.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		/* RX side: [start, end] slot range, then packet size */
		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		/* TX side: same layout in the TXFIFO registers */
		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* read back to flush the posted writes before moving on */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
484
485
486
487
488
489
/*
 * Flush the hardware FIFO pair belonging to endpoint @ep_num.
 * The trailing read flushes the posted FIFO_RESET write.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}
501
502
503
504
505
506static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
507{
508 int i;
509
510 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
511 bcm63xx_fifo_reset_ep(udc, i);
512}
513
514
515
516
517
/*
 * Program the endpoint-number-to-IUDMA-channel/type mapping for every
 * real endpoint.  Channels with ep_num < 0 (the ep0 fake RX channel)
 * have no endpoint and are skipped.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		/* channel pair index = IUDMA channel / 2 */
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
534
535
536
537
538
539
540
/*
 * (Re)program the per-endpoint CSR entries with the current speed,
 * configuration, interface and alt-setting values, and refresh the
 * cached/advertised max packet sizes accordingly.
 *
 * Called after enumeration and after SET_CONFIGURATION / SET_INTERFACE.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;	/* ep0 fake RX channel: no CSR entry */
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
569
570
571
572
573
574
575
576
577
578
579
580
581
582
/*
 * Queue (part of) @breq on IUDMA channel @iudma and kick the engine.
 *
 * The transfer is split into buffer descriptors of at most max_bd_bytes
 * each.  For RX channels without irq_coalesce, each BD is limited to one
 * max-packet so the hardware raises an interrupt per packet; otherwise
 * fragments up to IUDMA_MAX_FRAGMENT are used.  A trailing zero-length
 * packet is queued when the request asks for one (req.zero) and the
 * length is an exact multiple of max_pkt.
 *
 * Caller holds udc->lock; req.dma must already be mapped.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* advance the write pointer, wrapping at the ring end */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length packet: length 1 + ZERO flag */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/* the ZLP descriptor itself clears the pending flag */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		/*
		 * RX: one BD per round.  TX: stop at ring exhaustion or
		 * when the whole request (plus any ZLP) is queued.
		 */
		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		/* address must be visible before ownership passes to HW */
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
649
650
651
652
653
654
655
656
657
658
/*
 * Reap the descriptors queued by the last iudma_write() on @iudma.
 *
 * Returns the total number of bytes transferred, -EINVAL if nothing was
 * queued, or -EBUSY if the hardware still owns any descriptor (transfer
 * not yet complete — nothing is consumed in that case).
 *
 * Caller holds udc->lock.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* hardware still owns this BD: round not finished */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
687
688
689
690
691
692
/*
 * Stop IUDMA channel @iudma, clear its descriptor ring, and reprogram
 * the channel state RAM so it is ready for a fresh transfer.
 *
 * The enable bit may refuse to clear while data is in flight; we poll
 * for up to IUDMA_RESET_TIMEOUT_US, flushing the endpoint FIFO on TX
 * channels each iteration, and escalate to a forced buffer halt at the
 * halfway point.  Caller holds udc->lock (or is in single-threaded
 * init/teardown).
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* for RX, flush the FIFO first so no new data lands in the ring */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to acknowledge */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* TX won't drain unless its FIFO keeps getting flushed */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* ack any interrupts latched during the shutdown */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* reclaim every descriptor from the hardware */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* re-arm: buffer-done IRQ, burst size, ring base, state RAM */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
743
744
745
746
747
748
/*
 * One-time setup of IUDMA channel @ch_idx: bind it to its endpoint
 * (if any), and allocate its DMA-coherent descriptor ring.
 *
 * The ring is allocated with dmam_* so it is freed automatically on
 * device teardown.  Returns 0 or -ENOMEM.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);	/* odd channels transmit */
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 channels (ep_num <= 0) are always enabled */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}
782
783
784
785
786
787
788
/*
 * Bring up the whole IUDMA engine: enable the core, initialize and
 * reset every channel, and unmask all per-channel global interrupts.
 *
 * Returns 0 on success or the error from iudma_init_channel().
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	/* unmask the low BCM63XX_NUM_IUDMA channel IRQ bits */
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}
805
806
807
808
809
810
811
/*
 * Quiesce the IUDMA engine: mask all global channel IRQs, reset every
 * channel, then disable the core.  Descriptor rings are devm-managed
 * and freed elsewhere.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}
823
824
825
826
827
828
829
830
831
832
/*
 * Enable or disable the USBD control-event interrupts (reset, SETUP,
 * SET_CONFIGURATION, SET_INTERFACE, link change).  Any already-latched
 * events are acked unconditionally so stale state can't fire later.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
847
848
849
850
851
852
853
854
855
856
857
858
/*
 * Route the shared USB PHY to either device or host mode.
 *
 * On BCM6328 the routing goes through a GPIO pinmux register; on all
 * supported parts the UTMI control register's HOSTB/NODRIV bits for
 * this port and the USBD swap bit are updated.  Note: device mode
 * leaves NODRIV set here, so the D+ pullup stays off until
 * bcm63xx_select_pullup() enables it.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			   GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
889
890
891
892
893
894
895
896
897
898
/*
 * Enable/disable the D+ pullup for this port by toggling the NODRIV bit
 * in the UTMI control register; the host sees a (dis)connect event.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}
910
911
912
913
914
915
916
917
/*
 * Tear down UDC hardware state: clocks must be running while the IUDMA
 * engine is shut down, then both clocks are released.  Counterpart of
 * bcm63xx_init_udc_hw().
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
927
928
929
930
931
932static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
933{
934 int i, rc = 0;
935 u32 val;
936
937 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
938 GFP_KERNEL);
939 if (!udc->ep0_ctrl_buf)
940 return -ENOMEM;
941
942 INIT_LIST_HEAD(&udc->gadget.ep_list);
943 for (i = 0; i < BCM63XX_NUM_EP; i++) {
944 struct bcm63xx_ep *bep = &udc->bep[i];
945
946 bep->ep.name = bcm63xx_ep_name[i];
947 bep->ep_num = i;
948 bep->ep.ops = &bcm63xx_udc_ep_ops;
949 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
950 bep->halted = 0;
951 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
952 bep->udc = udc;
953 bep->ep.desc = NULL;
954 INIT_LIST_HEAD(&bep->queue);
955 }
956
957 udc->gadget.ep0 = &udc->bep[0].ep;
958 list_del(&udc->bep[0].ep.ep_list);
959
960 udc->gadget.speed = USB_SPEED_UNKNOWN;
961 udc->ep0state = EP0_SHUTDOWN;
962
963 udc->usbh_clk = clk_get(udc->dev, "usbh");
964 if (IS_ERR(udc->usbh_clk))
965 return -EIO;
966
967 udc->usbd_clk = clk_get(udc->dev, "usbd");
968 if (IS_ERR(udc->usbd_clk)) {
969 clk_put(udc->usbh_clk);
970 return -EIO;
971 }
972
973 set_clocks(udc, true);
974
975 val = USBD_CONTROL_AUTO_CSRS_MASK |
976 USBD_CONTROL_DONE_CSRS_MASK |
977 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
978 usbd_writel(udc, val, USBD_CONTROL_REG);
979
980 val = USBD_STRAPS_APP_SELF_PWR_MASK |
981 USBD_STRAPS_APP_RAM_IF_MASK |
982 USBD_STRAPS_APP_CSRPRGSUP_MASK |
983 USBD_STRAPS_APP_8BITPHY_MASK |
984 USBD_STRAPS_APP_RMTWKUP_MASK;
985
986 if (udc->gadget.max_speed == USB_SPEED_HIGH)
987 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
988 else
989 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
990 usbd_writel(udc, val, USBD_STRAPS_REG);
991
992 bcm63xx_set_ctrl_irqs(udc, false);
993
994 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
995
996 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
997 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
998 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
999
1000 rc = iudma_init(udc);
1001 set_clocks(udc, false);
1002 if (rc)
1003 bcm63xx_uninit_udc_hw(udc);
1004
1005 return 0;
1006}
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020static int bcm63xx_ep_enable(struct usb_ep *ep,
1021 const struct usb_endpoint_descriptor *desc)
1022{
1023 struct bcm63xx_ep *bep = our_ep(ep);
1024 struct bcm63xx_udc *udc = bep->udc;
1025 struct iudma_ch *iudma = bep->iudma;
1026 unsigned long flags;
1027
1028 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1029 return -EINVAL;
1030
1031 if (!udc->driver)
1032 return -ESHUTDOWN;
1033
1034 spin_lock_irqsave(&udc->lock, flags);
1035 if (iudma->enabled) {
1036 spin_unlock_irqrestore(&udc->lock, flags);
1037 return -EINVAL;
1038 }
1039
1040 iudma->enabled = true;
1041 BUG_ON(!list_empty(&bep->queue));
1042
1043 iudma_reset_channel(udc, iudma);
1044
1045 bep->halted = 0;
1046 bcm63xx_set_stall(udc, bep, false);
1047 clear_bit(bep->ep_num, &udc->wedgemap);
1048
1049 ep->desc = desc;
1050 ep->maxpacket = usb_endpoint_maxp(desc);
1051
1052 spin_unlock_irqrestore(&udc->lock, flags);
1053 return 0;
1054}
1055
1056
1057
1058
1059
/*
 * usb_ep_ops.disable: stop the endpoint's IUDMA channel and fail every
 * queued request with -ESHUTDOWN.
 *
 * udc->lock is dropped around each completion callback (gadget drivers
 * may requeue or sleep-free from .complete); the list_for_each_safe
 * iteration tolerates entries disappearing while unlocked.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct list_head *pos, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_safe(pos, n, &bep->queue) {
			struct bcm63xx_req *breq =
				list_entry(pos, struct bcm63xx_req, queue);

			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			/* completion may sleep/requeue: call unlocked */
			spin_unlock_irqrestore(&udc->lock, flags);
			breq->req.complete(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
1100
1101
1102
1103
1104
1105
1106static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1107 gfp_t mem_flags)
1108{
1109 struct bcm63xx_req *breq;
1110
1111 breq = kzalloc(sizeof(*breq), mem_flags);
1112 if (!breq)
1113 return NULL;
1114 return &breq->req;
1115}
1116
1117
1118
1119
1120
1121
/* usb_ep_ops.free_request: release a request from alloc_request. */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	kfree(our_req(req));
}
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1144 gfp_t mem_flags)
1145{
1146 struct bcm63xx_ep *bep = our_ep(ep);
1147 struct bcm63xx_udc *udc = bep->udc;
1148 struct bcm63xx_req *breq = our_req(req);
1149 unsigned long flags;
1150 int rc = 0;
1151
1152 if (unlikely(!req || !req->complete || !req->buf || !ep))
1153 return -EINVAL;
1154
1155 req->actual = 0;
1156 req->status = 0;
1157 breq->offset = 0;
1158
1159 if (bep == &udc->bep[0]) {
1160
1161 if (udc->ep0_reply)
1162 return -EINVAL;
1163
1164 udc->ep0_reply = req;
1165 schedule_work(&udc->ep0_wq);
1166 return 0;
1167 }
1168
1169 spin_lock_irqsave(&udc->lock, flags);
1170 if (!bep->iudma->enabled) {
1171 rc = -ESHUTDOWN;
1172 goto out;
1173 }
1174
1175 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1176 if (rc == 0) {
1177 list_add_tail(&breq->queue, &bep->queue);
1178 if (list_is_singular(&bep->queue))
1179 iudma_write(udc, bep->iudma, breq);
1180 }
1181
1182out:
1183 spin_unlock_irqrestore(&udc->lock, flags);
1184 return rc;
1185}
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
/*
 * usb_ep_ops.dequeue: cancel a queued request.
 *
 * If the request is at the head of the queue it is in flight, so the
 * IUDMA channel is reset before removal and the next queued request (if
 * any) is started.  Otherwise it is simply unlinked.
 *
 * NOTE(review): the completion callback runs with -ESHUTDOWN even when
 * the queue was empty and -EINVAL is returned — presumably intentional
 * "always complete" semantics inherited from the original driver, but
 * worth confirming against the gadget API contract.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		/* request is active: stop DMA before touching the list */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}
1236
1237
1238
1239
1240
1241
1242
1243
1244static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1245{
1246 struct bcm63xx_ep *bep = our_ep(ep);
1247 struct bcm63xx_udc *udc = bep->udc;
1248 unsigned long flags;
1249
1250 spin_lock_irqsave(&udc->lock, flags);
1251 bcm63xx_set_stall(udc, bep, !!value);
1252 bep->halted = value;
1253 spin_unlock_irqrestore(&udc->lock, flags);
1254
1255 return 0;
1256}
1257
1258
1259
1260
1261
1262
1263
1264static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1265{
1266 struct bcm63xx_ep *bep = our_ep(ep);
1267 struct bcm63xx_udc *udc = bep->udc;
1268 unsigned long flags;
1269
1270 spin_lock_irqsave(&udc->lock, flags);
1271 set_bit(bep->ep_num, &udc->wedgemap);
1272 bcm63xx_set_stall(udc, bep, true);
1273 spin_unlock_irqrestore(&udc->lock, flags);
1274
1275 return 0;
1276}
1277
/* Endpoint operations exported to the gadget core for all endpoints. */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable = bcm63xx_ep_enable,
	.disable = bcm63xx_ep_disable,

	.alloc_request = bcm63xx_udc_alloc_request,
	.free_request = bcm63xx_udc_free_request,

	.queue = bcm63xx_udc_queue,
	.dequeue = bcm63xx_udc_dequeue,

	.set_halt = bcm63xx_udc_set_halt,
	.set_wedge = bcm63xx_udc_set_wedge,
};
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
/*
 * Deliver a SETUP packet to the gadget driver's .setup() handler.
 * The lock is dropped across the callback because the handler may call
 * back into this driver (e.g. usb_ep_queue on ep0).
 * Returns whatever the gadget driver returns (< 0 means stall).
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1325{
1326 struct usb_ctrlrequest ctrl;
1327 int rc;
1328
1329 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1330 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1331 ctrl.wValue = cpu_to_le16(udc->cfg);
1332 ctrl.wIndex = 0;
1333 ctrl.wLength = 0;
1334
1335 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1336 if (rc < 0) {
1337 dev_warn_ratelimited(udc->dev,
1338 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1339 udc->cfg);
1340 }
1341 return rc;
1342}
1343
1344
1345
1346
1347
1348static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1349{
1350 struct usb_ctrlrequest ctrl;
1351 int rc;
1352
1353 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1354 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1355 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1356 ctrl.wIndex = cpu_to_le16(udc->iface);
1357 ctrl.wLength = 0;
1358
1359 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1360 if (rc < 0) {
1361 dev_warn_ratelimited(udc->dev,
1362 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1363 udc->iface, udc->alt_iface);
1364 }
1365 return rc;
1366}
1367
1368
1369
1370
1371
1372
1373
/*
 * DMA-map @req and start it on ep0 channel @ch_idx (IUDMA_EP0_RXCHAN or
 * IUDMA_EP0_TXCHAN), recording it as the in-flight ep0 request.
 * Only one ep0 request may be in flight at a time (BUG_ON enforces).
 * Caller holds udc->lock.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}
1388
1389
1390
1391
1392
1393
1394
/*
 * Finish an ep0 request: set its status (zeroing actual on error) and
 * invoke its completion callback, if any, with udc->lock dropped.
 * Internal ep0 requests have no callback and just get the status set.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}
1407
1408
1409
1410
1411
1412
1413
/*
 * Abort the gadget driver's pending ep0 reply: unmap it, drop it from
 * the in-flight slot if it is there, and complete it with -ESHUTDOWN.
 * @is_tx selects the DMA direction used for the original mapping.
 * Caller holds udc->lock.
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}
1426
1427
1428
1429
1430
1431
1432static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1433{
1434 struct usb_request *req = udc->ep0_request;
1435
1436 udc->ep0_req_completed = 0;
1437 udc->ep0_request = NULL;
1438
1439 return req->actual;
1440}
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1452 int length)
1453{
1454 struct usb_request *req = &udc->ep0_ctrl_req.req;
1455
1456 req->buf = udc->ep0_ctrl_buf;
1457 req->length = length;
1458 req->complete = NULL;
1459
1460 bcm63xx_ep0_map_write(udc, ch_idx, req);
1461}
1462
1463
1464
1465
1466
1467
1468
1469
1470
/*
 * Handle a completed SETUP reception on the ep0 RX channel and decide
 * the next ep0 state: requeue on short/auto-acked packets, otherwise
 * dispatch to the gadget driver and enter the matching data phase
 * (IN/OUT) when wLength is nonzero.  A driver rejection stalls ep0.
 * Caller holds udc->lock.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Zero-byte "SETUP" means the hardware already handled the
	 * request itself (auto-acked standard request) — nothing to
	 * forward, just re-arm for the next packet.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* anything other than exactly 8 bytes is not a valid SETUP */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* hand the request to the gadget driver; stall on rejection */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
/*
 * EP0_IDLE handler: consume exactly one latched event per call, in
 * priority order (reset, spoofed SET_CONFIGURATION / SET_INTERFACE,
 * completed SETUP, shutdown, stray reply).
 *
 * Returns 0 when an event was consumed (caller re-runs the state
 * machine) or -EAGAIN when there is nothing to do.
 * Caller holds udc->lock.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* ensure the channel teardown is visible before SHUTDOWN */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * A reply arrived with no transfer to attach it to —
		 * nothing sane can be done with it; complete it with
		 * -ESHUTDOWN so the gadget driver gets its buffer back.
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}
1564
1565
1566
1567
1568
1569
1570
/*
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Performs at most one state transition.  Returns 0 if work was done
 * (the caller should run another round), or -EAGAIN if the state
 * machine has settled.
 *
 * Caller must hold udc->lock.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* arm the RX descriptor to catch the next SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
			BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: the reply shows up in udc->ep0_reply and we
		 * queue it on the TX channel.
		 *
		 * Shutdown case: a reset/shutdown request arrived while we
		 * were waiting for the reply, so just requeue.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
				udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: the TX transfer finished (ep0_req_completed
		 * is set by the data ISR); retire it and wait for the next
		 * SETUP.
		 *
		 * Shutdown case: abort the in-flight TX and discard the
		 * reply.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			/* retire the completed ep0 request */
			bcm63xx_ep0_read_complete(udc);

			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* mirror image of the IN data phase setup above */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
				udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* like the IN case, but an OUT transfer is followed by an
		 * explicit zero-length status packet on the TX channel */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte status packet on the TX channel */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: the zero-length status packet went out;
		 * retire it and requeue for the next SETUP.
		 *
		 * Shutdown case: abort the status TX and drop the pending
		 * request.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Entered after a spoofed SET_CONFIGURATION/SET_INTERFACE
		 * (see bcm63xx_ep0_do_idle()).  The gadget driver's reply
		 * only needs a software completion here; presumably the
		 * hardware handles the real status phase itself —
		 * TODO confirm against the USBD core documentation.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			/* no reply yet; on shutdown, stop waiting */
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		/* parked; bcm63xx_udc_pullup() restarts the machine */
		break;
	}

	/* no transition and nothing done: tell the caller to stop */
	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723static void bcm63xx_ep0_process(struct work_struct *w)
1724{
1725 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1726 spin_lock_irq(&udc->lock);
1727 while (bcm63xx_ep0_one_round(udc) == 0)
1728 ;
1729 spin_unlock_irq(&udc->lock);
1730}
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1741{
1742 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1743
1744 return (usbd_readl(udc, USBD_STATUS_REG) &
1745 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1746}
1747
1748
1749
1750
1751
1752
1753
1754
/*
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: nonzero to enable pullup, zero to disable.
 *
 * Enabling brings the controller out of EP0_SHUTDOWN, unstalls all
 * endpoints, and arms the ep0 state machine.  Disabling drops the
 * pullup, requests an ep0 shutdown, and waits for the worker to reach
 * EP0_SHUTDOWN before masking the control IRQs.
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* forget any wedged state and unstall every endpoint */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		/* hand the shutdown to the ep0 worker; we must drop the
		 * lock so the worker can run */
		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		while (1) {
			schedule_work(&udc->ep0_wq);
			/* NOTE(review): ep0state is polled without the
			 * lock here; the worker publishes EP0_SHUTDOWN
			 * after an mb() in bcm63xx_ep0_do_idle() */
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
1796
1797
1798
1799
1800
1801
/*
 * bcm63xx_udc_start - Start the controller and bind it to a gadget driver.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 *
 * Only high-speed-capable drivers with a setup() callback are accepted.
 * Clocks, FIFOs, endpoints, and the PHY are brought up under the lock
 * before the driver pointer is published.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hardware bring-up; the order of these calls is significant */
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1833
1834
1835
1836
1837
1838
1839static int bcm63xx_udc_stop(struct usb_gadget *gadget,
1840 struct usb_gadget_driver *driver)
1841{
1842 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1843 unsigned long flags;
1844
1845 spin_lock_irqsave(&udc->lock, flags);
1846
1847 udc->driver = NULL;
1848
1849
1850
1851
1852
1853
1854
1855 msleep(100);
1856
1857 bcm63xx_select_phy_mode(udc, false);
1858 set_clocks(udc, false);
1859
1860 spin_unlock_irqrestore(&udc->lock, flags);
1861
1862 return 0;
1863}
1864
/* Gadget controller operations exported to the UDC framework */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame = bcm63xx_udc_get_frame,
	.pullup = bcm63xx_udc_pullup,
	.udc_start = bcm63xx_udc_start,
	.udc_stop = bcm63xx_udc_stop,
};
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1886{
1887 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1888
1889 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1890 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1891 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1892 USBD_STATUS_ALTINTF_SHIFT;
1893 bcm63xx_ep_setup(udc);
1894}
1895
1896
1897
1898
1899
1900
1901
1902
1903static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1904{
1905 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1906 enum usb_device_speed oldspeed = udc->gadget.speed;
1907
1908 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1909 case BCM63XX_SPD_HIGH:
1910 udc->gadget.speed = USB_SPEED_HIGH;
1911 break;
1912 case BCM63XX_SPD_FULL:
1913 udc->gadget.speed = USB_SPEED_FULL;
1914 break;
1915 default:
1916
1917 udc->gadget.speed = USB_SPEED_UNKNOWN;
1918 dev_err(udc->dev,
1919 "received SETUP packet with invalid link speed\n");
1920 return 0;
1921 }
1922
1923 if (udc->gadget.speed != oldspeed) {
1924 dev_info(udc->dev, "link up, %s-speed mode\n",
1925 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1926 return 1;
1927 } else {
1928 return 0;
1929 }
1930}
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1944{
1945 int i;
1946
1947 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1948 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1949 if (!new_status)
1950 clear_bit(i, &udc->wedgemap);
1951 }
1952}
1953
1954
1955
1956
1957
1958
1959
1960
1961
/*
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * Handles link change, bus reset, SETUP arrival, SET_CONFIGURATION,
 * and SET_INTERFACE events latched by the USBD core, scheduling the
 * ep0 worker where software follow-up is needed.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false;

	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	/* ack all pending, unmasked events */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* only report "link down" if we were previously up */
		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* a bus reset clears any wedged/stalled endpoints */
		bcm63xx_update_wedge(udc, false);

		/* let the ep0 worker consume the reset flag */
		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			/* speed just changed: reprogram FIFOs/endpoints */
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		/* re-stall any endpoints the gadget driver wedged */
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

	/* notify the gadget driver outside the lock */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);

	return IRQ_HANDLED;
}
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
/*
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * Advances the request at the head of this channel: accumulates the
 * bytes reaped by iudma_read(), requeues the next fragment if the
 * request is not finished, and completes the request otherwise.
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* ack the BUFDONE interrupt for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special case for ep0: the request lives in udc->ep0_request,
	 * not on an endpoint queue */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* rc < 0: nothing usable was reaped; leave state alone */
		if (rc >= 0) {
			req->actual += rc;

			/* done when the requested length is reached or the
			 * last fragment came up short (bd_bytes > rc) */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* clamp in case RX overran the request */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next fragment */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				/* clamp in case RX overran the request */
				req->actual = min(req->actual, req->length);

				/* start the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				/* queue up the next fragment */
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	/* run the completion callback outside the lock */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2118{
2119 struct bcm63xx_udc *udc = s->private;
2120
2121 if (!udc->driver)
2122 return -ENODEV;
2123
2124 seq_printf(s, "ep0 state: %s\n",
2125 bcm63xx_ep0_state_names[udc->ep0state]);
2126 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2127 udc->ep0_req_reset ? "reset " : "",
2128 udc->ep0_req_set_cfg ? "set_cfg " : "",
2129 udc->ep0_req_set_iface ? "set_iface " : "",
2130 udc->ep0_req_shutdown ? "shutdown " : "",
2131 udc->ep0_request ? "pending " : "",
2132 udc->ep0_req_completed ? "completed " : "",
2133 udc->ep0_reply ? "reply " : "");
2134 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2135 udc->cfg, udc->iface, udc->alt_iface);
2136 seq_printf(s, "regs:\n");
2137 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2138 usbd_readl(udc, USBD_CONTROL_REG),
2139 usbd_readl(udc, USBD_STRAPS_REG),
2140 usbd_readl(udc, USBD_STATUS_REG));
2141 seq_printf(s, " events: %08x; stall: %08x\n",
2142 usbd_readl(udc, USBD_EVENTS_REG),
2143 usbd_readl(udc, USBD_STALL_REG));
2144
2145 return 0;
2146}
2147
2148
2149
2150
2151
2152
2153
2154
2155static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2156{
2157 struct bcm63xx_udc *udc = s->private;
2158 int ch_idx, i;
2159 u32 sram2, sram3;
2160
2161 if (!udc->driver)
2162 return -ENODEV;
2163
2164 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2165 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2166 struct list_head *pos;
2167
2168 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2169 switch (iudma_defaults[ch_idx].ep_type) {
2170 case BCMEP_CTRL:
2171 seq_printf(s, "control");
2172 break;
2173 case BCMEP_BULK:
2174 seq_printf(s, "bulk");
2175 break;
2176 case BCMEP_INTR:
2177 seq_printf(s, "interrupt");
2178 break;
2179 }
2180 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2181 seq_printf(s, " [ep%d]:\n",
2182 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2183 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2184 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2185 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2186 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2187 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2188
2189 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2190 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2191 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2192 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2193 sram2 >> 16, sram2 & 0xffff,
2194 sram3 >> 16, sram3 & 0xffff,
2195 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2196 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2197 iudma->n_bds);
2198
2199 if (iudma->bep) {
2200 i = 0;
2201 list_for_each(pos, &iudma->bep->queue)
2202 i++;
2203 seq_printf(s, "; %d queued\n", i);
2204 } else {
2205 seq_printf(s, "\n");
2206 }
2207
2208 for (i = 0; i < iudma->n_bds; i++) {
2209 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2210
2211 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2212 i * sizeof(*d), i,
2213 d->len_stat >> 16, d->len_stat & 0xffff,
2214 d->address);
2215 if (d == iudma->read_bd)
2216 seq_printf(s, " <<RD");
2217 if (d == iudma->write_bd)
2218 seq_printf(s, " <<WR");
2219 seq_printf(s, "\n");
2220 }
2221
2222 seq_printf(s, "\n");
2223 }
2224
2225 return 0;
2226}
2227
2228static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2229{
2230 return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2231}
2232
2233static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2234{
2235 return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2236}
2237
/* file_operations for the debugfs "usbd" register/state dump */
static const struct file_operations usbd_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_usbd_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};
2245
/* file_operations for the debugfs "iudma" channel/descriptor dump */
static const struct file_operations iudma_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_iudma_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};
2253
2254
2255
2256
2257
2258
/*
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 *
 * Best-effort: on any failure we log a message and run without debugfs;
 * the handles are only stored once everything succeeded.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root, *usbd, *iudma;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;

	usbd = debugfs_create_file("usbd", 0400, root, udc,
			&usbd_dbg_fops);
	if (!usbd)
		goto err_usbd;
	iudma = debugfs_create_file("iudma", 0400, root, udc,
			&iudma_dbg_fops);
	if (!iudma)
		goto err_iudma;

	udc->debugfs_root = root;
	udc->debugfs_usbd = usbd;
	udc->debugfs_iudma = iudma;
	return;
err_iudma:
	debugfs_remove(usbd);
err_usbd:
	debugfs_remove(root);
err_root:
	dev_err(udc->dev, "debugfs is not available\n");
}
2290
2291
2292
2293
2294
2295
2296
2297static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2298{
2299 debugfs_remove(udc->debugfs_iudma);
2300 debugfs_remove(udc->debugfs_usbd);
2301 debugfs_remove(udc->debugfs_root);
2302 udc->debugfs_iudma = NULL;
2303 udc->debugfs_usbd = NULL;
2304 udc->debugfs_root = NULL;
2305}
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
/*
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Maps the USBD and IUDMA register ranges, initializes the hardware,
 * requests the control IRQ plus one IRQ per IUDMA channel, creates the
 * debugfs entries, and registers the gadget with the UDC core.
 * Platform data is mandatory.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc) {
		dev_err(dev, "cannot allocate memory\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	/* resource 0: USBD core registers; resource 1: IUDMA registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	/* the board or the module parameter can both force full speed */
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* bring up the UDC hardware (defined elsewhere in this file) */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control path interrupt */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource #0\n");
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0) {
		dev_err(dev, "error requesting IRQ #%d\n", irq);
		goto out_uninit;
	}

	/* IRQ resources #1..#6: one data interrupt per IUDMA channel */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0) {
			dev_err(dev, "error requesting IRQ #%d\n", irq);
			goto out_uninit;
		}
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;
}
2405
2406
2407
2408
2409
/*
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	/* the UDC core must have unbound any gadget driver by now */
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
2422
/* Platform driver glue; instantiated via platform data from the BSP */
static struct platform_driver bcm63xx_udc_driver = {
	.probe = bcm63xx_udc_probe,
	.remove = bcm63xx_udc_remove,
	.driver = {
		.name = DRV_MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller")
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>")
MODULE_LICENSE("GPL")
MODULE_ALIAS("platform:" DRV_MODULE_NAME)
2437