1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/bitops.h>
14#include <linux/bug.h>
15#include <linux/clk.h>
16#include <linux/compiler.h>
17#include <linux/debugfs.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/dma-mapping.h>
21#include <linux/errno.h>
22#include <linux/interrupt.h>
23#include <linux/ioport.h>
24#include <linux/kconfig.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/platform_device.h>
30#include <linux/sched.h>
31#include <linux/seq_file.h>
32#include <linux/slab.h>
33#include <linux/timer.h>
34#include <linux/usb/ch9.h>
35#include <linux/usb/gadget.h>
36#include <linux/workqueue.h>
37
38#include <bcm63xx_cpu.h>
39#include <bcm63xx_iudma.h>
40#include <bcm63xx_dev_usb_usbd.h>
41#include <bcm63xx_io.h>
42#include <bcm63xx_regs.h>
43
/* Driver name used in registration and log messages. */
#define DRV_MODULE_NAME "bcm63xx_udc"

/* Canonical ep0 name; compared by pointer in bcm63xx_ep_enable(). */
static const char bcm63xx_ep0name[] = "ep0";
47
/*
 * Static name/capability table for the five hardware endpoints.
 * Index matches bcm63xx_udc->bep[]: ep0 (control, both directions),
 * then fixed-direction bulk and interrupt endpoints.
 */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
/* Helper to keep each table entry on one line. */
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
71
/* Force full-speed operation; read-only after module load. */
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * When set, RX transfers use one large DMA fragment (a single IRQ per
 * transfer) instead of one descriptor per packet; see iudma_write().
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
98
/* Number of logical USB endpoints exposed to the gadget layer. */
#define BCM63XX_NUM_EP 5
/* Number of IUDMA channels (one RX + one TX per endpoint pair). */
#define BCM63XX_NUM_IUDMA 6
/* Number of RX/TX FIFO pairs programmed by bcm63xx_fifo_setup(). */
#define BCM63XX_NUM_FIFO_PAIRS 3

/* Max time to wait (in us) for an IUDMA channel reset to complete. */
#define IUDMA_RESET_TIMEOUT_US 10000

/* Fixed IUDMA channel assignments for the ep0 control endpoint. */
#define IUDMA_EP0_RXCHAN 0
#define IUDMA_EP0_TXCHAN 1

/* Largest payload a single DMA descriptor may carry. */
#define IUDMA_MAX_FRAGMENT 2048
/* Max packet size for control transfers (also the ep0 buffer size). */
#define BCM63XX_MAX_CTRL_PKT 64

/* Hardware endpoint type codes (USBD_EPNUM_TYPEMAP / CSR encoding). */
#define BCMEP_CTRL 0x00
#define BCMEP_ISOC 0x01
#define BCMEP_BULK 0x02
#define BCMEP_INTR 0x03

/* Hardware endpoint direction codes. */
#define BCMEP_OUT 0x00
#define BCMEP_IN 0x01

/* Speed strap values for USBD_STRAPS_REG (note: HIGH is 0). */
#define BCM63XX_SPD_FULL 1
#define BCM63XX_SPD_HIGH 0

/* Offsets of the IUDMA channel-config and state RAM register banks. */
#define IUDMA_DMAC_OFFSET 0x200
#define IUDMA_DMAS_OFFSET 0x400
124
/*
 * States of the ep0 control-transfer state machine, driven from the
 * ep0 workqueue.  The FAKE status phase exists because the hardware
 * auto-acks SET_CONFIGURATION/SET_INTERFACE, so the driver must spoof
 * the status stage toward the gadget driver.
 */
enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

/* Human-readable names for the states above, for debug output. */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
148
149
150
151
152
153
154
155
156
157
158
/*
 * struct iudma_ch_cfg - static configuration for one IUDMA channel.
 * @ep_num: logical endpoint number, or -1 for the dedicated SETUP RX channel
 * @n_bds: number of buffer descriptors in the channel's ring
 * @ep_type: BCMEP_* hardware endpoint type
 * @dir: BCMEP_OUT or BCMEP_IN
 * @n_fifo_slots: FIFO slots reserved for this channel (see fifo_setup)
 * @max_pkt_hs: max packet size when operating at high speed
 * @max_pkt_fs: max packet size when operating at full speed
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};

/*
 * Per-channel defaults.  Even channels are RX, odd channels are TX
 * (see iudma_init_channel); channels 0/1 serve ep0, 2/3 the bulk
 * endpoints, 4/5 the interrupt endpoints.
 */
static const struct iudma_ch_cfg iudma_defaults[] = {

	/* ep_num  n_bds  ep_type     dir        n_fifo  max_hs  max_fs */
	[0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
	[1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
	[2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
	[3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
	[4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
	[5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
};
187
188struct bcm63xx_udc;
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
/*
 * struct iudma_ch - runtime state for one IUDMA channel.
 * @ch_idx: channel index (0..BCM63XX_NUM_IUDMA-1)
 * @ep_num: logical endpoint number, or negative for the SETUP channel
 * @enabled: whether the gadget layer has enabled this channel's endpoint
 * @max_pkt: current max packet size (speed-dependent, set in ep_setup)
 * @is_tx: true for odd (TX) channels
 * @bep: owning endpoint, or NULL for the SETUP channel
 * @udc: parent device
 * @read_bd: next descriptor to reap in iudma_read()
 * @write_bd: next descriptor to fill in iudma_write()
 * @end_bd: last descriptor of the ring (wrap point)
 * @n_bds_used: descriptors queued for the transfer in flight
 * @bd_ring: coherent DMA ring of descriptors
 * @bd_ring_dma: bus address of @bd_ring
 * @n_bds: total descriptors in the ring
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};
232
233
234
235
236
237
238
239
240
241
/*
 * struct bcm63xx_ep - driver view of one USB endpoint.
 * @ep_num: logical endpoint number
 * @iudma: IUDMA channel bound to this endpoint
 * @ep: generic gadget-layer endpoint
 * @udc: parent device
 * @queue: pending bcm63xx_req requests, protected by udc->lock
 * @halted: nonzero while the endpoint is stalled via set_halt
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};
250
251
252
253
254
255
256
257
258
/*
 * struct bcm63xx_req - driver wrapper around a gadget usb_request.
 * @queue: linkage on the owning endpoint's queue
 * @req: embedded generic request
 * @offset: bytes of req.length already submitted to the DMA ring
 * @bd_bytes: bytes covered by the descriptors queued in the last
 *            iudma_write() pass
 * @iudma: channel the request was last written to
 */
struct bcm63xx_req {
	struct list_head queue;
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
/*
 * struct bcm63xx_udc - device-wide driver state.
 * @lock: protects all mutable fields below and the endpoint queues
 * @dev: generic device handle used for logging and DMA allocation
 * @pd: board platform data (port number, etc.)
 * @usbd_clk/@usbh_clk: device and host-block clocks (see set_clocks)
 * @gadget/@driver: composite gadget device and the bound function driver
 * @usbd_regs/@iudma_regs: mapped USBD and IUDMA register banks
 * @bep/@iudma: per-endpoint and per-channel state
 * @cfg/@iface/@alt_iface: last configuration/interface values, replayed
 *     into the endpoint CSRs and spoofed SETUPs
 * @ep0_ctrl_req/@ep0_ctrl_buf: internal request + 64-byte buffer used
 *     for SETUP reception and fake status phases
 * @ep0state: current enum bcm63xx_ep0_state
 * @ep0_wq: work item driving the ep0 state machine
 * @wedgemap: bitmap of endpoints wedged via set_wedge
 * @ep0_req_*: single-bit event flags posted to the ep0 state machine
 * @ep0_reply: data-phase reply queued by the gadget driver
 * @ep0_request: internal/external request currently on an ep0 channel
 * @debugfs_*: debugfs entries
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
	struct dentry *debugfs_usbd;
	struct dentry *debugfs_iudma;
};
341
342static const struct usb_ep_ops bcm63xx_udc_ep_ops;
343
344
345
346
347
/* Convert a generic usb_gadget back to the enclosing bcm63xx_udc. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

/* Convert a generic usb_ep back to the enclosing bcm63xx_ep. */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

/* Convert a generic usb_request back to the enclosing bcm63xx_req. */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}
362
/* 32-bit accessors for the USBD core register bank. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

/* Accessors for the global IUDMA registers. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

/* Accessors for per-channel IUDMA config registers (DMAC bank). */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

/* Accessors for per-channel IUDMA state RAM (DMAS bank). */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
408
/*
 * Gate both USB clocks on or off.  The short delay after enabling lets
 * the hardware settle before register access; disable order is the
 * reverse of enable order.
 */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}
420
421
422
423
424
425
426
427
428
429
430
431
432
433
/*
 * bcm63xx_ep_dma_select - Select which endpoint/FIFO the indexed
 * USBD registers (FIFO config, typemap, FIFO reset) will address.
 * @udc: Reference to the device controller.
 * @idx: Endpoint/FIFO index to select.
 *
 * Caller must hold whatever synchronization protects the USBD
 * registers; the selection persists until changed.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}
442
443
444
445
446
447
448
449
450
451
/*
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * The write is self-contained: UPDATE plus the endpoint number plus
 * the desired enable state, all in a single register.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}
462
463
464
465
466
467
468
469
/*
 * bcm63xx_fifo_setup - Carve the hardware FIFO space into per-channel
 * regions and program each channel's packet size.
 * @udc: Reference to the device controller.
 *
 * Walks the IUDMA channels two at a time (RX/TX pair), allocating
 * contiguous FIFO slot ranges from iudma_defaults and selecting the
 * speed-appropriate max packet size.  The trailing read flushes the
 * posted writes.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* first ep = 0 */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
504
505
506
507
508
509
/*
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint (FIFO pair) index to flush.
 *
 * The final read flushes the posted write before the caller proceeds.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}
521
522
523
524
525
526static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
527{
528 int i;
529
530 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
531 bcm63xx_fifo_reset_ep(udc, i);
532}
533
534
535
536
537
/*
 * bcm63xx_ep_init - Map each logical endpoint to its IUDMA channel.
 * @udc: Reference to the device controller.
 *
 * Skips channel 0 (the dedicated SETUP channel, ep_num == -1) and
 * programs the type/DMA-channel pairing for every other endpoint.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		/* i >> 1: RX/TX channel pair index for this endpoint */
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
554
555
556
557
558
559
560
/*
 * bcm63xx_ep_setup - Program the endpoint CSRs for the current
 * configuration, interface, and link speed.
 * @udc: Reference to the device controller.
 *
 * Also caches the speed-dependent max packet size in each iudma
 * channel and pushes it into the gadget-layer endpoint limits.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		/* idx < 0: dedicated SETUP channel, no CSR to program */
		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
/*
 * iudma_write - Queue (part of) a request onto a channel's BD ring and
 * start the DMA.
 * @udc: Reference to the device controller.
 * @iudma: Channel to write to.
 * @breq: Request to send; breq->offset tracks how much has already
 *        been submitted across calls.
 *
 * TX transfers are split into IUDMA_MAX_FRAGMENT-sized descriptors.
 * For RX, the EOP condition fires on the first descriptor, so a single
 * descriptor is queued per call (max_pkt bytes, or a full fragment when
 * irq_coalesce is set).  A trailing zero-length packet is queued when
 * the length is an exact multiple of max_pkt and req.zero is set.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
				 iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* wrap the ring at the last descriptor */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length packet descriptor */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more loop iteration after all
		 * of the data has been queued, to emit the ZLP.
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		/* ensure the address is visible before ownership transfers */
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* kick the channel */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
669
670
671
672
673
674
675
676
677
678
/*
 * iudma_read - Reap the descriptors queued by the last iudma_write().
 * @udc: Reference to the device controller (unused here).
 * @iudma: Channel to reap.
 *
 * Returns the total byte count reported by the descriptors, -EINVAL if
 * nothing was queued, or -EBUSY if the hardware still owns any
 * descriptor (transfer not yet complete).
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
707
708
709
710
711
712
/*
 * iudma_reset_channel - Stop one channel and reinitialize its ring.
 * @udc: Reference to the device controller.
 * @iudma: Channel to reset.
 *
 * Disables the channel, polls (up to IUDMA_RESET_TIMEOUT_US) for the
 * enable bit to clear, escalating to a forcible buffer halt at half
 * the timeout.  Clears the descriptor ring, reinstalls interrupt
 * settings, and reprograms the ring base address.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the TX FIFO so the channel can drain */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* ack any pending channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* clear and reclaim every descriptor */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
763
764
765
766
767
768
/*
 * iudma_init_channel - One-time channel setup: link to its endpoint and
 * allocate the descriptor ring.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel index to initialize.
 *
 * Returns 0 on success or -ENOMEM if the ring allocation fails (the
 * ring is device-managed, so no explicit free path is needed).
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	/* odd channels transmit, even channels receive */
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 and the SETUP channel are permanently enabled */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}
802
803
804
805
806
807
808
/*
 * iudma_init - Enable the IUDMA block and bring up every channel.
 * @udc: Reference to the device controller.
 *
 * Returns 0 on success, or the first channel-init error.  Partially
 * initialized channels are device-managed, so no rollback is needed.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	/* unmask the global IRQ for every channel */
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}
825
826
827
828
829
830
831
/*
 * iudma_uninit - Shut down the IUDMA block.
 * @udc: Reference to the device controller.
 *
 * Masks the global IRQs, resets every channel, then disables the DMA
 * engine.  Descriptor rings are device-managed and freed elsewhere.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}
843
844
845
846
847
848
849
850
851
852
/*
 * bcm63xx_set_ctrl_irqs - Mask/unmask the USBD control event IRQs.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to unmask reset/SETUP/SETCFG/SETINTF/link events.
 *
 * Pending events are always acknowledged, even when masking, so stale
 * status bits cannot fire later.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
867
868
869
870
871
872
873
874
875
876
877
878
/*
 * bcm63xx_select_phy_mode - Select between USB device and host mode
 * for the shared PHY port.
 * @udc: Reference to the device controller.
 * @is_device: true for device mode, false for host mode.
 *
 * On 6328 the USB pinmux must also be switched.  Device mode disables
 * host control (HOSTB) and initially keeps the D+ pullup undriven
 * (NODRIV); the data-line swap is applied for the device core.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			   GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
909
910
911
912
913
914
915
916
917
918
/*
 * bcm63xx_select_pullup - Drive/float the D+ pullup to signal
 * connection state to the host.
 * @udc: Reference to the device controller.
 * @is_on: true to drive the pullup (appear connected).
 *
 * NODRIV set means the pullup is left floating (disconnected), so the
 * bit sense is inverted relative to @is_on.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}
930
931
932
933
934
935
936
937
/*
 * bcm63xx_uninit_udc_hw - Shut down the hardware and release clocks.
 * @udc: Reference to the device controller.
 *
 * Clocks are briefly re-enabled so the IUDMA shutdown register writes
 * take effect, then disabled and released.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
947
948
949
950
951
952static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
953{
954 int i, rc = 0;
955 u32 val;
956
957 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
958 GFP_KERNEL);
959 if (!udc->ep0_ctrl_buf)
960 return -ENOMEM;
961
962 INIT_LIST_HEAD(&udc->gadget.ep_list);
963 for (i = 0; i < BCM63XX_NUM_EP; i++) {
964 struct bcm63xx_ep *bep = &udc->bep[i];
965
966 bep->ep.name = bcm63xx_ep_info[i].name;
967 bep->ep.caps = bcm63xx_ep_info[i].caps;
968 bep->ep_num = i;
969 bep->ep.ops = &bcm63xx_udc_ep_ops;
970 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
971 bep->halted = 0;
972 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
973 bep->udc = udc;
974 bep->ep.desc = NULL;
975 INIT_LIST_HEAD(&bep->queue);
976 }
977
978 udc->gadget.ep0 = &udc->bep[0].ep;
979 list_del(&udc->bep[0].ep.ep_list);
980
981 udc->gadget.speed = USB_SPEED_UNKNOWN;
982 udc->ep0state = EP0_SHUTDOWN;
983
984 udc->usbh_clk = clk_get(udc->dev, "usbh");
985 if (IS_ERR(udc->usbh_clk))
986 return -EIO;
987
988 udc->usbd_clk = clk_get(udc->dev, "usbd");
989 if (IS_ERR(udc->usbd_clk)) {
990 clk_put(udc->usbh_clk);
991 return -EIO;
992 }
993
994 set_clocks(udc, true);
995
996 val = USBD_CONTROL_AUTO_CSRS_MASK |
997 USBD_CONTROL_DONE_CSRS_MASK |
998 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
999 usbd_writel(udc, val, USBD_CONTROL_REG);
1000
1001 val = USBD_STRAPS_APP_SELF_PWR_MASK |
1002 USBD_STRAPS_APP_RAM_IF_MASK |
1003 USBD_STRAPS_APP_CSRPRGSUP_MASK |
1004 USBD_STRAPS_APP_8BITPHY_MASK |
1005 USBD_STRAPS_APP_RMTWKUP_MASK;
1006
1007 if (udc->gadget.max_speed == USB_SPEED_HIGH)
1008 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1009 else
1010 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1011 usbd_writel(udc, val, USBD_STRAPS_REG);
1012
1013 bcm63xx_set_ctrl_irqs(udc, false);
1014
1015 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1016
1017 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1018 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1019 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1020
1021 rc = iudma_init(udc);
1022 set_clocks(udc, false);
1023 if (rc)
1024 bcm63xx_uninit_udc_hw(udc);
1025
1026 return 0;
1027}
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041static int bcm63xx_ep_enable(struct usb_ep *ep,
1042 const struct usb_endpoint_descriptor *desc)
1043{
1044 struct bcm63xx_ep *bep = our_ep(ep);
1045 struct bcm63xx_udc *udc = bep->udc;
1046 struct iudma_ch *iudma = bep->iudma;
1047 unsigned long flags;
1048
1049 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1050 return -EINVAL;
1051
1052 if (!udc->driver)
1053 return -ESHUTDOWN;
1054
1055 spin_lock_irqsave(&udc->lock, flags);
1056 if (iudma->enabled) {
1057 spin_unlock_irqrestore(&udc->lock, flags);
1058 return -EINVAL;
1059 }
1060
1061 iudma->enabled = true;
1062 BUG_ON(!list_empty(&bep->queue));
1063
1064 iudma_reset_channel(udc, iudma);
1065
1066 bep->halted = 0;
1067 bcm63xx_set_stall(udc, bep, false);
1068 clear_bit(bep->ep_num, &udc->wedgemap);
1069
1070 ep->desc = desc;
1071 ep->maxpacket = usb_endpoint_maxp(desc);
1072
1073 spin_unlock_irqrestore(&udc->lock, flags);
1074 return 0;
1075}
1076
1077
1078
1079
1080
1081static int bcm63xx_ep_disable(struct usb_ep *ep)
1082{
1083 struct bcm63xx_ep *bep = our_ep(ep);
1084 struct bcm63xx_udc *udc = bep->udc;
1085 struct iudma_ch *iudma = bep->iudma;
1086 struct list_head *pos, *n;
1087 unsigned long flags;
1088
1089 if (!ep || !ep->desc)
1090 return -EINVAL;
1091
1092 spin_lock_irqsave(&udc->lock, flags);
1093 if (!iudma->enabled) {
1094 spin_unlock_irqrestore(&udc->lock, flags);
1095 return -EINVAL;
1096 }
1097 iudma->enabled = false;
1098
1099 iudma_reset_channel(udc, iudma);
1100
1101 if (!list_empty(&bep->queue)) {
1102 list_for_each_safe(pos, n, &bep->queue) {
1103 struct bcm63xx_req *breq =
1104 list_entry(pos, struct bcm63xx_req, queue);
1105
1106 usb_gadget_unmap_request(&udc->gadget, &breq->req,
1107 iudma->is_tx);
1108 list_del(&breq->queue);
1109 breq->req.status = -ESHUTDOWN;
1110
1111 spin_unlock_irqrestore(&udc->lock, flags);
1112 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1113 spin_lock_irqsave(&udc->lock, flags);
1114 }
1115 }
1116 ep->desc = NULL;
1117
1118 spin_unlock_irqrestore(&udc->lock, flags);
1119 return 0;
1120}
1121
1122
1123
1124
1125
1126
1127static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1128 gfp_t mem_flags)
1129{
1130 struct bcm63xx_req *breq;
1131
1132 breq = kzalloc(sizeof(*breq), mem_flags);
1133 if (!breq)
1134 return NULL;
1135 return &breq->req;
1136}
1137
1138
1139
1140
1141
1142
/*
 * bcm63xx_udc_free_request - Free a request allocated by
 * bcm63xx_udc_alloc_request().
 * @ep: Endpoint associated with the request (unused).
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	kfree(our_req(req));
}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1165 gfp_t mem_flags)
1166{
1167 struct bcm63xx_ep *bep = our_ep(ep);
1168 struct bcm63xx_udc *udc = bep->udc;
1169 struct bcm63xx_req *breq = our_req(req);
1170 unsigned long flags;
1171 int rc = 0;
1172
1173 if (unlikely(!req || !req->complete || !req->buf || !ep))
1174 return -EINVAL;
1175
1176 req->actual = 0;
1177 req->status = 0;
1178 breq->offset = 0;
1179
1180 if (bep == &udc->bep[0]) {
1181
1182 if (udc->ep0_reply)
1183 return -EINVAL;
1184
1185 udc->ep0_reply = req;
1186 schedule_work(&udc->ep0_wq);
1187 return 0;
1188 }
1189
1190 spin_lock_irqsave(&udc->lock, flags);
1191 if (!bep->iudma->enabled) {
1192 rc = -ESHUTDOWN;
1193 goto out;
1194 }
1195
1196 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1197 if (rc == 0) {
1198 list_add_tail(&breq->queue, &bep->queue);
1199 if (list_is_singular(&bep->queue))
1200 iudma_write(udc, bep->iudma, breq);
1201 }
1202
1203out:
1204 spin_unlock_irqrestore(&udc->lock, flags);
1205 return rc;
1206}
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
/*
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint holding the request.
 * @req: Request to remove.
 *
 * If the request is currently at the head of the queue (in flight),
 * the whole channel is reset and the next queued request, if any, is
 * restarted.  Note that the completion callback is invoked with
 * -ESHUTDOWN unconditionally, even when the request was not found on
 * the queue (rc == -EINVAL) — existing contract of this driver.
 *
 * Returns 0 on success, -EINVAL if the queue was empty.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		/* request is in flight: stop the channel before unlinking */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}
1257
1258
1259
1260
1261
1262
1263
1264
1265static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1266{
1267 struct bcm63xx_ep *bep = our_ep(ep);
1268 struct bcm63xx_udc *udc = bep->udc;
1269 unsigned long flags;
1270
1271 spin_lock_irqsave(&udc->lock, flags);
1272 bcm63xx_set_stall(udc, bep, !!value);
1273 bep->halted = value;
1274 spin_unlock_irqrestore(&udc->lock, flags);
1275
1276 return 0;
1277}
1278
1279
1280
1281
1282
1283
1284
1285static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1286{
1287 struct bcm63xx_ep *bep = our_ep(ep);
1288 struct bcm63xx_udc *udc = bep->udc;
1289 unsigned long flags;
1290
1291 spin_lock_irqsave(&udc->lock, flags);
1292 set_bit(bep->ep_num, &udc->wedgemap);
1293 bcm63xx_set_stall(udc, bep, true);
1294 spin_unlock_irqrestore(&udc->lock, flags);
1295
1296 return 0;
1297}
1298
/* Endpoint operations exposed to the gadget core. */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable = bcm63xx_ep_enable,
	.disable = bcm63xx_ep_disable,

	.alloc_request = bcm63xx_udc_alloc_request,
	.free_request = bcm63xx_udc_free_request,

	.queue = bcm63xx_udc_queue,
	.dequeue = bcm63xx_udc_dequeue,

	.set_halt = bcm63xx_udc_set_halt,
	.set_wedge = bcm63xx_udc_set_wedge,
};
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
/*
 * bcm63xx_ep0_setup_callback - Pass a SETUP packet to the gadget
 * driver's setup() handler.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP packet.
 *
 * Caller must hold udc->lock; it is dropped around the callback.
 * Returns the gadget driver's result (negative on failure).
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
/*
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request
 * for the gadget driver.
 * @udc: Reference to the device controller.
 *
 * The hardware handles (and auto-acks) SET_CONFIGURATION itself, so
 * the driver rebuilds an equivalent SETUP packet from the cached
 * udc->cfg and delivers it to the gadget driver.  Caller must hold
 * udc->lock.
 *
 * Returns the gadget driver's result; a negative value only produces
 * a warning because the hardware has already acked the request.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}
1364
1365
1366
1367
1368
/*
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request for
 * the gadget driver, mirroring bcm63xx_ep0_spoof_set_cfg().
 * @udc: Reference to the device controller.
 *
 * Caller must hold udc->lock.  Returns the gadget driver's result.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}
1388
1389
1390
1391
1392
1393
1394
/*
 * bcm63xx_ep0_map_write - DMA-map a request and start it on one of the
 * ep0 IUDMA channels.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA_EP0_RXCHAN or IUDMA_EP0_TXCHAN.
 * @req: Request to submit; becomes udc->ep0_request until reaped.
 *
 * Only one ep0 request may be outstanding at a time (BUG_ON enforces
 * this).  Caller must hold udc->lock.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}
1409
1410
1411
1412
1413
1414
1415
/*
 * bcm63xx_ep0_complete - Set the status of an ep0 request and invoke
 * its completion callback (if any) with the lock dropped.
 * @udc: Reference to the device controller.
 * @req: Request to complete.
 * @status: Final status; any error zeroes req->actual.
 *
 * Caller must hold udc->lock.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}
1428
1429
1430
1431
1432
1433
1434
/*
 * bcm63xx_ep0_nuke_reply - Abort the gadget driver's pending ep0 data
 * phase reply, completing it with -ESHUTDOWN.
 * @udc: Reference to the device controller.
 * @is_tx: Direction the reply was mapped for (1 = IN/TX).
 *
 * Also clears ep0_request bookkeeping if the nuked reply was the
 * request currently on an ep0 channel.  Caller must hold udc->lock.
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}
1447
1448
1449
1450
1451
1452
/*
 * bcm63xx_ep0_read_complete - Retire the completed ep0 request and
 * return its transferred byte count.
 * @udc: Reference to the device controller.
 *
 * Caller must hold udc->lock and is expected to call this only after
 * ep0_req_completed has been set.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
/*
 * bcm63xx_ep0_internal_request - Submit the driver-internal ep0
 * request (backed by ep0_ctrl_buf) on an ep0 channel.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA_EP0_RXCHAN or IUDMA_EP0_TXCHAN.
 * @length: Transfer length (<= BCM63XX_MAX_CTRL_PKT).
 *
 * Used for SETUP reception and spoofed status phases; no completion
 * callback is attached.  Caller must hold udc->lock.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}
1483
1484
1485
1486
1487
1488
1489
1490
1491
/*
 * bcm63xx_ep0_do_setup - Parse a received SETUP packet and hand it to
 * the gadget driver.
 * @udc: Reference to the device controller.
 *
 * Returns the next ep0 state: a data-phase state when the request has
 * a data stage, EP0_REQUEUE for zero-length/malformed/rejected
 * requests (rejection also stalls ep0), or EP0_IDLE if the packet
 * went missing.  Caller must hold udc->lock.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * support these packets directly; they come in as an empty read
	 * on the RX channel and are simply ignored.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on the RX channel */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
/*
 * bcm63xx_ep0_do_idle - Check for pending events while ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * Events are handled in strict priority order: bus reset, SET_CONFIGURATION,
 * SET_INTERFACE, completed SETUP read, shutdown, then stale gadget replies.
 * Returns 0 if progress was made (run the state machine again) or -EAGAIN
 * if there is nothing to do.  Caller holds udc->lock.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		/* bus reset: just acknowledge; EP0_REQUEUE rearm happens
		 * elsewhere in the state machine */
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		/* synthesize a SET_CONFIGURATION SETUP for the gadget driver;
		 * the hardware handled the real one itself */
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		/* same spoofing scheme for SET_INTERFACE */
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		/* a SETUP packet finished arriving; parse it */
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* ensure the above teardown is globally visible before the
		 * shutdown state is observed (e.g. by the pullup poll loop) */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * The gadget driver queued an ep0 reply but no host request
		 * is outstanding to consume it — complete it with an error
		 * rather than leaving it dangling.
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}
1585
1586
1587
1588
1589
1590
1591
/*
 * bcm63xx_ep0_one_round - Run the ep0 state machine for one iteration.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if the state changed (caller should iterate again) or
 * -EAGAIN if the machine has quiesced.  Caller holds udc->lock.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* rearm the RX channel to catch the next SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Waiting for the gadget driver to queue its IN-data reply
		 * (udc->ep0_reply).  On reset/shutdown, abandon the phase
		 * and rearm for the next SETUP.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Waiting for the TX DMA to finish sending the reply to the
		 * host; the OUT status phase is handled by the hardware.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* transfer done — go catch the next SETUP */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* like the IN case, but the gadget driver queues an RX buffer */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* waiting for the RX DMA to finish receiving the OUT data */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* queue a zero-length IN packet for the status phase */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Waiting for the zero-length status IN packet (queued above
		 * from ep0_ctrl_req, so there is no gadget reply to nuke on
		 * shutdown — just reset the channel and forget the request).
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Status phase for a spoofed SET_CONFIGURATION/SET_INTERFACE:
		 * the hardware already completed the real control transfer,
		 * so there is nothing to send — just complete the gadget
		 * driver's status-phase request immediately.
		 *
		 * NOTE(review): presumably the reply arrives via the normal
		 * ep0 queue path; confirm against the driver's queue handler.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			/* still waiting for the gadget driver's request */
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		/* terminal state; only pullup(1) restarts the machine */
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744static void bcm63xx_ep0_process(struct work_struct *w)
1745{
1746 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1747 spin_lock_irq(&udc->lock);
1748 while (bcm63xx_ep0_one_round(udc) == 0)
1749 ;
1750 spin_unlock_irq(&udc->lock);
1751}
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1762{
1763 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1764
1765 return (usbd_readl(udc, USBD_STATUS_REG) &
1766 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1767}
1768
1769
1770
1771
1772
1773
1774
1775
/*
 * bcm63xx_udc_pullup - Enable/disable the D+ pullup (connect/disconnect).
 * @gadget: USB device.
 * @is_on: Nonzero to connect to the host, zero to disconnect.
 *
 * Connecting reinitializes the FIFOs/endpoints and restarts the ep0 state
 * machine; disconnecting requests a shutdown and polls (unlocked, sleeping)
 * until the ep0 worker reaches EP0_SHUTDOWN.  Returns 0 on a state change,
 * -EINVAL if the request matches the current state.
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* clear any stale wedge/stall state from a previous session */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		/*
		 * Poll until the ep0 worker observes the shutdown request
		 * and parks itself.  Lock is already released — msleep()
		 * below would otherwise sleep in atomic context.
		 */
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		/* early return: lock was dropped above, do not unlock again */
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
1817
1818
1819
1820
1821
1822
/*
 * bcm63xx_udc_start - Bind a gadget driver to this controller.
 * @gadget: USB device.
 * @driver: Gadget driver being bound.
 *
 * Rejects drivers that are not high-speed capable or lack a setup()
 * callback.  Brings up clocks, FIFOs, and endpoints under udc->lock.
 * Returns 0 on success or a negative errno.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hardware bring-up order: clocks first, then FIFO/endpoint init */
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1854
1855
1856
1857
1858
1859
1860static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1861{
1862 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1863 unsigned long flags;
1864
1865 spin_lock_irqsave(&udc->lock, flags);
1866
1867 udc->driver = NULL;
1868
1869
1870
1871
1872
1873
1874
1875 msleep(100);
1876
1877 bcm63xx_select_phy_mode(udc, false);
1878 set_clocks(udc, false);
1879
1880 spin_unlock_irqrestore(&udc->lock, flags);
1881
1882 return 0;
1883}
1884
/* Gadget operations dispatch table registered with the UDC core. */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1906{
1907 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1908
1909 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1910 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1911 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1912 USBD_STATUS_ALTINTF_SHIFT;
1913 bcm63xx_ep_setup(udc);
1914}
1915
1916
1917
1918
1919
1920
1921
1922
1923static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1924{
1925 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1926 enum usb_device_speed oldspeed = udc->gadget.speed;
1927
1928 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1929 case BCM63XX_SPD_HIGH:
1930 udc->gadget.speed = USB_SPEED_HIGH;
1931 break;
1932 case BCM63XX_SPD_FULL:
1933 udc->gadget.speed = USB_SPEED_FULL;
1934 break;
1935 default:
1936
1937 udc->gadget.speed = USB_SPEED_UNKNOWN;
1938 dev_err(udc->dev,
1939 "received SETUP packet with invalid link speed\n");
1940 return 0;
1941 }
1942
1943 if (udc->gadget.speed != oldspeed) {
1944 dev_info(udc->dev, "link up, %s-speed mode\n",
1945 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1946 return 1;
1947 } else {
1948 return 0;
1949 }
1950}
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
/*
 * bcm63xx_update_wedge - Reassert or release stalls on wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to reassert STALL on each wedged ep; false to clear
 *              the stall and drop the endpoint from the wedge map.
 *
 * Clearing the current bit inside for_each_set_bit() is safe: the macro
 * re-scans from i+1 on each iteration.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}
1973
1974
1975
1976
1977
1978
1979
1980
1981
/*
 * bcm63xx_udc_ctrl_isr - ISR for the control (link/SETUP/config) events.
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller (cast from void *).
 *
 * Reads and acknowledges the pending event bits, updates driver state
 * under udc->lock, and kicks the ep0 worker as needed.  The gadget
 * driver's disconnect/reset callbacks are invoked after the lock is
 * dropped, as they may sleep or re-enter the UDC.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	/* only handle events that are both pending and unmasked */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	/* ack before processing so no edge is lost */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* only log "link down" on a genuine up->down transition */
		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		      udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* a bus reset clears all stalls, wedged or not */
		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		/* a speed change means FIFO sizes must be reprogrammed */
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

	/* callbacks outside the lock; disconnect takes priority over reset */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
/*
 * bcm63xx_udc_data_isr - ISR for data transfers on an IUDMA channel.
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel (cast from void *).
 *
 * Acks the BUFDONE interrupt, accounts the completed descriptor bytes
 * against the current request, and either requeues the next chunk or
 * completes the request.  A transfer is done when all requested bytes
 * arrived or the hardware delivered a short packet (bd_bytes > rc).
 * Completion callbacks run after udc->lock is dropped.
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* ack the per-channel buffer-done interrupt */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* ep0 channels use the single in-flight ep0_request, not a queue */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* rc < 0: no completed descriptors; nothing to account */
		if (rc >= 0) {
			req->actual += rc;

			/* done on full length or a short packet */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* short reads can overshoot; clamp */
				req->actual = min(req->actual, req->length);
			} else {
				/* more data pending; rearm the channel */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				/* start the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2140{
2141 struct bcm63xx_udc *udc = s->private;
2142
2143 if (!udc->driver)
2144 return -ENODEV;
2145
2146 seq_printf(s, "ep0 state: %s\n",
2147 bcm63xx_ep0_state_names[udc->ep0state]);
2148 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2149 udc->ep0_req_reset ? "reset " : "",
2150 udc->ep0_req_set_cfg ? "set_cfg " : "",
2151 udc->ep0_req_set_iface ? "set_iface " : "",
2152 udc->ep0_req_shutdown ? "shutdown " : "",
2153 udc->ep0_request ? "pending " : "",
2154 udc->ep0_req_completed ? "completed " : "",
2155 udc->ep0_reply ? "reply " : "");
2156 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2157 udc->cfg, udc->iface, udc->alt_iface);
2158 seq_printf(s, "regs:\n");
2159 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2160 usbd_readl(udc, USBD_CONTROL_REG),
2161 usbd_readl(udc, USBD_STRAPS_REG),
2162 usbd_readl(udc, USBD_STATUS_REG));
2163 seq_printf(s, " events: %08x; stall: %08x\n",
2164 usbd_readl(udc, USBD_EVENTS_REG),
2165 usbd_readl(udc, USBD_STALL_REG));
2166
2167 return 0;
2168}
2169
2170
2171
2172
2173
2174
2175
2176
2177static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2178{
2179 struct bcm63xx_udc *udc = s->private;
2180 int ch_idx, i;
2181 u32 sram2, sram3;
2182
2183 if (!udc->driver)
2184 return -ENODEV;
2185
2186 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2187 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2188 struct list_head *pos;
2189
2190 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2191 switch (iudma_defaults[ch_idx].ep_type) {
2192 case BCMEP_CTRL:
2193 seq_printf(s, "control");
2194 break;
2195 case BCMEP_BULK:
2196 seq_printf(s, "bulk");
2197 break;
2198 case BCMEP_INTR:
2199 seq_printf(s, "interrupt");
2200 break;
2201 }
2202 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2203 seq_printf(s, " [ep%d]:\n",
2204 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2205 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2206 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2207 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2208 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2209 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2210
2211 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2212 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2213 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2214 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2215 sram2 >> 16, sram2 & 0xffff,
2216 sram3 >> 16, sram3 & 0xffff,
2217 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2218 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2219 iudma->n_bds);
2220
2221 if (iudma->bep) {
2222 i = 0;
2223 list_for_each(pos, &iudma->bep->queue)
2224 i++;
2225 seq_printf(s, "; %d queued\n", i);
2226 } else {
2227 seq_printf(s, "\n");
2228 }
2229
2230 for (i = 0; i < iudma->n_bds; i++) {
2231 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2232
2233 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2234 i * sizeof(*d), i,
2235 d->len_stat >> 16, d->len_stat & 0xffff,
2236 d->address);
2237 if (d == iudma->read_bd)
2238 seq_printf(s, " <<RD");
2239 if (d == iudma->write_bd)
2240 seq_printf(s, " <<WR");
2241 seq_printf(s, "\n");
2242 }
2243
2244 seq_printf(s, "\n");
2245 }
2246
2247 return 0;
2248}
2249
/* debugfs open: bind the per-device udc pointer to the usbd show routine */
static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
}
2254
/* debugfs open: bind the per-device udc pointer to the iudma show routine */
static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
}
2259
/* file_operations for the read-only "usbd" debugfs entry (seq_file based) */
static const struct file_operations usbd_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_usbd_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};
2267
/* file_operations for the read-only "iudma" debugfs entry (seq_file based) */
static const struct file_operations iudma_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_iudma_dbg_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};
2275
2276
2277
2278
2279
2280
/*
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 *
 * Best-effort: on any failure the partially created entries are removed
 * and a message is logged; the driver continues without debugfs.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root, *usbd, *iudma;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;

	usbd = debugfs_create_file("usbd", 0400, root, udc,
			&usbd_dbg_fops);
	if (!usbd)
		goto err_usbd;
	iudma = debugfs_create_file("iudma", 0400, root, udc,
			&iudma_dbg_fops);
	if (!iudma)
		goto err_iudma;

	udc->debugfs_root = root;
	udc->debugfs_usbd = usbd;
	udc->debugfs_iudma = iudma;
	return;
	/* unwind in reverse order of creation */
err_iudma:
	debugfs_remove(usbd);
err_usbd:
	debugfs_remove(root);
err_root:
	dev_err(udc->dev, "debugfs is not available\n");
}
2312
2313
2314
2315
2316
2317
2318
2319static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2320{
2321 debugfs_remove(udc->debugfs_iudma);
2322 debugfs_remove(udc->debugfs_usbd);
2323 debugfs_remove(udc->debugfs_root);
2324 udc->debugfs_iudma = NULL;
2325 udc->debugfs_usbd = NULL;
2326 udc->debugfs_root = NULL;
2327}
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
/*
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device.
 *
 * Maps the two register regions, initializes the hardware, requests the
 * control IRQ plus one IRQ per IUDMA channel, sets up debugfs, and
 * registers with the UDC core.  All memory/ioremap/IRQ resources are
 * devm-managed; only the hardware init needs explicit unwinding.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	/* region 0: USBD core registers; region 1: IUDMA registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	/* either platform data or the module parameter can force FS-only */
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* hardware init must precede IRQ registration */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ 0 is the control-event interrupt */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource #0\n");
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0) {
		dev_err(dev, "error requesting IRQ #%d\n", irq);
		goto out_uninit;
	}

	/* IRQs 1..BCM63XX_NUM_IUDMA are per-channel data interrupts */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0) {
			dev_err(dev, "error requesting IRQ #%d\n", irq);
			goto out_uninit;
		}
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;
}
2425
2426
2427
2428
2429
/*
 * bcm63xx_udc_remove - Remove the device.
 * @pdev: Platform device.
 *
 * usb_del_gadget_udc() unbinds any gadget driver first, so udc->driver
 * must be NULL afterwards; the BUG_ON guards that invariant.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
2442
/* Platform driver glue; devices are matched by DRV_MODULE_NAME. */
static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
2456