1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/bitops.h>
14#include <linux/bug.h>
15#include <linux/clk.h>
16#include <linux/compiler.h>
17#include <linux/debugfs.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/dma-mapping.h>
21#include <linux/errno.h>
22#include <linux/interrupt.h>
23#include <linux/ioport.h>
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/platform_device.h>
29#include <linux/sched.h>
30#include <linux/seq_file.h>
31#include <linux/slab.h>
32#include <linux/timer.h>
33#include <linux/usb/ch9.h>
34#include <linux/usb/gadget.h>
35#include <linux/workqueue.h>
36
37#include <bcm63xx_cpu.h>
38#include <bcm63xx_iudma.h>
39#include <bcm63xx_dev_usb_usbd.h>
40#include <bcm63xx_io.h>
41#include <bcm63xx_regs.h>
42
#define DRV_MODULE_NAME "bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

/* Static name/capability table for the five endpoints exposed by this UDC. */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
70
/* Restrict the link to full speed even on high-speed-capable hardware. */
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing:
 *
 * false (default): one IRQ per DATAx packet.  Slower, but each RX buffer
 * descriptor covers at most one max-size packet, which makes cancellation
 * and error recovery straightforward.
 *
 * true: one IRQ per RX transfer (up to IUDMA_MAX_FRAGMENT bytes per BD).
 * Generates far fewer interrupts, at the cost of less robust recovery when
 * a transfer is cut short (e.g. the host reconfigures mid-transaction).
 *
 * Used by iudma_write() to size RX buffer descriptors and by
 * bcm63xx_init_udc_hw() to set USBD_CONTROL_RXZSCFG_MASK.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
97
/* Driver-wide limits and hardware layout constants. */
#define BCM63XX_NUM_EP 5		/* usb_ep's exposed (incl. ep0) */
#define BCM63XX_NUM_IUDMA 6		/* IUDMA channels used by the core */
#define BCM63XX_NUM_FIFO_PAIRS 3	/* RX/TX FIFO pairs to configure */

#define IUDMA_RESET_TIMEOUT_US 10000	/* max wait for a channel to stop */

#define IUDMA_EP0_RXCHAN 0		/* dedicated ep0 OUT/SETUP channel */
#define IUDMA_EP0_TXCHAN 1		/* dedicated ep0 IN channel */

#define IUDMA_MAX_FRAGMENT 2048		/* max bytes per buffer descriptor */
#define BCM63XX_MAX_CTRL_PKT 64		/* ep0 max packet size */

/* Endpoint type codes programmed into the USBD typemap/CSR registers. */
#define BCMEP_CTRL 0x00
#define BCMEP_ISOC 0x01
#define BCMEP_BULK 0x02
#define BCMEP_INTR 0x03

/* Endpoint direction codes (host's point of view). */
#define BCMEP_OUT 0x00
#define BCMEP_IN 0x01

/* USBD_STRAPS speed field values. */
#define BCM63XX_SPD_FULL 1
#define BCM63XX_SPD_HIGH 0

/* Offsets of the DMAC/DMAS register banks inside the IUDMA block. */
#define IUDMA_DMAC_OFFSET 0x200
#define IUDMA_DMAS_OFFSET 0x400
123
/*
 * States of the ep0 software state machine (driven by bcm63xx_ep0_do_idle()
 * and the ep0 worker).  See bcm63xx_ep0_state_names[] for printable names.
 */
enum bcm63xx_ep0_state {
	EP0_REQUEUE,			/* arm the SETUP RX buffer, then idle */
	EP0_IDLE,			/* waiting for SETUP or internal event */
	EP0_IN_DATA_PHASE_SETUP,	/* IN data phase: waiting for reply */
	EP0_IN_DATA_PHASE_COMPLETE,	/* IN data phase: reply queued to HW */
	EP0_OUT_DATA_PHASE_SETUP,	/* OUT data phase: waiting for buffer */
	EP0_OUT_DATA_PHASE_COMPLETE,	/* OUT data phase: RX queued to HW */
	EP0_OUT_STATUS_PHASE,		/* waiting for 0-byte OUT status */
	EP0_IN_FAKE_STATUS_PHASE,	/* spoofed status for auto-acked reqs */
	EP0_SHUTDOWN,			/* no gadget driver bound */
};
135
/* Printable names for enum bcm63xx_ep0_state; order must match the enum. */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
147
148
149
150
151
152
153
154
155
156
157
/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number (-1 for the dedicated SETUP RX channel).
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (BCMEP_CTRL, BCMEP_BULK, BCMEP_INTR, ...).
 * @dir: Direction (BCMEP_IN, BCMEP_OUT).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};
167
/*
 * Fixed channel layout: even channels are RX, odd channels are TX
 * (see iudma_init_channel(): is_tx = ch_idx & 1).  Channel 0 has
 * ep_num == -1 because it only ever receives SETUP packets for ep0.
 *
 *	      ep_num  n_bds  type        dir        fifo  hs   fs
 */
static const struct iudma_ch_cfg iudma_defaults[] = {
	[0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
	[1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
	[2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
	[3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
	[4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
	[5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
};
186
187struct bcm63xx_udc;
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
/**
 * struct iudma_ch - Current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number; -1 for the ep0 SETUP RX channel.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: Max USB packet size for the current link speed.
 * @is_tx: true for TX channels (odd indices), false for RX.
 * @bep: Associated endpoint; NULL for the ep0 SETUP RX channel.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next buffer descriptor available for a new fragment.
 * @end_bd: Final buffer descriptor in the ring (wrap point).
 * @n_bds_used: Number of buffer descriptors currently outstanding.
 * @bd_ring: Base (CPU) address of the buffer descriptor ring.
 * @bd_ring_dma: DMA address of @bd_ring.
 * @n_bds: Total number of buffer descriptors in the ring.
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};
231
232
233
234
235
236
237
238
239
240
/**
 * struct bcm63xx_ep - Driver-private state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: IUDMA channel backing this endpoint.
 * @ep: USB gadget layer representation of the endpoint.
 * @udc: Reference to the device controller.
 * @queue: List of outstanding bcm63xx_req's on this endpoint.
 * @halted: 1 if the endpoint is currently stalled, 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};
249
250
251
252
253
254
255
256
257
/**
 * struct bcm63xx_req - Driver-private state of a single request.
 * @queue: Links this request into its endpoint's queue.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into req.buf (next byte to queue).
 * @bd_bytes: Bytes queued on buffer descriptors for this request so far.
 * @iudma: IUDMA channel carrying this request.
 */
struct bcm63xx_req {
	struct list_head queue;
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock mediating access to this struct and (most) HW registers.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port information).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB device representation.
 * @driver: Bound gadget function driver.
 * @usbd_regs: Base address of the USBD register block.
 * @iudma_regs: Base address of the associated IUDMA register block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alternate interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for driver-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer backing @ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Work item that runs the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; halt ep0 activity.
 * @ep0_req_completed: ep0 request completed; worker has not yet seen it.
 * @ep0_reply: Pending reply from the gadget driver.
 * @ep0_request: Outstanding ep0 request submitted to the hardware.
 * @debugfs_root: debugfs directory for this controller.
 * @debugfs_usbd: debugfs file exposing USBD controller state.
 * @debugfs_iudma: debugfs file exposing IUDMA channel state.
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
	struct dentry *debugfs_usbd;
	struct dentry *debugfs_iudma;
};
340
341static const struct usb_ep_ops bcm63xx_udc_ep_ops;
342
343
344
345
346
/* Recover the driver context from the gadget-layer object embedded in it. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

/* Recover the driver-private endpoint from the gadget-layer usb_ep. */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

/* Recover the driver-private request from the gadget-layer usb_request. */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}
361
/* MMIO accessors for the USBD register block. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

/* MMIO accessors for the global IUDMA registers. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

/* Per-channel accessors for the IUDMA channel-config (DMAC) bank. */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

/* Per-channel accessors for the IUDMA channel-state (DMAS) bank. */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
407
/*
 * Gate the USB host + device clocks on or off.  Enable order (usbh before
 * usbd) is the reverse of the disable order; the short delay lets the
 * blocks settle before register access.
 */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}
419
420
421
422
423
424
425
426
427
428
429
430
431
432
/**
 * bcm63xx_ep_dma_select - Set up the "init_sel" selection index.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The init_sel field selects which endpoint (or FIFO pair, depending on
 * context) subsequent USBD register writes apply to; callers pass either
 * an endpoint number or a channel-pair index.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}
441
442
443
444
445
446
447
448
449
450
451static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
452 bool is_stalled)
453{
454 u32 val;
455
456 val = USBD_STALL_UPDATE_MASK |
457 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
458 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
459 usbd_writel(udc, val, USBD_STALL_REG);
460}
461
462
463
464
465
466
467
468
/**
 * bcm63xx_fifo_setup - Configure USBD FIFO sizes and slot assignments.
 * @udc: Reference to the device controller.
 *
 * Slots are allocated per IUDMA channel pair (RX cfg at even index, TX cfg
 * at the following odd index), carved contiguously out of the RX and TX
 * FIFO space.  Packet sizes depend on the negotiated link speed, so this
 * must be rerun when the speed changes.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* walk the channel pairs, handing out FIFO slots in order */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* read back to make sure the write has posted */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
503
504
505
506
507
508
/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint (FIFO pair) number to flush.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	/* posted-write flush */
	usbd_readl(udc, USBD_CONTROL_REG);
}
520
521
522
523
524
525static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
526{
527 int i;
528
529 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
530 bcm63xx_fifo_reset_ep(udc, i);
531}
532
533
534
535
536
/**
 * bcm63xx_ep_init - One-time endpoint initialization.
 * @udc: Reference to the device controller.
 *
 * Programs the endpoint-number-to-DMA-channel mapping for every channel
 * that is bound to a real endpoint (the SETUP-only channel 0 is skipped).
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
553
554
555
556
557
558
559
/**
 * bcm63xx_ep_setup - Configure per-endpoint CSR settings.
 * @udc: Reference to the device controller.
 *
 * Must be rerun whenever the link speed, configuration, interface, or
 * alternate interface changes, since all of those are baked into the
 * per-endpoint CSR word.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		/* channel 0 (SETUP RX) has no usb_ep to configure */
		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
588
589
590
591
592
593
594
595
596
597
598
599
600
601
/**
 * iudma_write - Queue buffer descriptors for a transfer, then start DMA.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request providing the data buffer (req.dma must be mapped).
 *
 * TX transfers may span multiple descriptors of up to IUDMA_MAX_FRAGMENT
 * bytes each.  RX transfers use one packet-sized descriptor per IRQ unless
 * irq_coalesce is set, in which case a whole fragment is queued at once.
 * A trailing zero-length packet is queued when the request asks for one
 * and the length is an exact multiple of the packet size.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* wrap the ring pointer when we hit the final descriptor */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length packet needs a dummy length of 1 */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more pass through the loop after
		 * all of the data has been queued, to emit the zero packet.
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		/* publish the buffer address before handing the BD to HW */
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* kick the channel */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
668
669
670
671
672
673
674
675
676
677
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to check.
 *
 * Checks whether ALL outstanding buffer descriptors on the channel have
 * been returned by the hardware.  If so, advances read_bd past them and
 * returns the total transfer length; returns -EBUSY if any descriptor is
 * still owned by the hardware, or -EINVAL if nothing was queued.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* HW still owns this descriptor - transfer not done yet */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
706
707
708
709
710
711
/**
 * iudma_reset_channel - Stop DMA on a single channel and re-arm it.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 *
 * Disables the channel, waits for the hardware to drain (flushing FIFOs
 * as needed), clears the descriptor ring, and reprograms the channel's
 * IRQ mask, burst size, and ring base address.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* RX channels: drop any pending data before stopping the channel */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the TX FIFO so the channel can drain */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* ack any stale channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
762
763
764
765
766
767
/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 *
 * Allocates the channel's descriptor ring and links the channel to its
 * endpoint.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	/* odd channel indices are TX, even are RX */
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 channels (ep_num -1 and 0) are always active */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}
801
802
803
804
805
806
807
/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enables the DMA engine, initializes and flushes every channel, then
 * unmasks the global per-channel IRQs.  Returns 0 on success or a
 * negative errno from iudma_init_channel().
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	/* unmask IRQs for all channels in use */
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}
824
825
826
827
828
829
830
/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Masks the global IUDMA IRQs, flushes every channel, and disables DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}
842
843
844
845
846
847
848
849
850
851
/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control-path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 *
 * The status register is cleared and any pending events are acked in
 * both cases, so stale events cannot fire when IRQs are re-enabled.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
866
867
868
869
870
871
872
873
874
875
876
877
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device mode, false for host mode.
 *
 * In device mode the port is routed to the USBD block and the D+ pullup
 * (NODRIV cleared) is left disabled until bcm63xx_select_pullup() is
 * called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			   GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	/* take the port away from the host block; keep AFE undriven for now */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
908
909
910
911
912
913
914
915
916
917
/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+.
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * With the pullup active the host senses a connected FS/HS device;
 * with it inactive the host sees the device as disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}
929
930
931
932
933
934
935
936
/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * Temporarily re-enables the clocks so the IUDMA block can be quiesced,
 * then releases the clock references.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
946
947
948
949
950
951static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
952{
953 int i, rc = 0;
954 u32 val;
955
956 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
957 GFP_KERNEL);
958 if (!udc->ep0_ctrl_buf)
959 return -ENOMEM;
960
961 INIT_LIST_HEAD(&udc->gadget.ep_list);
962 for (i = 0; i < BCM63XX_NUM_EP; i++) {
963 struct bcm63xx_ep *bep = &udc->bep[i];
964
965 bep->ep.name = bcm63xx_ep_info[i].name;
966 bep->ep.caps = bcm63xx_ep_info[i].caps;
967 bep->ep_num = i;
968 bep->ep.ops = &bcm63xx_udc_ep_ops;
969 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
970 bep->halted = 0;
971 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
972 bep->udc = udc;
973 bep->ep.desc = NULL;
974 INIT_LIST_HEAD(&bep->queue);
975 }
976
977 udc->gadget.ep0 = &udc->bep[0].ep;
978 list_del(&udc->bep[0].ep.ep_list);
979
980 udc->gadget.speed = USB_SPEED_UNKNOWN;
981 udc->ep0state = EP0_SHUTDOWN;
982
983 udc->usbh_clk = clk_get(udc->dev, "usbh");
984 if (IS_ERR(udc->usbh_clk))
985 return -EIO;
986
987 udc->usbd_clk = clk_get(udc->dev, "usbd");
988 if (IS_ERR(udc->usbd_clk)) {
989 clk_put(udc->usbh_clk);
990 return -EIO;
991 }
992
993 set_clocks(udc, true);
994
995 val = USBD_CONTROL_AUTO_CSRS_MASK |
996 USBD_CONTROL_DONE_CSRS_MASK |
997 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
998 usbd_writel(udc, val, USBD_CONTROL_REG);
999
1000 val = USBD_STRAPS_APP_SELF_PWR_MASK |
1001 USBD_STRAPS_APP_RAM_IF_MASK |
1002 USBD_STRAPS_APP_CSRPRGSUP_MASK |
1003 USBD_STRAPS_APP_8BITPHY_MASK |
1004 USBD_STRAPS_APP_RMTWKUP_MASK;
1005
1006 if (udc->gadget.max_speed == USB_SPEED_HIGH)
1007 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1008 else
1009 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1010 usbd_writel(udc, val, USBD_STRAPS_REG);
1011
1012 bcm63xx_set_ctrl_irqs(udc, false);
1013
1014 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1015
1016 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1017 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1018 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1019
1020 rc = iudma_init(udc);
1021 set_clocks(udc, false);
1022 if (rc)
1023 bcm63xx_uninit_udc_hw(udc);
1024
1025 return 0;
1026}
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Endpoint descriptor (max packet, direction, etc.).
 *
 * Most endpoint parameters are fixed in this controller, so there isn't
 * much to do beyond resetting the channel and clearing the stall/wedge
 * state.  ep0 cannot be enabled through this path.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
1075
1076
1077
1078
1079
/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 *
 * Stops the channel and completes every queued request with -ESHUTDOWN.
 * The lock is dropped around each giveback because the completion callback
 * may re-enter the driver.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			/* drop the lock: the callback may call back in */
			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
1117
1118
1119
1120
1121
1122
1123static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1124 gfp_t mem_flags)
1125{
1126 struct bcm63xx_req *breq;
1127
1128 breq = kzalloc(sizeof(*breq), mem_flags);
1129 if (!breq)
1130 return NULL;
1131 return &breq->req;
1132}
1133
1134
1135
1136
1137
1138
/**
 * bcm63xx_udc_free_request - Free a request previously allocated by
 *   bcm63xx_udc_alloc_request().
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the endpoint queue is empty, the request is started immediately;
 * otherwise it is appended and started later from the completion path.
 *
 * ep0 requests are handed off to the ep0 state machine (via the worker)
 * instead, because they are replies to SETUP packets and must be
 * sequenced with the control-transfer phases.
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply may be outstanding at a time */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		/* start right away if nothing else is in flight */
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, it is simply unlinked.
 * If it is at the head, the DMA transaction is stopped and the successor
 * (if any) is started.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	/*
	 * NOTE(review): req->complete() is invoked even when the queue was
	 * empty (rc == -EINVAL) and req was never unlinked — confirm callers
	 * tolerate a completion for a request that wasn't actually dequeued.
	 */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}
1253
1254
1255
1256
1257
1258
1259
1260
/**
 * bcm63xx_udc_set_halt - Enable/disable the STALL flag in the hardware.
 * @ep: Endpoint to halt or un-halt.
 * @value: Zero to clear halt; nonzero to set halt.
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1274
1275
1276
1277
1278
1279
1280
/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * The wedgemap bit records that this endpoint must stay stalled even if
 * the host tries to clear the halt.
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1294
/* Endpoint operations exposed to the gadget layer. */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
/**
 * bcm63xx_ep0_setup_callback - Drop the spinlock to invoke ->setup().
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request to deliver to the gadget driver.
 *
 * Returns whatever the gadget driver's setup() callback returns.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * The hardware auto-acks SET_CONFIGURATION, but the gadget driver still
 * needs to see it so it can reconfigure interfaces/endpoints.  Because
 * the packet was already acked, we cannot STALL an invalid configuration
 * — the best we can do is warn.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}
1360
1361
1362
1363
1364
/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 *
 * Same rationale as bcm63xx_ep0_spoof_set_cfg(): the hardware already
 * acked the packet, so an invalid request can only be logged.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}
1384
1385
1386
1387
1388
1389
1390
/**
 * bcm63xx_ep0_map_write - DMA-map and queue a single ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number (IUDMA_EP0_RXCHAN or IUDMA_EP0_TXCHAN).
 * @req: Request to submit to the hardware.
 *
 * Only one ep0 request may be outstanding at a time (BUG_ON enforces it).
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}
1405
1406
1407
1408
1409
1410
1411
/**
 * bcm63xx_ep0_complete - Set completion status and invoke the callback.
 * @udc: Reference to the device controller.
 * @req: Request to mark complete.
 * @status: Status code to report to the gadget driver.
 *
 * The spinlock is dropped around the callback, which may re-enter the
 * driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}
1424
1425
1426
1427
1428
1429
1430
/**
 * bcm63xx_ep0_nuke_reply - Abort a gadget-driver ep0 reply on reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 *
 * Unmaps the reply, clears it from the outstanding-request slot if it was
 * submitted, and completes it with -ESHUTDOWN.
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}
1443
1444
1445
1446
1447
1448
/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request.
 * @udc: Reference to the device controller.
 *
 * Returns the number of bytes actually transferred.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
/**
 * bcm63xx_ep0_internal_request - Submit a driver-initiated ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for transactions the driver starts on its own, such as queuing the
 * buffer that catches the next SETUP packet.  No completion callback is set.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}
1479
1480
1481
1482
1483
1484
1485
1486
1487
/**
 * bcm63xx_ep0_do_setup - Parse a new SETUP packet and decide what to do.
 * @udc: Reference to the device controller.
 *
 * Returns the next ep0 state: a data-phase state if the request carries
 * data, EP0_REQUEUE to re-arm the SETUP buffer, or EP0_IDLE on error.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * always deliver these, so if one does show up, just ignore it and
	 * re-arm the SETUP buffer.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
/**
 * bcm63xx_ep0_do_idle - Handle pending events while ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In EP0_IDLE the SETUP RX descriptor is either still pending or has been
 * filled by the host.  This dispatches, in priority order: USB reset,
 * spoofed SET_CONFIGURATION / SET_INTERFACE, a completed SETUP packet,
 * shutdown, and stray gadget-driver replies.
 *
 * Returns 0 if work was done, -EAGAIN if there was nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* the state changes must be seen before SHUTDOWN is entered */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET arrives during an ep0
		 * transaction (especially with a laggy gadget driver).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}
1581
1582
1583
1584
1585
1586
1587
/*
 * bcm63xx_ep0_one_round - Run the ep0 state machine for one iteration.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if the state changed (caller should run another round), or
 * -EAGAIN when the machine has settled.  Caller must hold udc->lock.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up the RX descriptor to catch the next SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
			BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Waiting for the gadget driver to queue its IN data-phase
		 * reply (udc->ep0_reply).  Once it appears, start the TX
		 * DMA.  If a reset/shutdown arrives first, abandon the data
		 * phase and go back to REQUEUE.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
				udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Waiting for the IN data-phase TX DMA to finish
		 * (ep0_req_completed, set by the data ISR).  On shutdown,
		 * kill the in-flight TX transfer and nuke the gadget
		 * driver's reply instead.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/* NOTE(review): the OUT status phase is not handled
			 * explicitly here - presumably the hardware acks it;
			 * confirm against the USBD datasheet */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* like EP0_IN_DATA_PHASE_SETUP, but receiving on RX */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
				udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* OUT data phase finished; answer with a 0-byte IN status */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* internal 0-byte status packet on the TX channel */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Waiting for the 0-byte status packet to go out.  On
		 * shutdown, the status request is driver-owned (queued by
		 * bcm63xx_ep0_internal_request with no completion), so
		 * after resetting the TX channel we can simply drop it -
		 * there is nothing to complete back to the gadget driver.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Entered after bcm63xx_ep0_spoof_set_cfg()/set_iface()
		 * synthesized a SETUP packet for the gadget driver (see
		 * bcm63xx_ep0_do_idle).  Wait for the driver to queue its
		 * status-phase reply, then complete it immediately with
		 * status 0 - nothing is sent over the TX channel here.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	/* no transition taken -> nothing more to do this round */
	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740static void bcm63xx_ep0_process(struct work_struct *w)
1741{
1742 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1743 spin_lock_irq(&udc->lock);
1744 while (bcm63xx_ep0_one_round(udc) == 0)
1745 ;
1746 spin_unlock_irq(&udc->lock);
1747}
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1758{
1759 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1760
1761 return (usbd_readl(udc, USBD_STATUS_REG) &
1762 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1763}
1764
1765
1766
1767
1768
1769
1770
1771
/*
 * bcm63xx_udc_pullup - Enable or disable the D+ pullup.
 * @gadget: USB device.
 * @is_on: nonzero to enable the pullup (connect), 0 to disable (disconnect).
 *
 * Connecting reinitializes the controller and leaves EP0_SHUTDOWN;
 * disconnecting asks the ep0 worker to shut the state machine down and
 * waits (sleeping) until it reaches EP0_SHUTDOWN.
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		/* coming out of shutdown: reprogram the hardware */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* clear any stale halt/wedge state on every endpoint */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		/* ask the worker to shut down, then poll for completion;
		 * the lock is dropped because msleep() may sleep */
		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		while (1) {
			schedule_work(&udc->ep0_wq);
			/* NOTE(review): unlocked read of ep0state; relies on
			 * the worker's mb() publishing EP0_SHUTDOWN - confirm */
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
1813
1814
1815
1816
1817
1818
/*
 * bcm63xx_udc_start - Start the controller and bind it to a gadget driver.
 * @gadget: USB device.
 * @driver: Gadget driver being bound; must be high-speed capable and
 *          provide a setup() callback.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
		!driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* power up and program the hardware before accepting traffic */
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1850
1851
1852
1853
1854
1855
1856static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1857{
1858 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1859 unsigned long flags;
1860
1861 spin_lock_irqsave(&udc->lock, flags);
1862
1863 udc->driver = NULL;
1864
1865
1866
1867
1868
1869
1870
1871 msleep(100);
1872
1873 bcm63xx_select_phy_mode(udc, false);
1874 set_clocks(udc, false);
1875
1876 spin_unlock_irqrestore(&udc->lock, flags);
1877
1878 return 0;
1879}
1880
/* Operations exposed to the UDC core through udc->gadget.ops */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame = bcm63xx_udc_get_frame,
	.pullup = bcm63xx_udc_pullup,
	.udc_start = bcm63xx_udc_start,
	.udc_stop = bcm63xx_udc_stop,
};
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1902{
1903 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1904
1905 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1906 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1907 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1908 USBD_STATUS_ALTINTF_SHIFT;
1909 bcm63xx_ep_setup(udc);
1910}
1911
1912
1913
1914
1915
1916
1917
1918
1919static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1920{
1921 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1922 enum usb_device_speed oldspeed = udc->gadget.speed;
1923
1924 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1925 case BCM63XX_SPD_HIGH:
1926 udc->gadget.speed = USB_SPEED_HIGH;
1927 break;
1928 case BCM63XX_SPD_FULL:
1929 udc->gadget.speed = USB_SPEED_FULL;
1930 break;
1931 default:
1932
1933 udc->gadget.speed = USB_SPEED_UNKNOWN;
1934 dev_err(udc->dev,
1935 "received SETUP packet with invalid link speed\n");
1936 return 0;
1937 }
1938
1939 if (udc->gadget.speed != oldspeed) {
1940 dev_info(udc->dev, "link up, %s-speed mode\n",
1941 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1942 return 1;
1943 } else {
1944 return 0;
1945 }
1946}
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
/*
 * bcm63xx_update_wedge - Reapply or drop halt state on wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to re-assert the halt on each wedged endpoint;
 *              false to clear both the halt and the wedge bit.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		/* unwedging also forgets that the endpoint was wedged */
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}
1969
1970
1971
1972
1973
1974
1975
1976
1977
/*
 * bcm63xx_udc_ctrl_isr - ISR for control-path (USBD) events.
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * Handles link changes, bus reset, SETUP-time speed changes, and the
 * hardware-assisted SET_CONFIGURATION / SET_INTERFACE events.  Gadget
 * driver callbacks run after udc->lock is released.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	/* only look at events that are both pending and unmasked */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
		usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	/* ack everything we are about to handle */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* only report "link down" once per established link */
		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
			USBD_EVENTS_USB_LINK_MASK) &&
			udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		/* reprogram the hardware, then tell the ep0 worker */
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		/* a speed change invalidates FIFO/endpoint programming */
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		/* chip handled SET_CONFIGURATION; spoof it for the driver */
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		/* chip handled SET_INTERFACE; spoof it for the driver */
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

	/* notify the gadget driver outside the lock */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
/*
 * bcm63xx_udc_data_isr - ISR for data-path (IUDMA) events.
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that raised the interrupt.
 *
 * Accounts for completed DMA work on the channel.  ep0 channels feed the
 * ep0 state machine; data endpoints advance their request queues.  The
 * request completion callback is invoked after udc->lock is released.
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* ack the buffer-done interrupt for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
		ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special case: ep0 requests are tracked in udc->ep0_request */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
		iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request may need several DMA submissions */
		if (rc >= 0) {
			req->actual += rc;

			/*
			 * bd_bytes > rc: the descriptor completed with
			 * fewer bytes than programmed - presumably a short
			 * packet ended the transfer (TODO: confirm against
			 * iudma_read()/iudma_write()).
			 */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* clamp in case of over-read */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue the next chunk of this request */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				/* kick off the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	/* complete back to the gadget driver without holding the lock */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2136{
2137 struct bcm63xx_udc *udc = s->private;
2138
2139 if (!udc->driver)
2140 return -ENODEV;
2141
2142 seq_printf(s, "ep0 state: %s\n",
2143 bcm63xx_ep0_state_names[udc->ep0state]);
2144 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2145 udc->ep0_req_reset ? "reset " : "",
2146 udc->ep0_req_set_cfg ? "set_cfg " : "",
2147 udc->ep0_req_set_iface ? "set_iface " : "",
2148 udc->ep0_req_shutdown ? "shutdown " : "",
2149 udc->ep0_request ? "pending " : "",
2150 udc->ep0_req_completed ? "completed " : "",
2151 udc->ep0_reply ? "reply " : "");
2152 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2153 udc->cfg, udc->iface, udc->alt_iface);
2154 seq_printf(s, "regs:\n");
2155 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2156 usbd_readl(udc, USBD_CONTROL_REG),
2157 usbd_readl(udc, USBD_STRAPS_REG),
2158 usbd_readl(udc, USBD_STATUS_REG));
2159 seq_printf(s, " events: %08x; stall: %08x\n",
2160 usbd_readl(udc, USBD_EVENTS_REG),
2161 usbd_readl(udc, USBD_STALL_REG));
2162
2163 return 0;
2164}
2165
2166
2167
2168
2169
2170
2171
2172
2173static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2174{
2175 struct bcm63xx_udc *udc = s->private;
2176 int ch_idx, i;
2177 u32 sram2, sram3;
2178
2179 if (!udc->driver)
2180 return -ENODEV;
2181
2182 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2183 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2184 struct list_head *pos;
2185
2186 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2187 switch (iudma_defaults[ch_idx].ep_type) {
2188 case BCMEP_CTRL:
2189 seq_printf(s, "control");
2190 break;
2191 case BCMEP_BULK:
2192 seq_printf(s, "bulk");
2193 break;
2194 case BCMEP_INTR:
2195 seq_printf(s, "interrupt");
2196 break;
2197 }
2198 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2199 seq_printf(s, " [ep%d]:\n",
2200 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2201 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2202 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2203 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2204 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2205 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2206
2207 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2208 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2209 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2210 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2211 sram2 >> 16, sram2 & 0xffff,
2212 sram3 >> 16, sram3 & 0xffff,
2213 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2214 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2215 iudma->n_bds);
2216
2217 if (iudma->bep) {
2218 i = 0;
2219 list_for_each(pos, &iudma->bep->queue)
2220 i++;
2221 seq_printf(s, "; %d queued\n", i);
2222 } else {
2223 seq_printf(s, "\n");
2224 }
2225
2226 for (i = 0; i < iudma->n_bds; i++) {
2227 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2228
2229 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2230 i * sizeof(*d), i,
2231 d->len_stat >> 16, d->len_stat & 0xffff,
2232 d->address);
2233 if (d == iudma->read_bd)
2234 seq_printf(s, " <<RD");
2235 if (d == iudma->write_bd)
2236 seq_printf(s, " <<WR");
2237 seq_printf(s, "\n");
2238 }
2239
2240 seq_printf(s, "\n");
2241 }
2242
2243 return 0;
2244}
2245
/* debugfs open: bind the USBD show callback to this controller instance */
static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
}
2250
/* debugfs open: bind the IUDMA show callback to this controller instance */
static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
}
2255
/* file_operations for the "usbd" debugfs entry (seq_file based) */
static const struct file_operations usbd_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_usbd_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};
2263
/* file_operations for the "iudma" debugfs entry (seq_file based) */
static const struct file_operations iudma_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_iudma_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};
2271
2272
2273
2274
2275
2276
2277static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2278{
2279 struct dentry *root, *usbd, *iudma;
2280
2281 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2282 return;
2283
2284 root = debugfs_create_dir(udc->gadget.name, NULL);
2285 if (IS_ERR(root) || !root)
2286 goto err_root;
2287
2288 usbd = debugfs_create_file("usbd", 0400, root, udc,
2289 &usbd_dbg_fops);
2290 if (!usbd)
2291 goto err_usbd;
2292 iudma = debugfs_create_file("iudma", 0400, root, udc,
2293 &iudma_dbg_fops);
2294 if (!iudma)
2295 goto err_iudma;
2296
2297 udc->debugfs_root = root;
2298 udc->debugfs_usbd = usbd;
2299 udc->debugfs_iudma = iudma;
2300 return;
2301err_iudma:
2302 debugfs_remove(usbd);
2303err_usbd:
2304 debugfs_remove(root);
2305err_root:
2306 dev_err(udc->dev, "debugfs is not available\n");
2307}
2308
2309
2310
2311
2312
2313
2314
2315static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2316{
2317 debugfs_remove(udc->debugfs_iudma);
2318 debugfs_remove(udc->debugfs_usbd);
2319 debugfs_remove(udc->debugfs_root);
2320 udc->debugfs_iudma = NULL;
2321 udc->debugfs_usbd = NULL;
2322 udc->debugfs_root = NULL;
2323}
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
/*
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Board-specific platform data is mandatory; the driver refuses to probe
 * without it.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	/* MEM resource 0: USBD controller registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	/* MEM resource 1: IUDMA engine registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	/* either the board or the module parameter can force full speed */
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* hardware-level setup; see bcm63xx_init_udc_hw() */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (link, reset, SETUP, ...) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource #0\n");
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
		dev_name(dev), udc) < 0) {
		dev_err(dev, "error requesting IRQ #%d\n", irq);
		goto out_uninit;
	}

	/* IRQ resources #1..#6: one data interrupt per IUDMA channel */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
			dev_name(dev), &udc->iudma[i]) < 0) {
			dev_err(dev, "error requesting IRQ #%d\n", irq);
			goto out_uninit;
		}
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;
}
2421
2422
2423
2424
2425
/*
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	/* the UDC core must have unbound any gadget driver by now */
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
2438
/* Hook into the bcm63xx platform device framework */
static struct platform_driver bcm63xx_udc_driver = {
	.probe = bcm63xx_udc_probe,
	.remove = bcm63xx_udc_remove,
	.driver = {
		.name = DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
2452