1
2
3
4
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/pci.h>
9#include <linux/delay.h>
10#include <linux/errno.h>
11#include <linux/list.h>
12#include <linux/interrupt.h>
13#include <linux/usb/ch9.h>
14#include <linux/usb/gadget.h>
15#include <linux/gpio.h>
16#include <linux/irq.h>
17
18
/*
 * GPIO port used to sense VBUS; -1 means "not used".  Presumably exposed as
 * a module parameter elsewhere in this file — not visible in this chunk.
 */
static int vbus_gpio_port = -1;

/* VBUS debounce: total sampling period and per-sample interval, in ms */
#define PCH_VBUS_PERIOD 3000
#define PCH_VBUS_INTERVAL 10
23
24
25#define UDC_EP_REG_SHIFT 0x20
26
27#define UDC_EPCTL_ADDR 0x00
28#define UDC_EPSTS_ADDR 0x04
29#define UDC_BUFIN_FRAMENUM_ADDR 0x08
30#define UDC_BUFOUT_MAXPKT_ADDR 0x0C
31#define UDC_SUBPTR_ADDR 0x10
32#define UDC_DESPTR_ADDR 0x14
33#define UDC_CONFIRM_ADDR 0x18
34
35#define UDC_DEVCFG_ADDR 0x400
36#define UDC_DEVCTL_ADDR 0x404
37#define UDC_DEVSTS_ADDR 0x408
38#define UDC_DEVIRQSTS_ADDR 0x40C
39#define UDC_DEVIRQMSK_ADDR 0x410
40#define UDC_EPIRQSTS_ADDR 0x414
41#define UDC_EPIRQMSK_ADDR 0x418
42#define UDC_DEVLPM_ADDR 0x41C
43#define UDC_CSR_BUSY_ADDR 0x4f0
44#define UDC_SRST_ADDR 0x4fc
45#define UDC_CSR_ADDR 0x500
46
47
48
49#define UDC_EPCTL_MRXFLUSH (1 << 12)
50#define UDC_EPCTL_RRDY (1 << 9)
51#define UDC_EPCTL_CNAK (1 << 8)
52#define UDC_EPCTL_SNAK (1 << 7)
53#define UDC_EPCTL_NAK (1 << 6)
54#define UDC_EPCTL_P (1 << 3)
55#define UDC_EPCTL_F (1 << 1)
56#define UDC_EPCTL_S (1 << 0)
57#define UDC_EPCTL_ET_SHIFT 4
58
59#define UDC_EPCTL_ET_MASK 0x00000030
60
61#define UDC_EPCTL_ET_CONTROL 0
62#define UDC_EPCTL_ET_ISO 1
63#define UDC_EPCTL_ET_BULK 2
64#define UDC_EPCTL_ET_INTERRUPT 3
65
66
67
68#define UDC_EPSTS_XFERDONE (1 << 27)
69#define UDC_EPSTS_RSS (1 << 26)
70#define UDC_EPSTS_RCS (1 << 25)
71#define UDC_EPSTS_TXEMPTY (1 << 24)
72#define UDC_EPSTS_TDC (1 << 10)
73#define UDC_EPSTS_HE (1 << 9)
74#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
75#define UDC_EPSTS_BNA (1 << 7)
76#define UDC_EPSTS_IN (1 << 6)
77#define UDC_EPSTS_OUT_SHIFT 4
78
79#define UDC_EPSTS_OUT_MASK 0x00000030
80#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
81
82#define UDC_EPSTS_OUT_SETUP 2
83#define UDC_EPSTS_OUT_DATA 1
84
85
86
87#define UDC_DEVCFG_CSR_PRG (1 << 17)
88#define UDC_DEVCFG_SP (1 << 3)
89
90#define UDC_DEVCFG_SPD_HS 0x0
91#define UDC_DEVCFG_SPD_FS 0x1
92#define UDC_DEVCFG_SPD_LS 0x2
93
94
95
96#define UDC_DEVCTL_THLEN_SHIFT 24
97#define UDC_DEVCTL_BRLEN_SHIFT 16
98#define UDC_DEVCTL_CSR_DONE (1 << 13)
99#define UDC_DEVCTL_SD (1 << 10)
100#define UDC_DEVCTL_MODE (1 << 9)
101#define UDC_DEVCTL_BREN (1 << 8)
102#define UDC_DEVCTL_THE (1 << 7)
103#define UDC_DEVCTL_DU (1 << 4)
104#define UDC_DEVCTL_TDE (1 << 3)
105#define UDC_DEVCTL_RDE (1 << 2)
106#define UDC_DEVCTL_RES (1 << 0)
107
108
109
110#define UDC_DEVSTS_TS_SHIFT 18
111#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
112#define UDC_DEVSTS_ALT_SHIFT 8
113#define UDC_DEVSTS_INTF_SHIFT 4
114#define UDC_DEVSTS_CFG_SHIFT 0
115
116#define UDC_DEVSTS_TS_MASK 0xfffc0000
117#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
118#define UDC_DEVSTS_ALT_MASK 0x00000f00
119#define UDC_DEVSTS_INTF_MASK 0x000000f0
120#define UDC_DEVSTS_CFG_MASK 0x0000000f
121
122#define UDC_DEVSTS_ENUM_SPEED_FULL 1
123#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
124#define UDC_DEVSTS_ENUM_SPEED_LOW 2
125#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
126
127
128
129#define UDC_DEVINT_RWKP (1 << 7)
130#define UDC_DEVINT_ENUM (1 << 6)
131#define UDC_DEVINT_SOF (1 << 5)
132#define UDC_DEVINT_US (1 << 4)
133#define UDC_DEVINT_UR (1 << 3)
134#define UDC_DEVINT_ES (1 << 2)
135#define UDC_DEVINT_SI (1 << 1)
136#define UDC_DEVINT_SC (1 << 0)
137
138#define UDC_DEVINT_MSK 0x7f
139
140
141
142#define UDC_EPINT_IN_SHIFT 0
143#define UDC_EPINT_OUT_SHIFT 16
144#define UDC_EPINT_IN_EP0 (1 << 0)
145#define UDC_EPINT_OUT_EP0 (1 << 16)
146
147#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
148
149
150
151#define UDC_CSR_BUSY (1 << 0)
152
153
154
155#define UDC_PSRST (1 << 1)
156#define UDC_SRST (1 << 0)
157
158
159
160#define UDC_CSR_NE_NUM_SHIFT 0
161#define UDC_CSR_NE_DIR_SHIFT 4
162#define UDC_CSR_NE_TYPE_SHIFT 5
163#define UDC_CSR_NE_CFG_SHIFT 7
164#define UDC_CSR_NE_INTF_SHIFT 11
165#define UDC_CSR_NE_ALT_SHIFT 15
166#define UDC_CSR_NE_MAX_PKT_SHIFT 19
167
168#define UDC_CSR_NE_NUM_MASK 0x0000000f
169#define UDC_CSR_NE_DIR_MASK 0x00000010
170#define UDC_CSR_NE_TYPE_MASK 0x00000060
171#define UDC_CSR_NE_CFG_MASK 0x00000780
172#define UDC_CSR_NE_INTF_MASK 0x00007800
173#define UDC_CSR_NE_ALT_MASK 0x00078000
174#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
175
176#define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
177#define PCH_UDC_EPINT(in, num)\
178 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
179
180
181#define UDC_EP0IN_IDX 0
182#define UDC_EP0OUT_IDX 1
183#define UDC_EPIN_IDX(ep) (ep * 2)
184#define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
185#define PCH_UDC_EP0 0
186#define PCH_UDC_EP1 1
187#define PCH_UDC_EP2 2
188#define PCH_UDC_EP3 3
189
190
191#define PCH_UDC_EP_NUM 32
192#define PCH_UDC_USED_EP_NUM 4
193
194#define PCH_UDC_BRLEN 0x0F
195#define PCH_UDC_THLEN 0x1F
196
197#define UDC_EP0IN_BUFF_SIZE 16
198#define UDC_EPIN_BUFF_SIZE 256
199#define UDC_EP0OUT_BUFF_SIZE 16
200#define UDC_EPOUT_BUFF_SIZE 256
201
202#define UDC_EP0IN_MAX_PKT_SIZE 64
203#define UDC_EP0OUT_MAX_PKT_SIZE 64
204#define UDC_BULK_MAX_PKT_SIZE 512
205
206
207#define DMA_DIR_RX 1
208#define DMA_DIR_TX 2
209#define DMA_ADDR_INVALID (~(dma_addr_t)0)
210#define UDC_DMA_MAXPACKET 65536
211
212
213
214
215
216
217
218
219
/*
 * struct pch_udc_data_dma_desc - hardware DMA descriptor for data transfers
 * @status:	buffer-state / RX-TX status bits plus byte count (PCH_UDC_*)
 * @reserved:	not used by software
 * @dataptr:	32-bit DMA address of the data buffer
 * @next:	32-bit DMA address of the next descriptor in the chain
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
226
227
228
229
230
231
232
233
234
/*
 * struct pch_udc_stp_dma_desc - hardware DMA descriptor for SETUP packets
 * @status:	descriptor status bits
 * @reserved:	not used by software
 * @request:	8-byte USB SETUP packet deposited by the controller
 *
 * Packed so the in-memory layout matches what the hardware writes.
 */
struct pch_udc_stp_dma_desc {
	u32 status;
	u32 reserved;
	struct usb_ctrlrequest request;
} __attribute((packed));
240
241
242
243#define PCH_UDC_BUFF_STS 0xC0000000
244#define PCH_UDC_BS_HST_RDY 0x00000000
245#define PCH_UDC_BS_DMA_BSY 0x40000000
246#define PCH_UDC_BS_DMA_DONE 0x80000000
247#define PCH_UDC_BS_HST_BSY 0xC0000000
248
249#define PCH_UDC_RXTX_STS 0x30000000
250#define PCH_UDC_RTS_SUCC 0x00000000
251#define PCH_UDC_RTS_DESERR 0x10000000
252#define PCH_UDC_RTS_BUFERR 0x30000000
253
254#define PCH_UDC_DMA_LAST 0x08000000
255
256#define PCH_UDC_RXTX_BYTES 0x0000ffff
257
258
259
260
261
262
263
264
/*
 * struct pch_udc_cfg_data - current USB configuration state
 * @cur_cfg:	active configuration number
 * @cur_intf:	active interface number
 * @cur_alt:	active alternate setting
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
/*
 * struct pch_udc_ep - driver-side state for one endpoint
 * @ep:			embedded gadget-core endpoint
 * @td_stp_phys:	DMA address of the SETUP descriptor
 * @td_data_phys:	DMA address of the first data descriptor
 * @td_stp:		CPU pointer to the SETUP descriptor
 * @td_data:		CPU pointer to the first data descriptor
 * @dev:		owning device
 * @offset_addr:	base offset of this endpoint's register bank
 * @queue:		pending pch_udc_request list
 * @num:		endpoint number (0..15)
 * @in:			1 for IN direction, 0 for OUT
 * @halted:		endpoint currently halted/stalled
 * @epsts:		last sampled endpoint status bits
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	struct list_head queue;
	unsigned num:5,
		in:1,
		halted:1;
	unsigned long epsts;
};
301
302
303
304
305
306
307
308
309
/*
 * struct pch_vbus_gpio_data - VBUS GPIO sensing state
 * @port:		GPIO number used for VBUS detection (0 = unused)
 * @intr:		IRQ number bound to the GPIO (0 = polling only)
 * @irq_work_fall:	deferred handler for a falling VBUS edge
 * @irq_work_rise:	deferred handler for a rising VBUS edge
 */
struct pch_vbus_gpio_data {
	int port;
	int intr;
	struct work_struct irq_work_fall;
	struct work_struct irq_work_rise;
};
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
/*
 * struct pch_udc_dev - complete device-controller state
 * @gadget:		gadget exposed to the UDC core
 * @driver:		bound gadget (function) driver, if any
 * @pdev:		underlying PCI device
 * @ep:			endpoint table (IN/OUT interleaved, see UDC_EPIN_IDX)
 * @lock:		protects device/gadget state
 * @stall:		a stall response is pending/active
 * @prot_stall:		protocol stall requested
 * @suspended:		bus is suspended
 * @connected:		soft-connected to the host
 * @vbus_session:	VBUS session currently active
 * @set_cfg_not_acked:	SET_CONFIGURATION awaiting CSR_DONE ack
 * @waiting_zlp_ack:	control transfer waiting for zero-length-packet ack
 * @data_requests:	DMA pool for data descriptors
 * @stp_requests:	DMA pool for SETUP descriptors
 * @dma_addr:		DMA address bookkeeping for the driver's buffer
 * @setup_data:		last received SETUP packet
 * @base_addr:		ioremapped base of the controller registers
 * @cfg_data:		current configuration/interface/alt-setting
 * @vbus_gpio:		VBUS GPIO sensing state
 */
struct pch_udc_dev {
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct pci_dev *pdev;
	struct pch_udc_ep ep[PCH_UDC_EP_NUM];
	spinlock_t lock;
	unsigned
			stall:1,
			prot_stall:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct dma_pool *data_requests;
	struct dma_pool *stp_requests;
	dma_addr_t dma_addr;
	struct usb_ctrlrequest setup_data;
	void __iomem *base_addr;
	struct pch_udc_cfg_data cfg_data;
	struct pch_vbus_gpio_data vbus_gpio;
};
/* Map a usb_gadget back to its containing pch_udc_dev */
#define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
364
/* PCI BAR holding the UDC registers: Quark X1000 uses BAR 0, others BAR 1 */
#define PCH_UDC_PCI_BAR_QUARK_X1000 0
#define PCH_UDC_PCI_BAR 1

/* Supported PCI device/vendor IDs */
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808

#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808

/* Name reported for endpoint 0's IN side */
static const char ep0_string[] = "ep0in";
/* NOTE(review): name suggests this guards stall handling; its users are not
 * in this chunk — verify before relying on that. */
static DEFINE_SPINLOCK(udc_stall_spinlock);
/* Module parameter: true forces full-speed operation, false allows high speed */
static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
/*
 * struct pch_udc_request - driver-side wrapper around a usb_request
 * @req:		embedded gadget-core request
 * @td_data_phys:	DMA address of the first data descriptor
 * @td_data:		first descriptor of this request's DMA chain
 * @td_data_last:	last descriptor of the chain
 * @queue:		link on the owning endpoint's queue
 * @dma_going:		DMA transfer in flight
 * @dma_mapped:		buffer was DMA-mapped by this driver (must unmap)
 * @dma_done:		DMA transfer has completed
 * @chain_len:		number of descriptors in the chain
 * @buf:		driver-allocated bounce buffer, if one was needed
 * @dma:		DMA address of @buf (DMA_ADDR_INVALID if unused)
 */
struct pch_udc_request {
	struct usb_request req;
	dma_addr_t td_data_phys;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_data_dma_desc *td_data_last;
	struct list_head queue;
	unsigned dma_going:1,
		dma_mapped:1,
		dma_done:1;
	unsigned chain_len;
	void *buf;
	dma_addr_t dma;
};
408
/* Read a 32-bit device-global UDC register at offset @reg. */
static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}

/* Write @val to the 32-bit device-global UDC register at offset @reg. */
static inline void pch_udc_writel(struct pch_udc_dev *dev,
				  unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}

/* Read-modify-write: set @bitmask bits in device register @reg. */
static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}

/* Read-modify-write: clear @bitmask bits in device register @reg. */
static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}

/* Read a per-endpoint register; @reg is relative to the EP's register bank. */
static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}

/* Write a per-endpoint register; @reg is relative to the EP's register bank. */
static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				     unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}

/* Read-modify-write: set @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}

/* Read-modify-write: clear @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
458
459
460
461
462
/*
 * pch_udc_csr_busy() - Spin (bounded, ~200 iterations) until the CSR
 * interface reports not-busy; logs an error on timeout instead of failing.
 */
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int count = 200;

	/* Wait till idle */
	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
		&& --count)
		cpu_relax();
	if (!count)
		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
}

/*
 * pch_udc_write_csr() - Write @val to the CSR slot for endpoint index @ep,
 * waiting for the CSR interface to be idle before and after the access.
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}

/*
 * pch_udc_read_csr() - Read the CSR slot for endpoint index @ep.
 *
 * The first read is discarded (apparently a required dummy access for this
 * controller); the second read returns the value.
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
507
508
509
510
511
/* Signal remote wakeup to the host: pulse the RES (resume) bit for ~1 ms. */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}

/* Return the current USB frame number from the device status register. */
static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
{
	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
}

/* Clear the self-powered bit in the device configuration register. */
static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}

/* Set the self-powered bit in the device configuration register. */
static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}

/* Soft-disconnect from the host (set the SD bit). */
static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
}

/*
 * pch_udc_clear_disconnect() - Reconnect to the host.
 *
 * Drops soft-disconnect while briefly asserting resume, the same
 * sequence used at the end of pch_udc_reconnect().
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Assert resume while dropping soft-disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
570
571
572
573
574
575
static void pch_udc_init(struct pch_udc_dev *dev);

/*
 * pch_udc_reconnect() - Fully re-initialize the controller, re-enable the
 * reset/enumeration interrupts, then clear soft-disconnect (with a brief
 * resume pulse) so the host re-enumerates the device.
 */
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* Re-enable the USB-reset and speed-enumeration interrupts
	 * (mask register: cleared bit == enabled interrupt) */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* Clear soft-disconnect while asserting resume */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
593
594
595
596
597
598
599
600
/*
 * pch_udc_vbus_session() - React to a VBUS session change.
 * @is_active: nonzero when VBUS appeared (reconnect to the host);
 *	zero when it vanished (notify the gadget driver under the device
 *	lock, then soft-disconnect).
 */
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
					  int is_active)
{
	if (is_active) {
		pch_udc_reconnect(dev);
		dev->vbus_session = 1;
	} else {
		if (dev->driver && dev->driver->disconnect) {
			spin_lock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_unlock(&dev->lock);
		}
		pch_udc_set_disconnect(dev);
		dev->vbus_session = 0;
	}
}

/*
 * pch_udc_ep_set_stall() - Stall the endpoint.  IN endpoints are flushed
 * (F bit) first so no stale data can be transmitted.
 */
static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
{
	if (ep->in) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	} else {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	}
}

/* Unstall the endpoint, then clear its NAK state via CNAK. */
static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
{
	/* Clear the stall */
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	/* Clear NAK by writing CNAK */
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
}
643
644
645
646
647
648
/* Program the endpoint transfer type (control/iso/bulk/interrupt bits). */
static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
					    u8 type)
{
	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
}

/*
 * pch_udc_ep_set_bufsz() - Program the endpoint buffer size.
 * IN endpoints keep it in the low half of BUFIN_FRAMENUM; OUT endpoints in
 * the high half of BUFOUT_MAXPKT (the low half holds max packet size).
 */
static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
						 u32 buf_size, u32 ep_in)
{
	u32 data;
	if (ep_in) {
		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
		data = (data & 0xffff0000) | (buf_size & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
	} else {
		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
		data = (buf_size << 16) | (data & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
	}
}

/* Program the endpoint's max packet size (low half of BUFOUT_MAXPKT). */
static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
{
	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
	data = (data & 0xffff0000) | (pkt_size & 0xffff);
	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
}

/* Set the SETUP-descriptor pointer register for this endpoint. */
static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
}

/* Set the data-descriptor pointer register for this endpoint. */
static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
}

/* Set the poll-demand bit: ask the controller to process IN data now. */
static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
}

/* Set receive-ready: the endpoint may accept OUT data. */
static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}

/* Clear receive-ready: stop accepting OUT data on this endpoint. */
static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
734
735
736
737
738
739
740
741
742
/*
 * pch_udc_set_dma() - Enable a DMA engine.
 * @dir: DMA_DIR_RX (OUT/receive) or DMA_DIR_TX (IN/transmit);
 *	any other value is silently ignored.
 */
static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
{
	if (dir == DMA_DIR_RX)
		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
	else if (dir == DMA_DIR_TX)
		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
}

/*
 * pch_udc_clear_dma() - Disable a DMA engine.
 * @dir: DMA_DIR_RX or DMA_DIR_TX; any other value is silently ignored.
 */
static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
{
	if (dir == DMA_DIR_RX)
		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
	else if (dir == DMA_DIR_TX)
		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
}

/* Acknowledge that the CSR endpoint table has been programmed. */
static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
}
776
777
778
779
780
781
/* Mask (disable) the device interrupts named by @mask. */
static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
					    u32 mask)
{
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
}

/* Unmask (enable) the device interrupts named by @mask. */
static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
					   u32 mask)
{
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
}

/* Mask (disable) the endpoint interrupts named by @mask. */
static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
}

/* Unmask (enable) the endpoint interrupts named by @mask. */
static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
					      u32 mask)
{
	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
}

/* Return the pending device interrupt status bits. */
static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
}
830
831
832
833
834
835
/* Acknowledge device interrupts (write-1-to-clear of @val). */
static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
						 u32 val)
{
	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
}

/* Return the pending endpoint interrupt status bits. */
static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
}

/* Acknowledge endpoint interrupts (write-1-to-clear of @val). */
static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
					     u32 val)
{
	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
}

/* Return the raw device status register. */
static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
}

/* Return the endpoint's control register. */
static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
}
882
883
884
885
886
887
888static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
889{
890 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
891}
892
893
894
895
896
897
898static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
899{
900 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
901}
902
903
904
905
906
907
908static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
909 u32 stat)
910{
911 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
912}
913
914
915
916
917
918
/* Force the endpoint to NAK all tokens (set SNAK). */
static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
}

/*
 * pch_udc_ep_clear_nak() - Clear the endpoint's NAK condition.
 *
 * No-op if NAK is not currently set.  For OUT endpoints the receive FIFO
 * must drain first, so poll (up to 10000 x 5 us) for MRXFIFO-empty.  CNAK
 * is then written repeatedly until the NAK bit actually clears —
 * presumably the hardware can ignore a single attempt.  Timeouts are only
 * logged; the function never fails.
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
954
955
956
957
958
959
960
961
962static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
963{
964 if (dir) {
965 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
966 return;
967 }
968}
969
970
971
972
973
974
/*
 * pch_udc_ep_enable() - Configure and enable an endpoint in hardware.
 * @ep:		endpoint to program
 * @cfg:	current configuration/interface/alt-setting
 * @desc:	USB endpoint descriptor supplying type and max packet size
 *
 * Programs transfer type, buffer size, and max packet size; NAKs the
 * endpoint and flushes its FIFO; then writes the composite endpoint entry
 * into the controller's CSR table (direction-specific index).
 */
static void pch_udc_ep_enable(struct pch_udc_ep *ep,
			       struct pch_udc_cfg_data *cfg,
			       const struct usb_endpoint_descriptor *desc)
{
	u32 val = 0;
	u32 buff_size = 0;

	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
	if (ep->in)
		buff_size = UDC_EPIN_BUFF_SIZE;
	else
		buff_size = UDC_EPOUT_BUFF_SIZE;
	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
	pch_udc_ep_set_nak(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	/* Compose the CSR endpoint-table entry */
	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
		UDC_CSR_NE_TYPE_SHIFT) |
	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;

	if (ep->in)
		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
	else
		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
}
1005
1006
1007
1008
1009
/*
 * pch_udc_ep_disable() - Quiesce an endpoint in hardware.
 *
 * IN endpoints are flushed, set to NAK, and their IN status bit is
 * acknowledged; OUT endpoints are just set to NAK.  The descriptor
 * pointer is cleared in both cases.
 */
static void pch_udc_ep_disable(struct pch_udc_ep *ep)
{
	if (ep->in) {
		/* flush the fifo */
		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
	} else {
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
	}
	/* reset desc pointer */
	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
}
1025
1026
1027
1028
1029
/*
 * pch_udc_wait_ep_stall() - Poll (up to 10000 x 5 us) until the endpoint's
 * stall bit clears; logs an error on timeout instead of failing.
 */
static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
{
	unsigned int count = 10000;

	/* Wait till idle */
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
		udelay(5);
	if (!count)
		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
}
1040
1041
1042
1043
1044
/*
 * pch_udc_init() - Soft-reset and configure the controller.
 *
 * Pulses the soft/PHY reset bits, masks and acknowledges all device and
 * endpoint interrupts, selects full- or high-speed per the speed_fs module
 * parameter, and programs burst/threshold lengths and device-mode control
 * bits.  Interrupts are left masked; callers unmask what they need.
 */
static void pch_udc_init(struct pch_udc_dev *dev)
{
	if (NULL == dev) {
		pr_err("%s: Invalid address\n", __func__);
		return;
	}
	/* Soft Reset and Reset PHY */
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
	mdelay(1);
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
	mdelay(1);
	/* mask and clear all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);

	/* mask and clear all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);

	/* enable dynamic CSR programming, self-powered, and chosen speed */
	if (speed_fs)
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
	else /* defaults to high speed */
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
			UDC_DEVCTL_THE);
}
1079
1080
1081
1082
1083
/*
 * pch_udc_exit() - Quiesce the controller: mask all device and endpoint
 * interrupts and soft-disconnect from the host.
 */
static void pch_udc_exit(struct pch_udc_dev *dev)
{
	/* mask all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	/* mask all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	/* put device in disconnected state */
	pch_udc_set_disconnect(dev);
}
1093
1094
1095
1096
1097
1098
1099
1100
1101
/*
 * pch_udc_pcd_get_frame() - usb_gadget_ops.get_frame handler.
 *
 * Return: current frame number, or -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	return pch_udc_get_frame(dev);
}

/*
 * pch_udc_pcd_wakeup() - usb_gadget_ops.wakeup handler.
 *
 * Signals remote wakeup under the device lock (IRQ-safe).
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
{
	struct pch_udc_dev *dev;
	unsigned long flags;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	pch_udc_rmt_wakeup(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
/*
 * pch_udc_pcd_selfpowered() - usb_gadget_ops.set_selfpowered handler.
 *
 * Records the flag in the gadget and mirrors it into the device
 * configuration register.
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	gadget->is_selfpowered = (value != 0);
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	if (value)
		pch_udc_set_selfpowered(dev);
	else
		pch_udc_clear_selfpowered(dev);
	return 0;
}
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
/*
 * pch_udc_pcd_pullup() - usb_gadget_ops.pullup handler (soft connect).
 *
 * @is_on nonzero: reconnect to the host.  Zero: notify the gadget driver
 * of the disconnect (under the device lock) and soft-disconnect.
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	if (is_on) {
		pch_udc_reconnect(dev);
	} else {
		if (dev->driver && dev->driver->disconnect) {
			spin_lock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_unlock(&dev->lock);
		}
		pch_udc_set_disconnect(dev);
	}

	return 0;
}
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
/*
 * pch_udc_pcd_vbus_session() - usb_gadget_ops.vbus_session handler.
 *
 * Delegates to pch_udc_vbus_session().
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	pch_udc_vbus_session(dev, is_active);
	return 0;
}

/*
 * pch_udc_pcd_vbus_draw() - usb_gadget_ops.vbus_draw handler.
 *
 * This controller has no way to report/limit VBUS current draw.
 * Return: always -EOPNOTSUPP.
 */
static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	return -EOPNOTSUPP;
}
1227
static int pch_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int pch_udc_stop(struct usb_gadget *g);

/* usb_gadget_ops vtable wiring the pcd_* handlers into the gadget core */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
	.udc_start = pch_udc_start,
	.udc_stop = pch_udc_stop,
};
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
/*
 * pch_vbus_gpio_get_value() - Sample the VBUS GPIO.
 *
 * Return: 1 if VBUS is high, 0 if low, -1 when no VBUS GPIO is configured.
 * NOTE(review): port == 0 is treated as "not configured", so a board whose
 * VBUS sense line is GPIO 0 cannot be used — verify against supported boards.
 */
static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
{
	int vbus = 0;

	if (dev->vbus_gpio.port)
		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
	else
		vbus = -1;

	return vbus;
}
1263
1264
1265
1266
1267
1268
1269
1270static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1271{
1272 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1273 struct pch_vbus_gpio_data, irq_work_fall);
1274 struct pch_udc_dev *dev =
1275 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1276 int vbus_saved = -1;
1277 int vbus;
1278 int count;
1279
1280 if (!dev->vbus_gpio.port)
1281 return;
1282
1283 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1284 count++) {
1285 vbus = pch_vbus_gpio_get_value(dev);
1286
1287 if ((vbus_saved == vbus) && (vbus == 0)) {
1288 dev_dbg(&dev->pdev->dev, "VBUS fell");
1289 if (dev->driver
1290 && dev->driver->disconnect) {
1291 dev->driver->disconnect(
1292 &dev->gadget);
1293 }
1294 if (dev->vbus_gpio.intr)
1295 pch_udc_init(dev);
1296 else
1297 pch_udc_reconnect(dev);
1298 return;
1299 }
1300 vbus_saved = vbus;
1301 mdelay(PCH_VBUS_INTERVAL);
1302 }
1303}
1304
1305
1306
1307
1308
1309
1310
1311static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1312{
1313 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1314 struct pch_vbus_gpio_data, irq_work_rise);
1315 struct pch_udc_dev *dev =
1316 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1317 int vbus;
1318
1319 if (!dev->vbus_gpio.port)
1320 return;
1321
1322 mdelay(PCH_VBUS_INTERVAL);
1323 vbus = pch_vbus_gpio_get_value(dev);
1324
1325 if (vbus == 1) {
1326 dev_dbg(&dev->pdev->dev, "VBUS rose");
1327 pch_udc_reconnect(dev);
1328 return;
1329 }
1330}
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
/*
 * pch_vbus_gpio_irq() - VBUS GPIO edge interrupt handler.
 *
 * Schedules the rise or fall work item depending on the current GPIO
 * level; the heavy lifting (debounce, reconnect/disconnect) happens in
 * process context.
 * Return: IRQ_HANDLED, or IRQ_NONE if VBUS sensing is not configured.
 */
static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;

	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
		return IRQ_NONE;

	if (pch_vbus_gpio_get_value(dev))
		schedule_work(&dev->vbus_gpio.irq_work_rise);
	else
		schedule_work(&dev->vbus_gpio.irq_work_fall);

	return IRQ_HANDLED;
}
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
/*
 * pch_vbus_gpio_init() - Set up VBUS sensing on @vbus_gpio_port.
 *
 * Validates and requests the GPIO, configures it as an input, and
 * registers the fall work item.  If the GPIO maps to an IRQ, also
 * installs an edge-triggered interrupt handler and the rise work item;
 * failure to get the IRQ is only logged (polling still works).
 *
 * Return: 0 on success (including the no-IRQ case), -EINVAL when the port
 * is unset (< 0), invalid, or cannot be requested.
 */
static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
{
	int err;
	int irq_num = 0;

	dev->vbus_gpio.port = 0;
	dev->vbus_gpio.intr = 0;

	if (vbus_gpio_port <= -1)
		return -EINVAL;

	err = gpio_is_valid(vbus_gpio_port);
	if (!err) {
		pr_err("%s: gpio port %d is invalid\n",
			__func__, vbus_gpio_port);
		return -EINVAL;
	}

	err = gpio_request(vbus_gpio_port, "pch_vbus");
	if (err) {
		pr_err("%s: can't request gpio port %d, err: %d\n",
			__func__, vbus_gpio_port, err);
		return -EINVAL;
	}

	dev->vbus_gpio.port = vbus_gpio_port;
	gpio_direction_input(vbus_gpio_port);
	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);

	irq_num = gpio_to_irq(vbus_gpio_port);
	if (irq_num > 0) {
		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
			"vbus_detect", dev);
		if (!err) {
			dev->vbus_gpio.intr = irq_num;
			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
				pch_vbus_gpio_work_rise);
		} else {
			pr_err("%s: can't request irq %d, err: %d\n",
				__func__, irq_num, err);
		}
	}

	return 0;
}
1411
1412
1413
1414
1415
/* Release the VBUS-sense IRQ and GPIO acquired by pch_vbus_gpio_init(). */
static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
{
	if (dev->vbus_gpio.intr)
		free_irq(dev->vbus_gpio.intr, dev);

	if (dev->vbus_gpio.port)
		gpio_free(dev->vbus_gpio.port);
}
1424
1425
1426
1427
1428
1429
1430
1431
/*
 * complete_req() - Remove @req from its endpoint queue and give it back to
 *	the gadget driver.
 * @ep:		endpoint the request was queued on
 * @req:	request to complete
 * @status:	completion status to report, unless the request already
 *		carries a final status
 *
 * Undoes any DMA mapping this driver created: either the caller's buffer
 * was mapped directly (req->dma == DMA_ADDR_INVALID path, unmap via
 * req->req.dma) or a driver-allocated bounce buffer was used (unmap via
 * req->dma, copy data back for OUT, free the bounce buffer).
 *
 * Drops the device lock around the gadget-driver callback — the endpoint
 * is marked halted for the duration so no new I/O is started on it.
 */
static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
								 int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct pch_udc_dev *dev;
	unsigned halted = ep->halted;

	list_del_init(&req->queue);

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->dma_mapped) {
		if (req->dma == DMA_ADDR_INVALID) {
			/* caller's buffer was mapped directly */
			if (ep->in)
				dma_unmap_single(&dev->pdev->dev, req->req.dma,
						 req->req.length,
						 DMA_TO_DEVICE);
			else
				dma_unmap_single(&dev->pdev->dev, req->req.dma,
						 req->req.length,
						 DMA_FROM_DEVICE);
			req->req.dma = DMA_ADDR_INVALID;
		} else {
			/* driver-allocated bounce buffer was used */
			if (ep->in)
				dma_unmap_single(&dev->pdev->dev, req->dma,
						 req->req.length,
						 DMA_TO_DEVICE);
			else {
				dma_unmap_single(&dev->pdev->dev, req->dma,
						 req->req.length,
						 DMA_FROM_DEVICE);
				/* OUT: copy received data back to the caller */
				memcpy(req->req.buf, req->buf, req->req.length);
			}
			kfree(req->buf);
			req->dma = DMA_ADDR_INVALID;
		}
		req->dma_mapped = 0;
	}
	ep->halted = 1;
	spin_unlock(&dev->lock);
	if (!ep->in)
		pch_udc_ep_clear_rrdy(ep);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
1484
1485
1486
1487
1488
1489static void empty_req_queue(struct pch_udc_ep *ep)
1490{
1491 struct pch_udc_request *req;
1492
1493 ep->halted = 1;
1494 while (!list_empty(&ep->queue)) {
1495 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1496 complete_req(ep, req, -ESHUTDOWN);
1497 }
1498}
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1510 struct pch_udc_request *req)
1511{
1512 struct pch_udc_data_dma_desc *td = req->td_data;
1513 unsigned i = req->chain_len;
1514
1515 dma_addr_t addr2;
1516 dma_addr_t addr = (dma_addr_t)td->next;
1517 td->next = 0x00;
1518 for (; i > 1; --i) {
1519
1520 td = phys_to_virt(addr);
1521 addr2 = (dma_addr_t)td->next;
1522 dma_pool_free(dev->data_requests, td, addr);
1523 td->next = 0x00;
1524 addr = addr2;
1525 }
1526 req->chain_len = 1;
1527}
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
/*
 * pch_udc_create_dma_chain() - Build a descriptor chain covering the whole
 *	request, @buf_len bytes per descriptor.
 * @ep:		endpoint the request belongs to
 * @req:	request to build the chain for (req->req.length bytes total)
 * @buf_len:	bytes handled by each descriptor (typically maxpacket)
 * @gfp_flags:	allocation flags for dma_pool_alloc()
 *
 * Reuses req->td_data as the first descriptor (freeing any previous chain
 * first), allocates additional descriptors as needed, marks the final one
 * with PCH_UDC_DMA_LAST, and links it back to the chain head.  Each
 * descriptor's status carries BS_HST_BSY plus its byte count.
 *
 * Return: 0 on success, -ENOMEM if a descriptor allocation fails (any
 * partially built chain is freed and chain_len reset to 1).
 */
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
				    struct pch_udc_request *req,
				    unsigned long buf_len,
				    gfp_t gfp_flags)
{
	struct pch_udc_data_dma_desc *td = req->td_data, *last;
	unsigned long bytes = req->req.length, i = 0;
	dma_addr_t dma_addr;
	unsigned len = 1;

	if (req->chain_len > 1)
		pch_udc_free_dma_chain(ep->dev, req);

	/* pick whichever mapping (direct or bounce buffer) is in use */
	if (req->dma == DMA_ADDR_INVALID)
		td->dataptr = req->req.dma;
	else
		td->dataptr = req->dma;

	td->status = PCH_UDC_BS_HST_BSY;
	for (; ; bytes -= buf_len, ++len) {
		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
		if (bytes <= buf_len)
			break;
		last = td;
		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
		if (!td)
			goto nomem;
		i += buf_len;
		td->dataptr = req->td_data->dataptr + i;
		last->next = dma_addr;
	}

	req->td_data_last = td;
	td->status |= PCH_UDC_DMA_LAST;
	td->next = req->td_data_phys;	/* close the ring back to the head */
	req->chain_len = len;
	return 0;

nomem:
	if (len > 1) {
		req->chain_len = len;
		pch_udc_free_dma_chain(ep->dev, req);
	}
	req->chain_len = 1;
	return -ENOMEM;
}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
/*
 * prepare_dma() - Create the descriptor chain for a request and, for IN
 *	endpoints, mark the first descriptor ready for the host.
 * @ep:		reference to the endpoint structure
 * @req:	reference to the request
 * @gfp:	allocation flags
 *
 * Return: 0 on success, error code from pch_udc_create_dma_chain() otherwise
 */
static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
		       gfp_t gfp)
{
	int retval;

	/* split the transfer into maxpacket-sized descriptors */
	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
	if (retval) {
		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
		return retval;
	}
	if (ep->in)
		req->td_data->status = (req->td_data->status &
					~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
	return 0;
}
1616
1617
1618
1619
1620
1621
1622
/*
 * process_zlp() - Handle a zero-length-packet request on the control
 *	endpoint: complete it immediately and acknowledge any pending
 *	SET_CONFIGURATION/SET_INTERFACE or status-stage handshake.
 * @ep:		reference to the endpoint structure
 * @req:	reference to the (zero-length) request
 */
static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
{
	struct pch_udc_dev *dev = ep->dev;

	/* zero-length transfers need no DMA; complete right away */
	complete_req(ep, req, 0);

	/*
	 * If a SET_CONFIG/SET_INTF interrupt deferred its acknowledgement
	 * until the gadget driver queued this ZLP, ack it now.
	 */
	if (dev->set_cfg_not_acked) {
		pch_udc_set_csr_done(dev);
		dev->set_cfg_not_acked = 0;
	}
	/* setup and status stages are also handled by ep0in */
	if (!dev->stall && dev->waiting_zlp_ack) {
		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
		dev->waiting_zlp_ack = 0;
	}
}
1643
1644
1645
1646
1647
1648
/*
 * pch_udc_start_rxrequest() - Program an OUT (receive) request's descriptor
 *	chain into the endpoint and start RX DMA.
 * @ep:		reference to the endpoint structure
 * @req:	reference to the request to receive into
 *
 * NOTE(review): the hardware enable sequence below (ddptr, interrupts, DMA
 * enable, clear NAK, set RRDY) appears order-sensitive; do not reorder.
 */
static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
				    struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td_data;

	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	td_data = req->td_data;
	/* hand every descriptor in the chain to the host/DMA engine */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	/* point the endpoint at the first descriptor and go */
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	req->dma_going = 1;
	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_clear_nak(ep);
	pch_udc_ep_set_rrdy(ep);
}
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
/*
 * pch_udc_pcd_ep_enable() - usb_ep_ops.enable: configure and enable an
 *	endpoint according to the given descriptor.
 * @usbep:	reference to the endpoint structure
 * @desc:	endpoint descriptor supplied by the gadget driver
 *
 * Return: 0 on success; -EINVAL for bad arguments or ep0; -ESHUTDOWN when
 * no gadget driver is bound or the link speed is unknown.
 */
static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
				 const struct usb_endpoint_descriptor *desc)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;

	/* ep0 is managed internally and may not be enabled by the gadget */
	if (!usbep || (usbep->name == ep0_string) || !desc ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;
	ep->halted = 0;
	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
/*
 * pch_udc_pcd_ep_disable() - usb_ep_ops.disable: disable an endpoint and
 *	flush its request queue (completing each request with -ESHUTDOWN).
 * @usbep:	reference to the endpoint structure
 *
 * Return: 0 on success, -EINVAL for ep0 or an endpoint that is not enabled.
 */
static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	/* ep0 may not be disabled; !desc means it was never enabled */
	if ((usbep->name == ep0_string) || !ep->ep.desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, iflags);
	empty_req_queue(ep);
	ep->halted = 1;
	pch_udc_ep_disable(ep);
	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	ep->ep.desc = NULL;
	INIT_LIST_HEAD(&ep->queue);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
/*
 * pch_udc_alloc_request() - usb_ep_ops.alloc_request: allocate a request
 *	and, when the device supports DMA, its first DMA descriptor.
 * @usbep:	reference to the endpoint structure
 * @gfp:	allocation flags
 *
 * Return: pointer to the embedded usb_request, or NULL on allocation
 * failure or NULL @usbep.
 */
static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
						 gfp_t gfp)
{
	struct pch_udc_request *req;
	struct pch_udc_ep *ep;
	struct pch_udc_data_dma_desc *dma_desc;

	if (!usbep)
		return NULL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	req = kzalloc(sizeof *req, gfp);
	if (!req)
		return NULL;
	req->req.dma = DMA_ADDR_INVALID;
	req->dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);
	/* without a device DMA buffer, no descriptor is needed */
	if (!ep->dev->dma_addr)
		return &req->req;

	/* first descriptor of the (single-entry) chain for this request */
	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
				  &req->td_data_phys);
	if (NULL == dma_desc) {
		kfree(req);
		return NULL;
	}

	/* host busy until the request is actually queued */
	dma_desc->status |= PCH_UDC_BS_HST_BSY;
	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
	req->td_data = dma_desc;
	req->td_data_last = dma_desc;
	req->chain_len = 1;
	return &req->req;
}
1784
1785
1786
1787
1788
1789
1790
/*
 * pch_udc_free_request() - usb_ep_ops.free_request: free a request and its
 *	DMA descriptor chain.
 * @usbep:	reference to the endpoint structure
 * @usbreq:	reference to the request being released
 */
static void pch_udc_free_request(struct usb_ep *usbep,
				 struct usb_request *usbreq)
{
	struct pch_udc_ep *ep;
	struct pch_udc_request *req;
	struct pch_udc_dev *dev;

	if (!usbep || !usbreq)
		return;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	req = container_of(usbreq, struct pch_udc_request, req);
	dev = ep->dev;
	/* freeing a still-queued request indicates a gadget driver bug */
	if (!list_empty(&req->queue))
		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
			__func__, usbep->name, req);
	if (req->td_data != NULL) {
		/* extra descriptors first, then the request's own one */
		if (req->chain_len > 1)
			pch_udc_free_dma_chain(ep->dev, req);
		dma_pool_free(ep->dev->data_requests, req->td_data,
			      req->td_data_phys);
	}
	kfree(req);
}
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1827 gfp_t gfp)
1828{
1829 int retval = 0;
1830 struct pch_udc_ep *ep;
1831 struct pch_udc_dev *dev;
1832 struct pch_udc_request *req;
1833 unsigned long iflags;
1834
1835 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1836 return -EINVAL;
1837 ep = container_of(usbep, struct pch_udc_ep, ep);
1838 dev = ep->dev;
1839 if (!ep->ep.desc && ep->num)
1840 return -EINVAL;
1841 req = container_of(usbreq, struct pch_udc_request, req);
1842 if (!list_empty(&req->queue))
1843 return -EINVAL;
1844 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1845 return -ESHUTDOWN;
1846 spin_lock_irqsave(&dev->lock, iflags);
1847
1848 if (usbreq->length &&
1849 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1850 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1851 if (ep->in)
1852 usbreq->dma = dma_map_single(&dev->pdev->dev,
1853 usbreq->buf,
1854 usbreq->length,
1855 DMA_TO_DEVICE);
1856 else
1857 usbreq->dma = dma_map_single(&dev->pdev->dev,
1858 usbreq->buf,
1859 usbreq->length,
1860 DMA_FROM_DEVICE);
1861 } else {
1862 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1863 if (!req->buf) {
1864 retval = -ENOMEM;
1865 goto probe_end;
1866 }
1867 if (ep->in) {
1868 memcpy(req->buf, usbreq->buf, usbreq->length);
1869 req->dma = dma_map_single(&dev->pdev->dev,
1870 req->buf,
1871 usbreq->length,
1872 DMA_TO_DEVICE);
1873 } else
1874 req->dma = dma_map_single(&dev->pdev->dev,
1875 req->buf,
1876 usbreq->length,
1877 DMA_FROM_DEVICE);
1878 }
1879 req->dma_mapped = 1;
1880 }
1881 if (usbreq->length > 0) {
1882 retval = prepare_dma(ep, req, GFP_ATOMIC);
1883 if (retval)
1884 goto probe_end;
1885 }
1886 usbreq->actual = 0;
1887 usbreq->status = -EINPROGRESS;
1888 req->dma_done = 0;
1889 if (list_empty(&ep->queue) && !ep->halted) {
1890
1891 if (!usbreq->length) {
1892 process_zlp(ep, req);
1893 retval = 0;
1894 goto probe_end;
1895 }
1896 if (!ep->in) {
1897 pch_udc_start_rxrequest(ep, req);
1898 } else {
1899
1900
1901
1902
1903
1904 pch_udc_wait_ep_stall(ep);
1905 pch_udc_ep_clear_nak(ep);
1906 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1907 }
1908 }
1909
1910 if (req != NULL)
1911 list_add_tail(&req->queue, &ep->queue);
1912
1913probe_end:
1914 spin_unlock_irqrestore(&dev->lock, iflags);
1915 return retval;
1916}
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
/*
 * pch_udc_pcd_dequeue() - usb_ep_ops.dequeue: abort a queued request,
 *	completing it with -ECONNRESET if found on the endpoint's queue.
 * @usbep:	reference to the endpoint structure
 * @usbreq:	reference to the request to cancel
 *
 * Return: 0 if the request was found and cancelled, -EINVAL otherwise.
 */
static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
			       struct usb_request *usbreq)
{
	struct pch_udc_ep *ep;
	struct pch_udc_request *req;
	unsigned long flags;
	int ret = -EINVAL;

	/* container_of is pure pointer math; usbep is only dereferenced
	 * after the NULL check below (|| short-circuits)
	 */
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
		return ret;
	/* NOTE(review): req is immediately re-used as the loop cursor below;
	 * this initial assignment is effectively dead
	 */
	req = container_of(usbreq, struct pch_udc_request, req);
	spin_lock_irqsave(&ep->dev->lock, flags);
	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == usbreq) {
			pch_udc_ep_set_nak(ep);
			if (!list_empty(&req->queue))
				complete_req(ep, req, -ECONNRESET);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return ret;
}
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
/*
 * pch_udc_pcd_set_halt() - usb_ep_ops.set_halt: stall or un-stall an
 *	endpoint.
 * @usbep:	reference to the endpoint structure
 * @halt:	non-zero to set the stall, zero to clear it
 *
 * Return: 0 on success; -EINVAL for bad arguments; -ESHUTDOWN when no
 * driver is bound; -EAGAIN if requests are still queued.
 */
static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* stalling with pending requests is refused */
	if (list_empty(&ep->queue)) {
		if (halt) {
			/* remember ep0 stall so the status stage is stalled */
			if (ep->num == PCH_UDC_EP0)
				ep->dev->stall = 1;
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(
				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_ep_clear_stall(ep);
		}
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
/*
 * pch_udc_pcd_set_wedge() - usb_ep_ops.set_wedge: stall an endpoint so that
 *	only a reset/reconfiguration clears it (prot_stall keeps the stall
 *	latched against CLEAR_FEATURE from the host).
 * @usbep:	reference to the endpoint structure
 *
 * Return: 0 on success; -EINVAL for bad arguments; -ESHUTDOWN when no
 * driver is bound; -EAGAIN if requests are still queued.
 */
static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else {
		if (ep->num == PCH_UDC_EP0)
			ep->dev->stall = 1;
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
		/* latch the stall: interrupt handlers re-stall while set */
		ep->dev->prot_stall = 1;
		ret = 0;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
2035
2036
2037
2038
2039
2040static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2041{
2042 struct pch_udc_ep *ep;
2043
2044 if (!usbep)
2045 return;
2046
2047 ep = container_of(usbep, struct pch_udc_ep, ep);
2048 if (ep->ep.desc || !ep->num)
2049 pch_udc_ep_fifo_flush(ep, ep->in);
2050}
2051
/* Endpoint operations handed to the gadget core for every endpoint;
 * fifo_status is not supported by this controller.
 */
static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable		= pch_udc_pcd_ep_enable,
	.disable	= pch_udc_pcd_ep_disable,
	.alloc_request	= pch_udc_alloc_request,
	.free_request	= pch_udc_free_request,
	.queue		= pch_udc_pcd_queue,
	.dequeue	= pch_udc_pcd_dequeue,
	.set_halt	= pch_udc_pcd_set_halt,
	.set_wedge	= pch_udc_pcd_set_wedge,
	.fifo_status	= NULL,
	.fifo_flush	= pch_udc_pcd_fifo_flush,
};
2064
2065
2066
2067
2068
/*
 * pch_udc_init_setup_buff() - Re-arm the SETUP descriptor: fill the request
 *	area with 0xFF and mark the descriptor host-ready.
 * @td_stp:	reference to the SETUP DMA descriptor (may be NULL)
 */
static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
{
	/* monotonically increasing marker written into the reserved field;
	 * presumably a debugging aid to tell successive SETUPs apart —
	 * TODO(review): confirm no hardware meaning
	 */
	static u32 pky_marker;

	if (!td_stp)
		return;
	td_stp->reserved = ++pky_marker;
	/* 0xFF fill makes a stale/unwritten SETUP packet recognisable */
	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
	td_stp->status = PCH_UDC_BS_HST_RDY;
}
2079
2080
2081
2082
2083
2084
/*
 * pch_udc_start_next_txrequest() - Start TX DMA for the first queued
 *	request on an IN endpoint, if one is ready and not already running.
 * @ep:	reference to the endpoint structure
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	/* a poll-demand already in progress: nothing to do */
	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	/* clear the descriptor pointer before walking the chain */
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	/* hand every descriptor in the chain to the host/DMA engine */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}
2119
2120
2121
2122
2123
/*
 * pch_udc_complete_transfer() - Complete the head IN request once its last
 *	DMA descriptor reports DMA-done with success status.
 * @ep:	reference to the endpoint structure
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	/* not finished yet: leave the request on the queue */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	     PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
			(req->td_data_last->status & PCH_UDC_RXTX_STS),
			(int)(ep->epsts));
		return;
	}

	/* IN transfer done: everything requested was sent */
	req->req.actual = req->req.length;
	/* reclaim the descriptors (host-busy) before handing req back */
	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	complete_req(ep, req, 0);
	req->dma_going = 0;
	if (!list_empty(&ep->queue)) {
		/* more queued work: keep the endpoint armed */
		pch_udc_wait_ep_stall(ep);
		pch_udc_ep_clear_nak(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	} else {
		pch_udc_disable_ep_interrupts(ep->dev,
					      PCH_UDC_EPINT(ep->in, ep->num));
	}
}
2159
2160
2161
2162
2163
/*
 * pch_udc_complete_receiver() - Complete the head OUT request: walk the
 *	descriptor chain to find the completed LAST descriptor, extract the
 *	received byte count, complete the request and start the next one.
 * @ep:	reference to the endpoint structure
 */
static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;
	unsigned int count;
	struct pch_udc_data_dma_desc *td;
	dma_addr_t addr;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_set_ddptr(ep, 0);
	/* short packets may finish on an earlier descriptor; if the last
	 * one is done start there, otherwise scan from the first
	 */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
	    PCH_UDC_BS_DMA_DONE)
		td = req->td_data_last;
	else
		td = req->td_data;

	while (1) {
		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
				"epstatus=0x%08x\n",
				(req->td_data->status & PCH_UDC_RXTX_STS),
				(int)(ep->epsts));
			return;
		}
		/* the completed LAST descriptor carries the byte count */
		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
			if (td->status & PCH_UDC_DMA_LAST) {
				count = td->status & PCH_UDC_RXTX_BYTES;
				break;
			}
		if (td == req->td_data_last) {
			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
			return;
		}
		addr = (dma_addr_t)td->next;
		td = phys_to_virt(addr);
	}

	/* a count of 0 on an exactly max-sized transfer means a full wrap */
	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
		count = UDC_DMA_MAXPACKET;
	req->td_data->status |= PCH_UDC_DMA_LAST;
	td->status |= PCH_UDC_BS_HST_BSY;

	req->dma_going = 0;
	req->req.actual = count;
	complete_req(ep, req, 0);
	/* queue the remaining request, if any */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
		pch_udc_start_rxrequest(ep, req);
	}
}
2219
2220
2221
2222
2223
2224
2225
/*
 * pch_udc_svc_data_in() - Service an IN-endpoint interrupt: handle stall
 *	conditions, transfer completion, and kick the next queued request.
 * @dev:	reference to the device structure
 * @ep_num:	endpoint number being serviced
 */
static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
	/* status was cached by pch_udc_read_all_epstatus(); consume it */
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
		return;
	/* buffer-not-available and host-error conditions are ignored here */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	/* host sent SET_FEATURE(HALT): stall the endpoint */
	if (epsts & UDC_EPSTS_RSS) {
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	/* host sent CLEAR_FEATURE(HALT): honour it unless wedged */
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	/* transmit DMA completion */
	if (epsts & UDC_EPSTS_TDC)
		pch_udc_complete_transfer(ep);
	/* IN token received with no transfer pending: start the next one */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2264
2265
2266
2267
2268
2269
/*
 * pch_udc_svc_data_out() - Service an OUT-endpoint interrupt: restart
 *	transfers on buffer-not-available, handle stall conditions and
 *	complete received data.
 * @dev:	reference to the device structure
 * @ep_num:	endpoint number being serviced
 */
static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_request *req = NULL;

	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
	/* status was cached by pch_udc_read_all_epstatus(); consume it */
	epsts = ep->epsts;
	ep->epsts = 0;

	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
		/* next request */
		req = list_entry(ep->queue.next, struct pch_udc_request,
				 queue);
		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
		     PCH_UDC_BS_DMA_DONE) {
			/* BNA with an un-started request: (re)arm RX DMA */
			if (!req->dma_going)
				pch_udc_start_rxrequest(ep, req);
			return;
		}
	}
	if (epsts & UDC_EPSTS_HE)
		return;
	/* host sent SET_FEATURE(HALT): stall the endpoint */
	if (epsts & UDC_EPSTS_RSS) {
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	/* host sent CLEAR_FEATURE(HALT): honour it unless wedged */
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	/* data packet received */
	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_DATA) {
		if (ep->dev->prot_stall == 1) {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_complete_receiver(ep);
		}
	}
	/* nothing queued anymore: re-enable RX DMA for future requests */
	if (list_empty(&ep->queue))
		pch_udc_set_dma(dev, DMA_DIR_RX);
}
2320
2321
2322
2323
2324
/*
 * pch_udc_svc_control_in() - Service the control IN (ep0in) endpoint
 *	interrupt: complete IN transfers and re-arm ep0out for the next
 *	status/data stage.
 * @dev:	reference to the device structure
 */
static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_ep *ep_out;

	ep = &dev->ep[UDC_EP0IN_IDX];
	ep_out = &dev->ep[UDC_EP0OUT_IDX];
	/* status was cached by pch_udc_read_all_epstatus(); consume it */
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_XFERDONE)))
		return;
	/* buffer-not-available and host-error conditions are ignored here */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	/* IN transfer complete and ep0 not stalled: finish the request and
	 * hand ep0out back to the host for the next (status/data) stage
	 */
	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
		pch_udc_complete_transfer(ep);
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		ep_out->td_data->status = (ep_out->td_data->status &
					   ~PCH_UDC_BUFF_STS) |
					  PCH_UDC_BS_HST_RDY;
		pch_udc_ep_clear_nak(ep_out);
		pch_udc_set_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_rrdy(ep_out);
	}
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
	    !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2359
2360
2361
2362
2363
2364
2365static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2366 __releases(&dev->lock)
2367 __acquires(&dev->lock)
2368{
2369 u32 stat;
2370 int setup_supported;
2371 struct pch_udc_ep *ep;
2372
2373 ep = &dev->ep[UDC_EP0OUT_IDX];
2374 stat = ep->epsts;
2375 ep->epsts = 0;
2376
2377
2378 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2379 UDC_EPSTS_OUT_SETUP) {
2380 dev->stall = 0;
2381 dev->ep[UDC_EP0IN_IDX].halted = 0;
2382 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2383 dev->setup_data = ep->td_stp->request;
2384 pch_udc_init_setup_buff(ep->td_stp);
2385 pch_udc_clear_dma(dev, DMA_DIR_RX);
2386 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2387 dev->ep[UDC_EP0IN_IDX].in);
2388 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2389 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2390 else
2391 dev->gadget.ep0 = &ep->ep;
2392 spin_lock(&dev->lock);
2393
2394 if ((dev->setup_data.bRequestType == 0x21) &&
2395 (dev->setup_data.bRequest == 0xFF))
2396 dev->prot_stall = 0;
2397
2398 setup_supported = dev->driver->setup(&dev->gadget,
2399 &dev->setup_data);
2400 spin_unlock(&dev->lock);
2401
2402 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2403 ep->td_data->status = (ep->td_data->status &
2404 ~PCH_UDC_BUFF_STS) |
2405 PCH_UDC_BS_HST_RDY;
2406 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2407 }
2408
2409 if (setup_supported >= 0 && setup_supported <
2410 UDC_EP0IN_MAX_PKT_SIZE) {
2411 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2412
2413
2414 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2415 pch_udc_set_dma(dev, DMA_DIR_RX);
2416 pch_udc_ep_clear_nak(ep);
2417 }
2418 } else if (setup_supported < 0) {
2419
2420 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2421 pch_udc_enable_ep_interrupts(ep->dev,
2422 PCH_UDC_EPINT(ep->in, ep->num));
2423 dev->stall = 0;
2424 pch_udc_set_dma(dev, DMA_DIR_RX);
2425 } else {
2426 dev->waiting_zlp_ack = 1;
2427 }
2428 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2429 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2430 pch_udc_clear_dma(dev, DMA_DIR_RX);
2431 pch_udc_ep_set_ddptr(ep, 0);
2432 if (!list_empty(&ep->queue)) {
2433 ep->epsts = stat;
2434 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2435 }
2436 pch_udc_set_dma(dev, DMA_DIR_RX);
2437 }
2438 pch_udc_ep_set_rrdy(ep);
2439}
2440
2441
2442
2443
2444
2445
2446
2447
2448static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2449{
2450 struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2451 if (list_empty(&ep->queue))
2452 return;
2453 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2454 pch_udc_ep_clear_nak(ep);
2455}
2456
2457
2458
2459
2460
2461
2462static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2463{
2464 int i;
2465 struct pch_udc_ep *ep;
2466
2467 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2468
2469 if (ep_intr & (0x1 << i)) {
2470 ep = &dev->ep[UDC_EPIN_IDX(i)];
2471 ep->epsts = pch_udc_read_ep_status(ep);
2472 pch_udc_clear_ep_status(ep, ep->epsts);
2473 }
2474
2475 if (ep_intr & (0x10000 << i)) {
2476 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2477 ep->epsts = pch_udc_read_ep_status(ep);
2478 pch_udc_clear_ep_status(ep, ep->epsts);
2479 }
2480 }
2481}
2482
2483
2484
2485
2486
2487
/*
 * pch_udc_activate_control_ep() - (Re)initialise both control endpoints:
 *	reset controls, program buffer/packet sizes, and arm ep0out's SETUP
 *	and data descriptors.
 * @dev:	reference to the device structure
 */
static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	u32 val;

	/* Setup the IN endpoint */
	ep = &dev->ep[UDC_EP0IN_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
	/* Initialize the IN EP Descriptor */
	ep->td_data      = NULL;
	ep->td_stp       = NULL;
	ep->td_data_phys = 0;
	ep->td_stp_phys  = 0;

	/* Setup the OUT endpoint */
	ep = &dev->ep[UDC_EP0OUT_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);

	/* Initialize the SETUP buffer */
	pch_udc_init_setup_buff(ep->td_stp);
	/* Write the pointer address of setup buffer */
	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
	/* Write the pointer address of Data descriptor */
	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
	/* Initialize the dma descriptor: single self-linked entry pointing
	 * at the device's ep0out data buffer
	 */
	ep->td_data->status  = PCH_UDC_DMA_LAST;
	ep->td_data->dataptr = dev->dma_addr;
	ep->td_data->next    = ep->td_data_phys;
	/* Point the ep0out endpoint at the descriptor and accept tokens */
	pch_udc_ep_clear_nak(ep);
}
2528
2529
2530
2531
2532
2533
/*
 * pch_udc_svc_ur_interrupt() - Handle a USB reset interrupt: stop DMA,
 *	mask and reset every endpoint, flush all queues, and notify the
 *	gadget core of the reset.
 * @dev:	reference to the device structure
 *
 * Called with dev->lock held (from the ISR).
 */
static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	int i;

	pch_udc_clear_dma(dev, DMA_DIR_TX);
	pch_udc_clear_dma(dev, DMA_DIR_RX);
	/* Mask all endpoint interrupts */
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	/* clear all endpoint interrupts */
	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		ep = &dev->ep[i];
		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
		pch_udc_clear_ep_control(ep);
		pch_udc_ep_set_ddptr(ep, 0);
		pch_udc_write_csr(ep->dev, 0x00, i);
	}
	dev->stall = 0;
	dev->prot_stall = 0;
	dev->waiting_zlp_ack = 0;
	dev->set_cfg_not_acked = 0;

	/* disable ep to empty req queue. Skip the control EP's */
	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
		ep = &dev->ep[i];
		pch_udc_ep_set_nak(ep);
		pch_udc_ep_fifo_flush(ep, ep->in);
		/* Complete request queue */
		empty_req_queue(ep);
	}
	if (dev->driver) {
		/* drop the lock around the gadget-core callback */
		spin_unlock(&dev->lock);
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);
	}
}
2572
2573
2574
2575
2576
2577
/*
 * pch_udc_svc_enum_interrupt() - Handle a speed-enumeration-done interrupt:
 *	record the negotiated speed, re-activate ep0 and re-enable the
 *	device interrupts.
 * @dev:	reference to the device structure
 */
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;

	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
		    UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		/* NOTE(review): hardware reporting any other value crashes
		 * the kernel; presumably unreachable on this controller
		 */
		BUG();
	}
	dev->gadget.speed = speed;
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));

	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
				       UDC_DEVINT_SI | UDC_DEVINT_SC);
}
2611
2612
2613
2614
2615
2616
2617static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2618{
2619 u32 reg, dev_stat = 0;
2620 int i;
2621
2622 dev_stat = pch_udc_read_device_status(dev);
2623 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2624 UDC_DEVSTS_INTF_SHIFT;
2625 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2626 UDC_DEVSTS_ALT_SHIFT;
2627 dev->set_cfg_not_acked = 1;
2628
2629 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2630 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2631 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2632 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2633 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2634
2635
2636 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2637 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2638 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2639 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2640 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2641 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2642 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2643
2644 pch_udc_ep_clear_stall(&(dev->ep[i]));
2645 dev->ep[i].halted = 0;
2646 }
2647 dev->stall = 0;
2648 spin_unlock(&dev->lock);
2649 dev->driver->setup(&dev->gadget, &dev->setup_data);
2650 spin_lock(&dev->lock);
2651}
2652
2653
2654
2655
2656
2657
2658static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2659{
2660 int i;
2661 u32 reg, dev_stat = 0;
2662
2663 dev_stat = pch_udc_read_device_status(dev);
2664 dev->set_cfg_not_acked = 1;
2665 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2666 UDC_DEVSTS_CFG_SHIFT;
2667
2668 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2669 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2670 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2671
2672
2673 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2674 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2675 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2676 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2677 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2678
2679 pch_udc_ep_clear_stall(&(dev->ep[i]));
2680 dev->ep[i].halted = 0;
2681 }
2682 dev->stall = 0;
2683
2684
2685 spin_unlock(&dev->lock);
2686 dev->driver->setup(&dev->gadget, &dev->setup_data);
2687 spin_lock(&dev->lock);
2688}
2689
2690
2691
2692
2693
2694
2695
/*
 * pch_udc_dev_isr() - Dispatch device-level interrupt causes (reset,
 *	enumeration, SET_INTERFACE/CONFIGURATION, suspend, SOF, ...).
 * @dev:	reference to the device structure
 * @dev_intr:	device interrupt status bits to service
 *
 * Called with dev->lock held (from the ISR).
 */
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;

	/* USB Reset Interrupt */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration Done Interrupt */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}
	/* Set Interface Interrupt */
	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);
	/* Set Config Interrupt */
	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);
	/* USB Suspend interrupt */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}

		/* suspend with VBUS gone means a real disconnect */
		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			if (dev->driver && dev->driver->disconnect) {
				spin_unlock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_lock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			/* no VBUS irq line: poll via the workqueue instead */
			schedule_work(&dev->vbus_gpio.irq_work_fall);

		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}
	/* Clear the SOF interrupt, if enabled */
	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");
	/* idle status interrupt */
	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");
	/* remote wakeup interrupt */
	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}
2751
2752
2753
2754
2755
2756
/*
 * pch_udc_isr() - Top-level interrupt handler: acknowledge device and
 *	endpoint interrupts, then dispatch to the device and per-endpoint
 *	service routines under dev->lock.
 * @irq:	interrupt number
 * @pdev:	pointer to the pch_udc_dev passed at request_irq() time
 *
 * Return: IRQ_HANDLED when serviced, IRQ_NONE if no status was pending.
 */
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;

	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);

	/* identical readings that also match DEVCFG indicate the bus has
	 * returned garbage (controller hung): soft-reset it
	 */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			/* The controller is reset */
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear ep interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		/* cache every flagged endpoint's status before servicing */
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process Control In interrupts, if present */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process Control Out interrupts, if present */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data in end point interrupts */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 <<  i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data out end point interrupts (bits 17..) */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						 PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
							 UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
2812
2813
2814
2815
2816
2817static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2818{
2819
2820 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2821 UDC_EPINT_OUT_EP0);
2822
2823 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2824 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2825 UDC_DEVINT_SI | UDC_DEVINT_SC);
2826}
2827
2828
2829
2830
2831
/*
 * pch_udc_pcd_reinit() - Initialise the gadget's endpoint list: name every
 *	endpoint, set its ops, capabilities, register offset and packet
 *	limits, and wire ep0 up as the gadget's control endpoint.
 * @dev:	reference to the device structure
 */
static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
{
	const char *const ep_string[] = {
		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
		"ep15in", "ep15out",
	};
	int i;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* Initialize the endpoints structures */
	memset(dev->ep, 0, sizeof dev->ep);
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		struct pch_udc_ep *ep = &dev->ep[i];
		ep->dev = dev;
		ep->halted = 1;
		/* even indices are IN, odd are OUT: epNin, epNout, ... */
		ep->num = i / 2;
		ep->in = ~i & 1;
		ep->ep.name = ep_string[i];
		ep->ep.ops = &pch_udc_ep_ops;
		if (ep->in) {
			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_in = true;
		} else {
			/* OUT endpoint registers live after all IN ones */
			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
					  UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_out = true;
		}
		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		/* need to set ep->ep.maxpacket and set Default Configuration?*/
		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
		INIT_LIST_HEAD(&ep->queue);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);

	/* remove ep0 in and out from the list.  They have own pointer */
	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);

	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}
2887
2888
2889
2890
2891
2892
2893
2894
/**
 * pch_udc_pcd_init() - One-time peripheral-controller initialization
 * @dev:	Reference to the driver structure
 *
 * Runs the controller hardware init, rebuilds the endpoint/gadget
 * bookkeeping, then sets up the optional VBUS-sense GPIO using the
 * module parameter vbus_gpio_port.
 *
 * Return: always 0.  NOTE(review): pch_vbus_gpio_init()'s return value
 * is ignored here — confirm GPIO setup failure is intentionally
 * non-fatal.
 */
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);
	pch_vbus_gpio_init(dev, vbus_gpio_port);
	return 0;
}
2902
2903
2904
2905
2906
2907static int init_dma_pools(struct pch_udc_dev *dev)
2908{
2909 struct pch_udc_stp_dma_desc *td_stp;
2910 struct pch_udc_data_dma_desc *td_data;
2911 void *ep0out_buf;
2912
2913
2914 dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2915 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2916 if (!dev->data_requests) {
2917 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2918 __func__);
2919 return -ENOMEM;
2920 }
2921
2922
2923 dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2924 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2925 if (!dev->stp_requests) {
2926 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2927 __func__);
2928 return -ENOMEM;
2929 }
2930
2931 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2932 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2933 if (!td_stp) {
2934 dev_err(&dev->pdev->dev,
2935 "%s: can't allocate setup dma descriptor\n", __func__);
2936 return -ENOMEM;
2937 }
2938 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2939
2940
2941 td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2942 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2943 if (!td_data) {
2944 dev_err(&dev->pdev->dev,
2945 "%s: can't allocate data dma descriptor\n", __func__);
2946 return -ENOMEM;
2947 }
2948 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2949 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2950 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2951 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2952 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2953
2954 ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2955 GFP_KERNEL);
2956 if (!ep0out_buf)
2957 return -ENOMEM;
2958 dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2959 UDC_EP0OUT_BUFF_SIZE * 4,
2960 DMA_FROM_DEVICE);
2961 return 0;
2962}
2963
2964static int pch_udc_start(struct usb_gadget *g,
2965 struct usb_gadget_driver *driver)
2966{
2967 struct pch_udc_dev *dev = to_pch_udc(g);
2968
2969 driver->driver.bus = NULL;
2970 dev->driver = driver;
2971
2972
2973 pch_udc_setup_ep0(dev);
2974
2975
2976 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2977 pch_udc_clear_disconnect(dev);
2978
2979 dev->connected = 1;
2980 return 0;
2981}
2982
2983static int pch_udc_stop(struct usb_gadget *g)
2984{
2985 struct pch_udc_dev *dev = to_pch_udc(g);
2986
2987 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2988
2989
2990 dev->driver = NULL;
2991 dev->connected = 0;
2992
2993
2994 pch_udc_set_disconnect(dev);
2995
2996 return 0;
2997}
2998
/*
 * pch_udc_shutdown() - PCI shutdown hook.
 * Masks every device and endpoint interrupt source, then asserts
 * soft-disconnect so the host sees the device go away before power-off
 * or kexec.
 */
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* disable the pullup so the host does not see this device */
	pch_udc_set_disconnect(dev);
}
3009
/*
 * pch_udc_remove() - PCI remove hook, also used as the probe error path.
 * Teardown order matters: unregister the gadget first (stops new
 * traffic), then release the ep0 descriptors back to their pools,
 * destroy the pools, unmap the ep0-OUT buffer, free the VBUS GPIO, and
 * finally quiesce the controller.  Every step tolerates the
 * corresponding resource never having been created, since probe may
 * call in here after a partial setup.
 */
static void pch_udc_remove(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* gadget driver must not be registered */
	if (dev->driver)
		dev_err(&pdev->dev,
			"%s: gadget driver still bound!!!\n", __func__);
	/* dma pool cleanup */
	dma_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0in */
		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
			dma_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_stp,
				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
		}
		/* note: the ep0out data descriptor came from data_requests,
		 * yet is freed into stp_requests here — NOTE(review):
		 * descriptor sizes may match, but confirm the pool mixup
		 * is harmless */
		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
			dma_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_data,
				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
		}
		dma_pool_destroy(dev->stp_requests);
	}

	/* unmap the ep0-OUT bounce buffer set up in init_dma_pools() */
	if (dev->dma_addr)
		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);

	pch_vbus_gpio_free(dev);

	pch_udc_exit(dev);
}
3046
3047#ifdef CONFIG_PM_SLEEP
/*
 * pch_udc_suspend() - PM sleep callback.
 * Masks every device and endpoint interrupt source so the controller
 * raises no interrupts while the system is asleep.  PCI power-state
 * handling is left to the PCI core.
 */
static int pch_udc_suspend(struct device *d)
{
	struct pci_dev *pdev = to_pci_dev(d);
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	return 0;
}
3058
/*
 * pch_udc_resume() - PM resume callback.
 * Intentionally empty: nothing is restored here; interrupts are
 * re-enabled when a gadget driver (re)binds via pch_udc_start().
 */
static int pch_udc_resume(struct device *d)
{
	return 0;
}
3063
3064static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3065#define PCH_UDC_PM_OPS (&pch_udc_pm)
3066#else
3067#define PCH_UDC_PM_OPS NULL
3068#endif
3069
3070static int pch_udc_probe(struct pci_dev *pdev,
3071 const struct pci_device_id *id)
3072{
3073 int bar;
3074 int retval;
3075 struct pch_udc_dev *dev;
3076
3077
3078 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3079 if (!dev)
3080 return -ENOMEM;
3081
3082
3083 retval = pcim_enable_device(pdev);
3084 if (retval)
3085 return retval;
3086
3087 pci_set_drvdata(pdev, dev);
3088
3089
3090 if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3091 bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3092 else
3093 bar = PCH_UDC_PCI_BAR;
3094
3095
3096 retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3097 if (retval)
3098 return retval;
3099
3100 dev->base_addr = pcim_iomap_table(pdev)[bar];
3101
3102
3103 if (pch_udc_pcd_init(dev))
3104 return -ENODEV;
3105
3106 pci_enable_msi(pdev);
3107
3108 retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3109 IRQF_SHARED, KBUILD_MODNAME, dev);
3110 if (retval) {
3111 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3112 pdev->irq);
3113 goto finished;
3114 }
3115
3116 pci_set_master(pdev);
3117 pci_try_set_mwi(pdev);
3118
3119
3120 spin_lock_init(&dev->lock);
3121 dev->pdev = pdev;
3122 dev->gadget.ops = &pch_udc_ops;
3123
3124 retval = init_dma_pools(dev);
3125 if (retval)
3126 goto finished;
3127
3128 dev->gadget.name = KBUILD_MODNAME;
3129 dev->gadget.max_speed = USB_SPEED_HIGH;
3130
3131
3132 pch_udc_set_disconnect(dev);
3133 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3134 if (retval)
3135 goto finished;
3136 return 0;
3137
3138finished:
3139 pch_udc_remove(pdev);
3140 return retval;
3141}
3142
/*
 * PCI IDs handled by this driver.  Each entry also restricts the match
 * by PCI class (serial-bus / USB device, full 32-bit class mask) so
 * other functions of the same vendor/device pairs are not claimed.
 */
static const struct pci_device_id pch_udc_pcidev_id[] = {
	{
		/* Intel Quark X1000 SoC UDC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* Intel EG20T PCH UDC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* LAPIS/ROHM ML7213 IOH UDC */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* LAPIS/ROHM ML7831 IOH UDC */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{ 0 },	/* terminator */
};

MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3169
/* PCI driver glue; supported devices are listed in pch_udc_pcidev_id. */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.shutdown =	pch_udc_shutdown,
	.driver = {
		.pm = PCH_UDC_PM_OPS,	/* NULL when CONFIG_PM_SLEEP is off */
	},
};

/* registers init/exit boilerplate for this PCI driver */
module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");
3186