1
2
3
4
5
6
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/list.h>
15#include <linux/interrupt.h>
16#include <linux/usb/ch9.h>
17#include <linux/usb/gadget.h>
18#include <linux/gpio.h>
19#include <linux/irq.h>
20
21
22static int vbus_gpio_port = -1;
23
24#define PCH_VBUS_PERIOD 3000
25#define PCH_VBUS_INTERVAL 10
26
27
28#define UDC_EP_REG_SHIFT 0x20
29
30#define UDC_EPCTL_ADDR 0x00
31#define UDC_EPSTS_ADDR 0x04
32#define UDC_BUFIN_FRAMENUM_ADDR 0x08
33#define UDC_BUFOUT_MAXPKT_ADDR 0x0C
34#define UDC_SUBPTR_ADDR 0x10
35#define UDC_DESPTR_ADDR 0x14
36#define UDC_CONFIRM_ADDR 0x18
37
38#define UDC_DEVCFG_ADDR 0x400
39#define UDC_DEVCTL_ADDR 0x404
40#define UDC_DEVSTS_ADDR 0x408
41#define UDC_DEVIRQSTS_ADDR 0x40C
42#define UDC_DEVIRQMSK_ADDR 0x410
43#define UDC_EPIRQSTS_ADDR 0x414
44#define UDC_EPIRQMSK_ADDR 0x418
45#define UDC_DEVLPM_ADDR 0x41C
46#define UDC_CSR_BUSY_ADDR 0x4f0
47#define UDC_SRST_ADDR 0x4fc
48#define UDC_CSR_ADDR 0x500
49
50
51
52#define UDC_EPCTL_MRXFLUSH (1 << 12)
53#define UDC_EPCTL_RRDY (1 << 9)
54#define UDC_EPCTL_CNAK (1 << 8)
55#define UDC_EPCTL_SNAK (1 << 7)
56#define UDC_EPCTL_NAK (1 << 6)
57#define UDC_EPCTL_P (1 << 3)
58#define UDC_EPCTL_F (1 << 1)
59#define UDC_EPCTL_S (1 << 0)
60#define UDC_EPCTL_ET_SHIFT 4
61
62#define UDC_EPCTL_ET_MASK 0x00000030
63
64#define UDC_EPCTL_ET_CONTROL 0
65#define UDC_EPCTL_ET_ISO 1
66#define UDC_EPCTL_ET_BULK 2
67#define UDC_EPCTL_ET_INTERRUPT 3
68
69
70
71#define UDC_EPSTS_XFERDONE (1 << 27)
72#define UDC_EPSTS_RSS (1 << 26)
73#define UDC_EPSTS_RCS (1 << 25)
74#define UDC_EPSTS_TXEMPTY (1 << 24)
75#define UDC_EPSTS_TDC (1 << 10)
76#define UDC_EPSTS_HE (1 << 9)
77#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
78#define UDC_EPSTS_BNA (1 << 7)
79#define UDC_EPSTS_IN (1 << 6)
80#define UDC_EPSTS_OUT_SHIFT 4
81
82#define UDC_EPSTS_OUT_MASK 0x00000030
83#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
84
85#define UDC_EPSTS_OUT_SETUP 2
86#define UDC_EPSTS_OUT_DATA 1
87
88
89
90#define UDC_DEVCFG_CSR_PRG (1 << 17)
91#define UDC_DEVCFG_SP (1 << 3)
92
93#define UDC_DEVCFG_SPD_HS 0x0
94#define UDC_DEVCFG_SPD_FS 0x1
95#define UDC_DEVCFG_SPD_LS 0x2
96
97
98
99#define UDC_DEVCTL_THLEN_SHIFT 24
100#define UDC_DEVCTL_BRLEN_SHIFT 16
101#define UDC_DEVCTL_CSR_DONE (1 << 13)
102#define UDC_DEVCTL_SD (1 << 10)
103#define UDC_DEVCTL_MODE (1 << 9)
104#define UDC_DEVCTL_BREN (1 << 8)
105#define UDC_DEVCTL_THE (1 << 7)
106#define UDC_DEVCTL_DU (1 << 4)
107#define UDC_DEVCTL_TDE (1 << 3)
108#define UDC_DEVCTL_RDE (1 << 2)
109#define UDC_DEVCTL_RES (1 << 0)
110
111
112
113#define UDC_DEVSTS_TS_SHIFT 18
114#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
115#define UDC_DEVSTS_ALT_SHIFT 8
116#define UDC_DEVSTS_INTF_SHIFT 4
117#define UDC_DEVSTS_CFG_SHIFT 0
118
119#define UDC_DEVSTS_TS_MASK 0xfffc0000
120#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
121#define UDC_DEVSTS_ALT_MASK 0x00000f00
122#define UDC_DEVSTS_INTF_MASK 0x000000f0
123#define UDC_DEVSTS_CFG_MASK 0x0000000f
124
125#define UDC_DEVSTS_ENUM_SPEED_FULL 1
126#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
127#define UDC_DEVSTS_ENUM_SPEED_LOW 2
128#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
129
130
131
132#define UDC_DEVINT_RWKP (1 << 7)
133#define UDC_DEVINT_ENUM (1 << 6)
134#define UDC_DEVINT_SOF (1 << 5)
135#define UDC_DEVINT_US (1 << 4)
136#define UDC_DEVINT_UR (1 << 3)
137#define UDC_DEVINT_ES (1 << 2)
138#define UDC_DEVINT_SI (1 << 1)
139#define UDC_DEVINT_SC (1 << 0)
140
141#define UDC_DEVINT_MSK 0x7f
142
143
144
145#define UDC_EPINT_IN_SHIFT 0
146#define UDC_EPINT_OUT_SHIFT 16
147#define UDC_EPINT_IN_EP0 (1 << 0)
148#define UDC_EPINT_OUT_EP0 (1 << 16)
149
150#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
151
152
153
154#define UDC_CSR_BUSY (1 << 0)
155
156
157
158#define UDC_PSRST (1 << 1)
159#define UDC_SRST (1 << 0)
160
161
162
163#define UDC_CSR_NE_NUM_SHIFT 0
164#define UDC_CSR_NE_DIR_SHIFT 4
165#define UDC_CSR_NE_TYPE_SHIFT 5
166#define UDC_CSR_NE_CFG_SHIFT 7
167#define UDC_CSR_NE_INTF_SHIFT 11
168#define UDC_CSR_NE_ALT_SHIFT 15
169#define UDC_CSR_NE_MAX_PKT_SHIFT 19
170
171#define UDC_CSR_NE_NUM_MASK 0x0000000f
172#define UDC_CSR_NE_DIR_MASK 0x00000010
173#define UDC_CSR_NE_TYPE_MASK 0x00000060
174#define UDC_CSR_NE_CFG_MASK 0x00000780
175#define UDC_CSR_NE_INTF_MASK 0x00007800
176#define UDC_CSR_NE_ALT_MASK 0x00078000
177#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
178
179#define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
180#define PCH_UDC_EPINT(in, num)\
181 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
182
183
184#define UDC_EP0IN_IDX 0
185#define UDC_EP0OUT_IDX 1
186#define UDC_EPIN_IDX(ep) (ep * 2)
187#define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
188#define PCH_UDC_EP0 0
189#define PCH_UDC_EP1 1
190#define PCH_UDC_EP2 2
191#define PCH_UDC_EP3 3
192
193
194#define PCH_UDC_EP_NUM 32
195#define PCH_UDC_USED_EP_NUM 4
196
197#define PCH_UDC_BRLEN 0x0F
198#define PCH_UDC_THLEN 0x1F
199
200#define UDC_EP0IN_BUFF_SIZE 16
201#define UDC_EPIN_BUFF_SIZE 256
202#define UDC_EP0OUT_BUFF_SIZE 16
203#define UDC_EPOUT_BUFF_SIZE 256
204
205#define UDC_EP0IN_MAX_PKT_SIZE 64
206#define UDC_EP0OUT_MAX_PKT_SIZE 64
207#define UDC_BULK_MAX_PKT_SIZE 512
208
209
210#define DMA_DIR_RX 1
211#define DMA_DIR_TX 2
212#define DMA_ADDR_INVALID (~(dma_addr_t)0)
213#define UDC_DMA_MAXPACKET 65536
214
215
216
217
218
219
220
221
222
/**
 * struct pch_udc_data_dma_desc - DMA descriptor for a data transfer
 * @status:	buffer status / byte-count quadlet (PCH_UDC_BS_* /
 *		PCH_UDC_RXTX_* bits)
 * @reserved:	reserved
 * @dataptr:	32-bit DMA address of the data buffer
 * @next:	32-bit DMA address of the next descriptor in the chain
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
229
230
231
232
233
234
235
236
237
238struct pch_udc_stp_dma_desc {
239 u32 status;
240 u32 reserved;
241 struct usb_ctrlrequest request;
242} __attribute((packed));
243
244
245
246#define PCH_UDC_BUFF_STS 0xC0000000
247#define PCH_UDC_BS_HST_RDY 0x00000000
248#define PCH_UDC_BS_DMA_BSY 0x40000000
249#define PCH_UDC_BS_DMA_DONE 0x80000000
250#define PCH_UDC_BS_HST_BSY 0xC0000000
251
252#define PCH_UDC_RXTX_STS 0x30000000
253#define PCH_UDC_RTS_SUCC 0x00000000
254#define PCH_UDC_RTS_DESERR 0x10000000
255#define PCH_UDC_RTS_BUFERR 0x30000000
256
257#define PCH_UDC_DMA_LAST 0x08000000
258
259#define PCH_UDC_RXTX_BYTES 0x0000ffff
260
261
262
263
264
265
266
267
/**
 * struct pch_udc_cfg_data - current USB configuration state
 * @cur_cfg:	configuration currently in use
 * @cur_intf:	interface currently in use
 * @cur_alt:	alternate setting currently in use
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
/**
 * struct pch_udc_ep - one PCH USB device endpoint
 * @ep:			embedded gadget-layer endpoint
 * @td_stp_phys:	DMA address of the setup descriptor
 * @td_data_phys:	DMA address of the data descriptor
 * @td_stp:		virtual address of the setup descriptor
 * @td_data:		virtual address of the data descriptor
 * @dev:		back-reference to the device structure
 * @offset_addr:	offset of this endpoint's register block
 * @queue:		queue of pch_udc_request entries
 * @num:		endpoint number
 * @in:			1 if this is an IN endpoint
 * @halted:		1 while the endpoint is halted
 * @epsts:		last endpoint status snapshot
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	struct list_head queue;
	unsigned num:5,
		in:1,
		halted:1;
	unsigned long epsts;
};
304
305
306
307
308
309
310
311
312
/**
 * struct pch_vbus_gpio_data - GPIO-based VBUS sensing state
 * @port:		GPIO number used for VBUS detection (0 = unused)
 * @intr:		IRQ number obtained for the GPIO (0 = polling only)
 * @irq_work_fall:	work scheduled on a falling VBUS edge
 * @irq_work_rise:	work scheduled on a rising VBUS edge
 */
struct pch_vbus_gpio_data {
	int port;
	int intr;
	struct work_struct irq_work_fall;
	struct work_struct irq_work_rise;
};
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
/**
 * struct pch_udc_dev - complete state of the PCH USB device controller
 * @gadget:		gadget driver data
 * @driver:		gadget driver currently bound (NULL if none)
 * @pdev:		underlying PCI device
 * @ep:			array of endpoints
 * @lock:		protects device/endpoint state
 * @active:		PCI device enabled
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @irq_registered:	IRQ registered with the system
 * @mem_region:		device memory region requested
 * @suspended:		driver is in suspended state
 * @connected:		connected to the host
 * @vbus_session:	requested vbus_session state
 * @set_cfg_not_acked:	SET_CONFIGURATION awaiting acknowledgement
 * @waiting_zlp_ack:	waiting for a ZLP acknowledgement
 * @data_requests:	DMA pool for data descriptors
 * @stp_requests:	DMA pool for setup descriptors
 * @dma_addr:		DMA address of @ep0out_buf
 * @ep0out_buf:		bounce buffer for EP0 OUT data
 * @setup_data:		last received SETUP packet
 * @phys_addr:		physical address of device registers
 * @base_addr:		ioremapped device registers
 * @bar:		PCI BAR used for the register window
 * @irq:		IRQ line of the device
 * @cfg_data:		current cfg/intf/alt in use
 * @vbus_gpio:		GPIO VBUS-detection state
 */
struct pch_udc_dev {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct pci_dev			*pdev;
	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
	spinlock_t			lock;
	unsigned			active:1,
			stall:1,
			prot_stall:1,
			irq_registered:1,
			mem_region:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct pci_pool		*data_requests;
	struct pci_pool		*stp_requests;
	dma_addr_t			dma_addr;
	void				*ep0out_buf;
	struct usb_ctrlrequest		setup_data;
	unsigned long			phys_addr;
	void __iomem			*base_addr;
	unsigned			bar;
	unsigned			irq;
	struct pch_udc_cfg_data		cfg_data;
	struct pch_vbus_gpio_data	vbus_gpio;
};
379#define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
380
381#define PCH_UDC_PCI_BAR_QUARK_X1000 0
382#define PCH_UDC_PCI_BAR 1
383#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
384#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
385#define PCI_VENDOR_ID_ROHM 0x10DB
386#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
387#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
388
389static const char ep0_string[] = "ep0in";
390static DEFINE_SPINLOCK(udc_stall_spinlock);
391static bool speed_fs;
392module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
393MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
/**
 * struct pch_udc_request - driver-private USB request
 * @req:		embedded gadget-layer request
 * @td_data_phys:	DMA address of the first data descriptor
 * @td_data:		first data descriptor of the chain
 * @td_data_last:	last data descriptor of the chain
 * @queue:		link on the owning endpoint's queue
 * @dma_going:		DMA transfer in progress
 * @dma_mapped:		this driver created the DMA mapping
 * @dma_done:		DMA transfer completed
 * @chain_len:		number of descriptors in the chain
 * @buf:		driver-allocated bounce buffer (when used)
 * @dma:		DMA address of @buf, or DMA_ADDR_INVALID
 */
struct pch_udc_request {
	struct usb_request		req;
	dma_addr_t			td_data_phys;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_data_dma_desc	*td_data_last;
	struct list_head		queue;
	unsigned			dma_going:1,
			dma_mapped:1,
			dma_done:1;
	unsigned			chain_len;
	void				*buf;
	dma_addr_t			dma;
};
422
423static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
424{
425 return ioread32(dev->base_addr + reg);
426}
427
428static inline void pch_udc_writel(struct pch_udc_dev *dev,
429 unsigned long val, unsigned long reg)
430{
431 iowrite32(val, dev->base_addr + reg);
432}
433
/* Read-modify-write: set @bitmask bits in device register @reg. */
static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	unsigned long cur = pch_udc_readl(dev, reg);

	pch_udc_writel(dev, cur | bitmask, reg);
}
440
/* Read-modify-write: clear @bitmask bits in device register @reg. */
static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	unsigned long cur = pch_udc_readl(dev, reg);

	pch_udc_writel(dev, cur & ~bitmask, reg);
}
447
448static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
449{
450 return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
451}
452
453static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
454 unsigned long val, unsigned long reg)
455{
456 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
457}
458
/* Read-modify-write: set @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	unsigned long cur = pch_udc_ep_readl(ep, reg);

	pch_udc_ep_writel(ep, cur | bitmask, reg);
}
465
/* Read-modify-write: clear @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	unsigned long cur = pch_udc_ep_readl(ep, reg);

	pch_udc_ep_writel(ep, cur & ~bitmask, reg);
}
472
473
474
475
476
477static void pch_udc_csr_busy(struct pch_udc_dev *dev)
478{
479 unsigned int count = 200;
480
481
482 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
483 && --count)
484 cpu_relax();
485 if (!count)
486 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
487}
488
489
490
491
492
493
494
/**
 * pch_udc_write_csr() - Write a dword to an endpoint CSR slot
 * @dev:	the device structure
 * @val:	value to write
 * @ep:	CSR slot index
 *
 * The CSR file must be idle both before and after the access.
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			      unsigned int ep)
{
	pch_udc_csr_busy(dev);
	pch_udc_writel(dev, val, PCH_UDC_CSR(ep));
	pch_udc_csr_busy(dev);
}
504
505
506
507
508
509
510
511
/**
 * pch_udc_read_csr() - Read a dword from an endpoint CSR slot
 * @dev:	the device structure
 * @ep:	CSR slot index
 *
 * Return: the CSR value.
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* before the dummy read */
	pch_udc_readl(dev, reg);	/* dummy read — presumably latches the
					 * value for the real read below;
					 * TODO confirm against the IP spec */
	pch_udc_csr_busy(dev);		/* before the real read */
	return pch_udc_readl(dev, reg);
}
521
522
523
524
525
/**
 * pch_udc_rmt_wakeup() - Initiate remote-wakeup signalling
 * @dev:	the device structure
 *
 * Pulses the resume (RES) bit: set, hold ~1 ms, clear.
 */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
532
533
534
535
536
537
538static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
539{
540 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
541 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
542}
543
544
545
546
547
/* Clear the self-powered (SP) bit in the device configuration register. */
static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
552
553
554
555
556
/* Set the self-powered (SP) bit in the device configuration register. */
static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
561
562
563
564
565
/* Soft-disconnect from the host (set the SD bit). */
static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
}
570
571
572
573
574
/**
 * pch_udc_clear_disconnect() - Reconnect to the host (clear soft-disconnect)
 * @dev:	the device structure
 *
 * Order matters: RES is asserted while SD is cleared, held ~1 ms, then
 * released, so the host sees resume signalling around the reconnect.
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Assert resume, then clear soft-disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Release resume signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
584
585
586
587
588
589
static void pch_udc_init(struct pch_udc_dev *dev);

/**
 * pch_udc_reconnect() - Fully re-initialize the controller and reconnect
 * @dev:	the device structure
 *
 * Resets/reprograms the core, unmasks only the reset and enumeration-done
 * interrupts, then performs the same RES/SD sequence as
 * pch_udc_clear_disconnect() to come back on the bus.
 */
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* Unmask (bit_clr on the mask register enables) UR and ENUM only */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* Assert resume, clear soft-disconnect, hold ~1 ms */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Release resume signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
607
608
609
610
611
612
613
614
615static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
616 int is_active)
617{
618 if (is_active) {
619 pch_udc_reconnect(dev);
620 dev->vbus_session = 1;
621 } else {
622 if (dev->driver && dev->driver->disconnect) {
623 spin_lock(&dev->lock);
624 dev->driver->disconnect(&dev->gadget);
625 spin_unlock(&dev->lock);
626 }
627 pch_udc_set_disconnect(dev);
628 dev->vbus_session = 0;
629 }
630}
631
632
633
634
635
/**
 * pch_udc_ep_set_stall() - Stall an endpoint
 * @ep:	the endpoint
 *
 * For IN endpoints the FIFO is flushed (F) before the stall (S) bit is
 * set; OUT endpoints only need the stall bit.
 */
static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
{
	if (ep->in) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	} else {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	}
}
645
646
647
648
649
/**
 * pch_udc_ep_clear_stall() - Unstall an endpoint
 * @ep:	the endpoint
 *
 * Clears the stall bit, then clears NAK so traffic can resume.
 */
static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
{
	/* Clear the stall */
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	/* Clear NAK by writing CNAK */
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
}
657
658
659
660
661
662
663static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
664 u8 type)
665{
666 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
667 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
668}
669
670
671
672
673
674
675static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
676 u32 buf_size, u32 ep_in)
677{
678 u32 data;
679 if (ep_in) {
680 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
681 data = (data & 0xffff0000) | (buf_size & 0xffff);
682 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
683 } else {
684 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
685 data = (buf_size << 16) | (data & 0xffff);
686 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
687 }
688}
689
690
691
692
693
694
695static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
696{
697 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
698 data = (data & 0xffff0000) | (pkt_size & 0xffff);
699 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
700}
701
702
703
704
705
706
/* Program the setup-buffer pointer register with DMA address @addr. */
static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
}
711
712
713
714
715
716
/* Program the data-descriptor pointer register with DMA address @addr. */
static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
}
721
722
723
724
725
/* Set the poll-demand (P) bit to kick an IN transfer. */
static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
}
730
731
732
733
734
/* Set receive-ready (RRDY), allowing the endpoint to accept OUT data. */
static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
739
740
741
742
743
/* Clear receive-ready (RRDY), stopping OUT data reception. */
static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
748
749
750
751
752
753
754
755
756
757static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
758{
759 if (dir == DMA_DIR_RX)
760 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
761 else if (dir == DMA_DIR_TX)
762 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
763}
764
765
766
767
768
769
770
771
772
773static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
774{
775 if (dir == DMA_DIR_RX)
776 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
777 else if (dir == DMA_DIR_TX)
778 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
779}
780
781
782
783
784
785
/* Signal that CSR programming is complete (CSR_DONE bit). */
static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
}
790
791
792
793
794
795
/* Mask (disable) the device interrupts in @mask; the register is a mask,
 * so setting bits disables the corresponding interrupts. */
static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
					      u32 mask)
{
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
}
801
802
803
804
805
806
/* Unmask (enable) the device interrupts in @mask. */
static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
					     u32 mask)
{
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
}
812
813
814
815
816
817
/* Mask (disable) the endpoint interrupts in @mask. */
static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
						 u32 mask)
{
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
}
823
824
825
826
827
828
/* Unmask (enable) the endpoint interrupts in @mask. */
static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
}
834
835
836
837
838
839
/* Return the pending device interrupt status bits. */
static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
}
844
845
846
847
848
849
/* Acknowledge (write-1-to-clear) device interrupt status bits in @val. */
static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
						   u32 val)
{
	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
}
855
856
857
858
859
860
/* Return the pending endpoint interrupt status bits. */
static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
}
865
866
867
868
869
870
/* Acknowledge (write-1-to-clear) endpoint interrupt status bits in @val. */
static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
					       u32 val)
{
	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
}
876
877
878
879
880
881
/* Return the device status register (speed, cfg, intf, alt, frame). */
static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
}
886
887
888
889
890
891
/* Return the endpoint control register. */
static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
}
896
897
898
899
900
901
902static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
903{
904 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
905}
906
907
908
909
910
911
/* Return the endpoint status register. */
static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
}
916
917
918
919
920
921
922static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
923 u32 stat)
924{
925 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
926}
927
928
929
930
931
932
/* Set NAK (SNAK bit) so the endpoint NAKs further traffic. */
static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
}
937
938
939
940
941
942
/**
 * pch_udc_ep_clear_nak() - Clear the endpoint's NAK condition
 * @ep:	the endpoint
 *
 * For an OUT endpoint the receive FIFO must drain first; both waits are
 * bounded polls (~5 us per step) that log an error on timeout.  The CNAK
 * write is retried because the hardware may re-assert NAK.
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	/* Nothing to do if NAK is not currently asserted */
	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		/* Wait for the receive FIFO to empty before clearing NAK */
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	/* Write CNAK repeatedly until the NAK bit actually clears */
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
968
969
970
971
972
973
974
975
976static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
977{
978 if (dir) {
979 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
980 return;
981 }
982}
983
984
985
986
987
988
/**
 * pch_udc_ep_enable() - Configure and enable an endpoint
 * @ep:	the endpoint
 * @cfg:	current configuration/interface/alt-setting
 * @desc:	the USB endpoint descriptor to program
 *
 * Programs transfer type, buffer size, max packet size, NAKs the
 * endpoint, flushes its FIFO, then writes the composed UDC_CSR_NE_*
 * endpoint-info word into the matching CSR slot.
 */
static void pch_udc_ep_enable(struct pch_udc_ep *ep,
			      struct pch_udc_cfg_data *cfg,
			      const struct usb_endpoint_descriptor *desc)
{
	u32 val = 0;
	u32 buff_size = 0;

	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
	if (ep->in)
		buff_size = UDC_EPIN_BUFF_SIZE;
	else
		buff_size = UDC_EPOUT_BUFF_SIZE;
	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
	pch_udc_ep_set_nak(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	/* Compose the endpoint-info word: num, dir, type, cfg, intf, alt,
	 * max packet */
	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
		UDC_CSR_NE_TYPE_SHIFT) |
	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;

	if (ep->in)
		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
	else
		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
}
1019
1020
1021
1022
1023
/**
 * pch_udc_ep_disable() - Disable an endpoint
 * @ep:	the endpoint
 *
 * IN endpoints are flushed and NAKed and their IN status acknowledged;
 * OUT endpoints are just NAKed.  The descriptor pointer is cleared last.
 */
static void pch_udc_ep_disable(struct pch_udc_ep *ep)
{
	if (ep->in) {
		/* Flush the IN FIFO */
		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
		/* Set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
	} else {
		/* Set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
	}
	/* Reset the descriptor pointer */
	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
}
1039
1040
1041
1042
1043
1044static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1045{
1046 unsigned int count = 10000;
1047
1048
1049 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1050 udelay(5);
1051 if (!count)
1052 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1053}
1054
1055
1056
1057
1058
/**
 * pch_udc_init() - Reset and reprogram the UDC core
 * @dev:	the device structure
 *
 * Soft-resets the core (SRST, then SRST|PSRST, then release), masks and
 * acknowledges all device and endpoint interrupts, selects full or high
 * speed per the speed_fs module parameter, and programs the burst and
 * threshold lengths.  Order of the reset writes is hardware-mandated.
 */
static void pch_udc_init(struct pch_udc_dev *dev)
{
	if (NULL == dev) {
		pr_err("%s: Invalid address\n", __func__);
		return;
	}
	/* Soft-reset sequence with ~1 ms settle times */
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
	mdelay(1);
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
	mdelay(1);
	/* Mask and clear all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);

	/* Mask and clear all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);

	/* Device configuration: CSR programming, self-powered, speed */
	if (speed_fs)
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
	else /* defaults to high speed */
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
			UDC_DEVCTL_THE);
}
1093
1094
1095
1096
1097
/**
 * pch_udc_exit() - Quiesce the UDC core
 * @dev:	the device structure
 *
 * Masks all device and endpoint interrupts and soft-disconnects from
 * the host.
 */
static void pch_udc_exit(struct pch_udc_dev *dev)
{
	/* Mask all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	/* Mask all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	/* Drop off the bus */
	pch_udc_set_disconnect(dev);
}
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1117{
1118 struct pch_udc_dev *dev;
1119
1120 if (!gadget)
1121 return -EINVAL;
1122 dev = container_of(gadget, struct pch_udc_dev, gadget);
1123 return pch_udc_get_frame(dev);
1124}
1125
1126
1127
1128
1129
1130
1131
1132
1133
/**
 * pch_udc_pcd_wakeup() - usb_gadget_ops .wakeup
 * @gadget:	the gadget
 *
 * Issues remote-wakeup signalling under the device lock.
 *
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
{
	struct pch_udc_dev *dev;
	unsigned long flags;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	pch_udc_rmt_wakeup(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
/**
 * pch_udc_pcd_selfpowered() - usb_gadget_ops .set_selfpowered
 * @gadget:	the gadget
 * @value:	non-zero to report self-powered
 *
 * Mirrors the flag into both the gadget structure and the hardware SP bit.
 *
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	gadget->is_selfpowered = (value != 0);
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	if (value)
		pch_udc_set_selfpowered(dev);
	else
		pch_udc_clear_selfpowered(dev);
	return 0;
}
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
/**
 * pch_udc_pcd_pullup() - usb_gadget_ops .pullup (software D+ connect)
 * @gadget:	the gadget
 * @is_on:	non-zero to connect, zero to disconnect
 *
 * On disconnect the gadget driver's disconnect callback runs under the
 * device lock before the controller soft-disconnects.
 *
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	if (is_on) {
		pch_udc_reconnect(dev);
	} else {
		if (dev->driver && dev->driver->disconnect) {
			spin_lock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_unlock(&dev->lock);
		}
		pch_udc_set_disconnect(dev);
	}

	return 0;
}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/**
 * pch_udc_pcd_vbus_session() - usb_gadget_ops .vbus_session
 * @gadget:	the gadget
 * @is_active:	non-zero when VBUS is present
 *
 * Return: 0 on success, -EINVAL if @gadget is NULL.
 */
static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	pch_udc_vbus_session(dev, is_active);
	return 0;
}
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
/**
 * pch_udc_pcd_vbus_draw() - usb_gadget_ops .vbus_draw
 * @gadget:	the gadget
 * @mA:	allowed current draw
 *
 * Not supported by this controller.
 *
 * Return: always -EOPNOTSUPP.
 */
static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	return -EOPNOTSUPP;
}
1241
static int pch_udc_start(struct usb_gadget *g,
			 struct usb_gadget_driver *driver);
static int pch_udc_stop(struct usb_gadget *g);

/* Gadget operations exported to the UDC core. */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
	.udc_start = pch_udc_start,
	.udc_stop = pch_udc_stop,
};
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1267{
1268 int vbus = 0;
1269
1270 if (dev->vbus_gpio.port)
1271 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1272 else
1273 vbus = -1;
1274
1275 return vbus;
1276}
1277
1278
1279
1280
1281
1282
1283
1284static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1285{
1286 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1287 struct pch_vbus_gpio_data, irq_work_fall);
1288 struct pch_udc_dev *dev =
1289 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1290 int vbus_saved = -1;
1291 int vbus;
1292 int count;
1293
1294 if (!dev->vbus_gpio.port)
1295 return;
1296
1297 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1298 count++) {
1299 vbus = pch_vbus_gpio_get_value(dev);
1300
1301 if ((vbus_saved == vbus) && (vbus == 0)) {
1302 dev_dbg(&dev->pdev->dev, "VBUS fell");
1303 if (dev->driver
1304 && dev->driver->disconnect) {
1305 dev->driver->disconnect(
1306 &dev->gadget);
1307 }
1308 if (dev->vbus_gpio.intr)
1309 pch_udc_init(dev);
1310 else
1311 pch_udc_reconnect(dev);
1312 return;
1313 }
1314 vbus_saved = vbus;
1315 mdelay(PCH_VBUS_INTERVAL);
1316 }
1317}
1318
1319
1320
1321
1322
1323
1324
1325static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1326{
1327 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1328 struct pch_vbus_gpio_data, irq_work_rise);
1329 struct pch_udc_dev *dev =
1330 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1331 int vbus;
1332
1333 if (!dev->vbus_gpio.port)
1334 return;
1335
1336 mdelay(PCH_VBUS_INTERVAL);
1337 vbus = pch_vbus_gpio_get_value(dev);
1338
1339 if (vbus == 1) {
1340 dev_dbg(&dev->pdev->dev, "VBUS rose");
1341 pch_udc_reconnect(dev);
1342 return;
1343 }
1344}
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1356{
1357 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1358
1359 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1360 return IRQ_NONE;
1361
1362 if (pch_vbus_gpio_get_value(dev))
1363 schedule_work(&dev->vbus_gpio.irq_work_rise);
1364 else
1365 schedule_work(&dev->vbus_gpio.irq_work_fall);
1366
1367 return IRQ_HANDLED;
1368}
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
/**
 * pch_vbus_gpio_init() - Set up GPIO-based VBUS sensing
 * @dev:		the device structure
 * @vbus_gpio_port:	GPIO number to use, or negative to disable
 *
 * Requests the GPIO as an input and, if it maps to an IRQ, installs an
 * edge-triggered handler.  Failure to get the IRQ degrades to polling
 * (intr stays 0) rather than failing the whole init.
 *
 * Return: 0 on success (including IRQ-less operation), -EINVAL if the
 * port is negative, invalid, or cannot be requested.
 */
static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
{
	int err;
	int irq_num = 0;

	dev->vbus_gpio.port = 0;
	dev->vbus_gpio.intr = 0;

	if (vbus_gpio_port <= -1)
		return -EINVAL;

	err = gpio_is_valid(vbus_gpio_port);	/* boolean, despite the name */
	if (!err) {
		pr_err("%s: gpio port %d is invalid\n",
			__func__, vbus_gpio_port);
		return -EINVAL;
	}

	err = gpio_request(vbus_gpio_port, "pch_vbus");
	if (err) {
		pr_err("%s: can't request gpio port %d, err: %d\n",
			__func__, vbus_gpio_port, err);
		return -EINVAL;
	}

	dev->vbus_gpio.port = vbus_gpio_port;
	gpio_direction_input(vbus_gpio_port);
	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);

	irq_num = gpio_to_irq(vbus_gpio_port);
	if (irq_num > 0) {
		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
				  "vbus_detect", dev);
		if (!err) {
			dev->vbus_gpio.intr = irq_num;
			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
				  pch_vbus_gpio_work_rise);
		} else {
			/* fall back to polling mode */
			pr_err("%s: can't request irq %d, err: %d\n",
				__func__, irq_num, err);
		}
	}

	return 0;
}
1425
1426
1427
1428
1429
/**
 * pch_vbus_gpio_free() - Release the VBUS GPIO and its IRQ
 * @dev:	the device structure
 *
 * The IRQ is freed before the GPIO it was derived from.
 */
static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
{
	if (dev->vbus_gpio.intr)
		free_irq(dev->vbus_gpio.intr, dev);

	if (dev->vbus_gpio.port)
		gpio_free(dev->vbus_gpio.port);
}
1438
1439
1440
1441
1442
1443
1444
1445
1446static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1447 int status)
1448 __releases(&dev->lock)
1449 __acquires(&dev->lock)
1450{
1451 struct pch_udc_dev *dev;
1452 unsigned halted = ep->halted;
1453
1454 list_del_init(&req->queue);
1455
1456
1457 if (req->req.status == -EINPROGRESS)
1458 req->req.status = status;
1459 else
1460 status = req->req.status;
1461
1462 dev = ep->dev;
1463 if (req->dma_mapped) {
1464 if (req->dma == DMA_ADDR_INVALID) {
1465 if (ep->in)
1466 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1467 req->req.length,
1468 DMA_TO_DEVICE);
1469 else
1470 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1471 req->req.length,
1472 DMA_FROM_DEVICE);
1473 req->req.dma = DMA_ADDR_INVALID;
1474 } else {
1475 if (ep->in)
1476 dma_unmap_single(&dev->pdev->dev, req->dma,
1477 req->req.length,
1478 DMA_TO_DEVICE);
1479 else {
1480 dma_unmap_single(&dev->pdev->dev, req->dma,
1481 req->req.length,
1482 DMA_FROM_DEVICE);
1483 memcpy(req->req.buf, req->buf, req->req.length);
1484 }
1485 kfree(req->buf);
1486 req->dma = DMA_ADDR_INVALID;
1487 }
1488 req->dma_mapped = 0;
1489 }
1490 ep->halted = 1;
1491 spin_lock(&dev->lock);
1492 if (!ep->in)
1493 pch_udc_ep_clear_rrdy(ep);
1494 usb_gadget_giveback_request(&ep->ep, &req->req);
1495 spin_unlock(&dev->lock);
1496 ep->halted = halted;
1497}
1498
1499
1500
1501
1502
1503static void empty_req_queue(struct pch_udc_ep *ep)
1504{
1505 struct pch_udc_request *req;
1506
1507 ep->halted = 1;
1508 while (!list_empty(&ep->queue)) {
1509 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1510 complete_req(ep, req, -ESHUTDOWN);
1511 }
1512}
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
/**
 * pch_udc_free_dma_chain() - Free all but the first descriptor of a chain
 * @dev:	the device structure
 * @req:	request whose descriptor chain is freed
 *
 * Walks the chain via each descriptor's @next DMA address (converted back
 * with phys_to_virt — assumes pool memory is in the direct map; TODO
 * confirm), freeing every descriptor after the head.  The head descriptor
 * (req->td_data) is kept and chain_len reset to 1.
 */
static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
				   struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td = req->td_data;
	unsigned i = req->chain_len;

	dma_addr_t addr2;
	dma_addr_t addr = (dma_addr_t)td->next;
	td->next = 0x00;
	for (; i > 1; --i) {
		/* do not free the first desc., will be done by free for request */
		td = phys_to_virt(addr);
		addr2 = (dma_addr_t)td->next;
		pci_pool_free(dev->data_requests, td, addr);
		td->next = 0x00;
		addr = addr2;
	}
	req->chain_len = 1;
}
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
/**
 * pch_udc_create_dma_chain() - Build a descriptor chain for a request
 * @ep:		the endpoint
 * @req:	the request to build the chain for
 * @buf_len:	bytes covered by each descriptor (max packet size)
 * @gfp_flags:	allocation flags
 *
 * Splits req->req.length into buf_len-sized descriptors allocated from
 * the data_requests pool.  The last descriptor is tagged PCH_UDC_DMA_LAST
 * and its @next wraps back to the head's DMA address.  Any previous chain
 * is freed first.
 *
 * Return: 0 on success, -ENOMEM if a descriptor allocation fails (the
 * partial chain is freed).
 */
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
				    struct pch_udc_request *req,
				    unsigned long buf_len,
				    gfp_t gfp_flags)
{
	struct pch_udc_data_dma_desc *td = req->td_data, *last;
	unsigned long bytes = req->req.length, i = 0;
	dma_addr_t dma_addr;
	unsigned len = 1;

	if (req->chain_len > 1)
		pch_udc_free_dma_chain(ep->dev, req);

	/* Head descriptor points at the mapped buffer (caller's or bounce) */
	if (req->dma == DMA_ADDR_INVALID)
		td->dataptr = req->req.dma;
	else
		td->dataptr = req->dma;

	td->status = PCH_UDC_BS_HST_BSY;
	for (; ; bytes -= buf_len, ++len) {
		/* busy status + byte count for this descriptor */
		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
		if (bytes <= buf_len)
			break;
		last = td;
		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
		if (!td)
			goto nomem;
		i += buf_len;
		td->dataptr = req->td_data->dataptr + i;
		last->next = dma_addr;
	}

	req->td_data_last = td;
	td->status |= PCH_UDC_DMA_LAST;
	td->next = req->td_data_phys;	/* wrap back to the head */
	req->chain_len = len;
	return 0;

nomem:
	if (len > 1) {
		req->chain_len = len;
		pch_udc_free_dma_chain(ep->dev, req);
	}
	req->chain_len = 1;
	return -ENOMEM;
}
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1615 gfp_t gfp)
1616{
1617 int retval;
1618
1619
1620 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1621 if (retval) {
1622 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1623 return retval;
1624 }
1625 if (ep->in)
1626 req->td_data->status = (req->td_data->status &
1627 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1628 return 0;
1629}
1630
1631
1632
1633
1634
1635
1636
/**
 * process_zlp() - Complete a zero-length request on a control endpoint
 * @ep:		endpoint the request was queued on
 * @req:	zero-length request to complete
 *
 * Completes the request immediately (there is no data to transfer), then
 * acknowledges a pending SET_CONFIGURATION/SET_INTERFACE via the CSR-done
 * handshake and, when a ZLP ack was outstanding and no stall is pending,
 * un-NAKs ep0-in so the status stage can proceed.
 */
static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
{
	struct pch_udc_dev *dev = ep->dev;

	/* Zero length means nothing to DMA; finish the request now. */
	complete_req(ep, req, 0);

	/*
	 * Acknowledge a deferred config/interface change now that the
	 * gadget driver has responded.
	 */
	if (dev->set_cfg_not_acked) {
		pch_udc_set_csr_done(dev);
		dev->set_cfg_not_acked = 0;
	}
	/* Let the ep0-in status stage go out unless a stall is pending. */
	if (!dev->stall && dev->waiting_zlp_ack) {
		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
		dev->waiting_zlp_ack = 0;
	}
}
1657
1658
1659
1660
1661
1662
/**
 * pch_udc_start_rxrequest() - Hand an OUT request's chain to the hardware
 * @ep:		OUT endpoint to start receiving on
 * @req:	request whose descriptor chain will receive the data
 *
 * Disables RX DMA, marks every descriptor in the chain host-ready, points
 * the endpoint's descriptor pointer at the head, then re-enables RX DMA
 * and clears NAK so the host may send.  The order of these register writes
 * follows the controller's expectations; do not reorder casually.
 */
static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
				    struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td_data;

	/* Stop RX DMA while the chain is being armed. */
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	td_data = req->td_data;
	/* Mark every descriptor ready; the ring ends at PCH_UDC_DMA_LAST. */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	/* Point the controller at the head descriptor and go. */
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	req->dma_going = 1;
	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_clear_nak(ep);
	pch_udc_ep_set_rrdy(ep);
}
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
/**
 * pch_udc_pcd_ep_enable() - usb_ep_ops.enable: configure an endpoint
 * @usbep:	endpoint to enable (ep0 is rejected)
 * @desc:	endpoint descriptor providing type and max packet size
 *
 * Programs the endpoint hardware from @desc, unhalts it, and enables its
 * interrupt.  Runs under dev->lock with IRQs disabled.
 *
 * Return: 0 on success, -EINVAL for bad arguments or ep0, -ESHUTDOWN if no
 * gadget driver is bound or the bus speed is still unknown.
 */
static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
				 const struct usb_endpoint_descriptor *desc)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;

	if (!usbep || (usbep->name == ep0_string) || !desc ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;
	ep->halted = 0;
	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
/**
 * pch_udc_pcd_ep_disable() - usb_ep_ops.disable: shut down an endpoint
 * @usbep:	endpoint to disable (ep0 is rejected)
 *
 * Completes all pending requests with -ESHUTDOWN, halts and disables the
 * endpoint hardware, masks its interrupt, and clears the descriptor so
 * later ops treat the endpoint as unconfigured.
 *
 * Return: 0 on success, -EINVAL for NULL @usbep, ep0, or an endpoint that
 * was never enabled.
 */
static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if ((usbep->name == ep0_string) || !ep->ep.desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, iflags);
	empty_req_queue(ep);
	ep->halted = 1;
	pch_udc_ep_disable(ep);
	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	ep->ep.desc = NULL;
	INIT_LIST_HEAD(&ep->queue);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1768 gfp_t gfp)
1769{
1770 struct pch_udc_request *req;
1771 struct pch_udc_ep *ep;
1772 struct pch_udc_data_dma_desc *dma_desc;
1773 struct pch_udc_dev *dev;
1774
1775 if (!usbep)
1776 return NULL;
1777 ep = container_of(usbep, struct pch_udc_ep, ep);
1778 dev = ep->dev;
1779 req = kzalloc(sizeof *req, gfp);
1780 if (!req)
1781 return NULL;
1782 req->req.dma = DMA_ADDR_INVALID;
1783 req->dma = DMA_ADDR_INVALID;
1784 INIT_LIST_HEAD(&req->queue);
1785 if (!ep->dev->dma_addr)
1786 return &req->req;
1787
1788 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1789 &req->td_data_phys);
1790 if (NULL == dma_desc) {
1791 kfree(req);
1792 return NULL;
1793 }
1794
1795 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1796 dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1797 req->td_data = dma_desc;
1798 req->td_data_last = dma_desc;
1799 req->chain_len = 1;
1800 return &req->req;
1801}
1802
1803
1804
1805
1806
1807
1808
1809static void pch_udc_free_request(struct usb_ep *usbep,
1810 struct usb_request *usbreq)
1811{
1812 struct pch_udc_ep *ep;
1813 struct pch_udc_request *req;
1814 struct pch_udc_dev *dev;
1815
1816 if (!usbep || !usbreq)
1817 return;
1818 ep = container_of(usbep, struct pch_udc_ep, ep);
1819 req = container_of(usbreq, struct pch_udc_request, req);
1820 dev = ep->dev;
1821 if (!list_empty(&req->queue))
1822 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1823 __func__, usbep->name, req);
1824 if (req->td_data != NULL) {
1825 if (req->chain_len > 1)
1826 pch_udc_free_dma_chain(ep->dev, req);
1827 pci_pool_free(ep->dev->data_requests, req->td_data,
1828 req->td_data_phys);
1829 }
1830 kfree(req);
1831}
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
/**
 * pch_udc_pcd_queue() - usb_ep_ops.queue: submit a request
 * @usbep:	endpoint to queue on
 * @usbreq:	request to submit
 * @gfp:	allocation flags from the caller
 *
 * Maps the request buffer for DMA (using a bounce buffer when the buffer
 * is not 4-byte aligned), builds the descriptor chain, and — when the
 * endpoint is idle — starts the transfer immediately (RX start for OUT,
 * un-NAK for IN, or ZLP handling for zero-length requests).
 *
 * Return: 0 on success; -EINVAL for bad arguments or an already-queued
 * request; -ESHUTDOWN if no driver is bound or speed is unknown;
 * -ENOMEM on bounce-buffer or descriptor allocation failure.
 */
static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
			     gfp_t gfp)
{
	int retval = 0;
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	struct pch_udc_request *req;
	unsigned long iflags;

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->ep.desc && ep->num)
		return -EINVAL;
	req = container_of(usbreq, struct pch_udc_request, req);
	if (!list_empty(&req->queue))
		return -EINVAL;
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	/* map the buffer for dma unless the caller mapped it already */
	if (usbreq->length &&
	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
		if (!((unsigned long)(usbreq->buf) & 0x03)) {
			/* 4-byte aligned buffer: map it directly. */
			if (ep->in)
				usbreq->dma = dma_map_single(&dev->pdev->dev,
							     usbreq->buf,
							     usbreq->length,
							     DMA_TO_DEVICE);
			else
				usbreq->dma = dma_map_single(&dev->pdev->dev,
							     usbreq->buf,
							     usbreq->length,
							     DMA_FROM_DEVICE);
		} else {
			/*
			 * Unaligned buffer: allocate an aligned bounce
			 * buffer; complete_req() copies OUT data back and
			 * frees it.
			 */
			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
			if (!req->buf) {
				retval = -ENOMEM;
				goto probe_end;
			}
			if (ep->in) {
				memcpy(req->buf, usbreq->buf, usbreq->length);
				req->dma = dma_map_single(&dev->pdev->dev,
							  req->buf,
							  usbreq->length,
							  DMA_TO_DEVICE);
			} else
				req->dma = dma_map_single(&dev->pdev->dev,
							  req->buf,
							  usbreq->length,
							  DMA_FROM_DEVICE);
		}
		req->dma_mapped = 1;
	}
	if (usbreq->length > 0) {
		retval = prepare_dma(ep, req, GFP_ATOMIC);
		if (retval)
			goto probe_end;
	}
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;
	if (list_empty(&ep->queue) && !ep->halted) {
		/* Endpoint idle: kick the transfer off right away. */
		if (!usbreq->length) {
			process_zlp(ep, req);
			retval = 0;
			goto probe_end;
		}
		if (!ep->in) {
			pch_udc_start_rxrequest(ep, req);
		} else {
			/*
			 * For IN, wait for any stall to settle, then
			 * un-NAK; the IN-token interrupt starts the
			 * actual TX (pch_udc_start_next_txrequest).
			 */
			pch_udc_wait_ep_stall(ep);
			pch_udc_ep_clear_nak(ep);
			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
		}
	}
	/* Now add this request to the ep's pending requests */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);

probe_end:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1947 struct usb_request *usbreq)
1948{
1949 struct pch_udc_ep *ep;
1950 struct pch_udc_request *req;
1951 struct pch_udc_dev *dev;
1952 unsigned long flags;
1953 int ret = -EINVAL;
1954
1955 ep = container_of(usbep, struct pch_udc_ep, ep);
1956 dev = ep->dev;
1957 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1958 return ret;
1959 req = container_of(usbreq, struct pch_udc_request, req);
1960 spin_lock_irqsave(&ep->dev->lock, flags);
1961
1962 list_for_each_entry(req, &ep->queue, queue) {
1963 if (&req->req == usbreq) {
1964 pch_udc_ep_set_nak(ep);
1965 if (!list_empty(&req->queue))
1966 complete_req(ep, req, -ECONNRESET);
1967 ret = 0;
1968 break;
1969 }
1970 }
1971 spin_unlock_irqrestore(&ep->dev->lock, flags);
1972 return ret;
1973}
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
/**
 * pch_udc_pcd_set_halt() - usb_ep_ops.set_halt: stall or unstall an endpoint
 * @usbep:	endpoint to (un)stall
 * @halt:	nonzero to stall, zero to clear the stall
 *
 * Only acts when the endpoint queue is empty; otherwise returns -EAGAIN so
 * the caller can retry after in-flight requests drain.  For ep0, a stall
 * also sets dev->stall so the control state machine knows.
 *
 * Return: 0 on success, -EAGAIN if requests are pending, -EINVAL/-ESHUTDOWN
 * for bad arguments or an unbound/unknown-speed device.
 */
static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (list_empty(&ep->queue)) {
		if (halt) {
			if (ep->num == PCH_UDC_EP0)
				ep->dev->stall = 1;
			pch_udc_ep_set_stall(ep);
			/* re-enable the interrupt so the stall completes */
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in,
								   ep->num));
		} else {
			pch_udc_ep_clear_stall(ep);
		}
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2031{
2032 struct pch_udc_ep *ep;
2033 struct pch_udc_dev *dev;
2034 unsigned long iflags;
2035 int ret;
2036
2037 if (!usbep)
2038 return -EINVAL;
2039 ep = container_of(usbep, struct pch_udc_ep, ep);
2040 dev = ep->dev;
2041 if (!ep->ep.desc && !ep->num)
2042 return -EINVAL;
2043 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2044 return -ESHUTDOWN;
2045 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2046 if (!list_empty(&ep->queue)) {
2047 ret = -EAGAIN;
2048 } else {
2049 if (ep->num == PCH_UDC_EP0)
2050 ep->dev->stall = 1;
2051 pch_udc_ep_set_stall(ep);
2052 pch_udc_enable_ep_interrupts(ep->dev,
2053 PCH_UDC_EPINT(ep->in, ep->num));
2054 ep->dev->prot_stall = 1;
2055 ret = 0;
2056 }
2057 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2058 return ret;
2059}
2060
2061
2062
2063
2064
2065static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2066{
2067 struct pch_udc_ep *ep;
2068
2069 if (!usbep)
2070 return;
2071
2072 ep = container_of(usbep, struct pch_udc_ep, ep);
2073 if (ep->ep.desc || !ep->num)
2074 pch_udc_ep_fifo_flush(ep, ep->in);
2075}
2076
/* Endpoint operations exported to the gadget core via each ep->ep.ops. */
static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable		= pch_udc_pcd_ep_enable,
	.disable	= pch_udc_pcd_ep_disable,
	.alloc_request	= pch_udc_alloc_request,
	.free_request	= pch_udc_free_request,
	.queue		= pch_udc_pcd_queue,
	.dequeue	= pch_udc_pcd_dequeue,
	.set_halt	= pch_udc_pcd_set_halt,
	.set_wedge	= pch_udc_pcd_set_wedge,
	.fifo_status	= NULL,		/* FIFO status not supported */
	.fifo_flush	= pch_udc_pcd_fifo_flush,
};
2089
2090
2091
2092
2093
2094static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2095{
2096 static u32 pky_marker;
2097
2098 if (!td_stp)
2099 return;
2100 td_stp->reserved = ++pky_marker;
2101 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2102 td_stp->status = PCH_UDC_BS_HST_RDY;
2103}
2104
2105
2106
2107
2108
2109
/**
 * pch_udc_start_next_txrequest() - Start the next queued IN transfer
 * @ep:	IN endpoint to service
 *
 * If no transfer is in progress (poll bit clear) and the head request is
 * not already going, marks every descriptor in its chain host-ready,
 * programs the descriptor pointer, and enables TX DMA + poll demand so
 * the controller begins sending.
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	/* Poll-demand still set: hardware is busy with a transfer. */
	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	/* Clear the pointer before walking/arming the chain. */
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}
2144
2145
2146
2147
2148
/**
 * pch_udc_complete_transfer() - Finish a completed IN (TX) transfer
 * @ep:	IN endpoint whose head request may have completed
 *
 * Checks the last descriptor for DMA-done + success status; on success,
 * completes the head request with actual == length, re-arms for the next
 * queued request or masks the endpoint interrupt when the queue is empty.
 * On a bad RXTX status the transfer is left alone and an error is logged.
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	/* Hardware not finished with the last descriptor yet. */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	     PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
		       (int)(ep->epsts));
		return;
	}

	/* IN: everything requested was sent. */
	req->req.actual = req->req.length;
	/* Return first and last descriptors to host-busy for reuse. */
	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	complete_req(ep, req, 0);
	req->dma_going = 0;
	if (!list_empty(&ep->queue)) {
		/* More work pending: un-NAK and keep the interrupt on. */
		pch_udc_wait_ep_stall(ep);
		pch_udc_ep_clear_nak(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	} else {
		pch_udc_disable_ep_interrupts(ep->dev,
					      PCH_UDC_EPINT(ep->in, ep->num));
	}
}
2184
2185
2186
2187
2188
/**
 * pch_udc_complete_receiver() - Finish a completed OUT (RX) transfer
 * @ep:	OUT endpoint whose head request may have completed
 *
 * Stops RX DMA, walks the descriptor chain to find the DMA_LAST descriptor
 * that completed, extracts the received byte count, completes the head
 * request, and starts the next queued RX request if any.  Errors (bad RXTX
 * status, chain exhausted without a completed last descriptor) are logged
 * and the request is left queued.
 */
static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;
	unsigned int count;
	struct pch_udc_data_dma_desc *td;
	dma_addr_t addr;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_set_ddptr(ep, 0);
	/* Short-cut: start the walk at the last descriptor if it's done. */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
	    PCH_UDC_BS_DMA_DONE)
		td = req->td_data_last;
	else
		td = req->td_data;

	while (1) {
		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
				"epstatus=0x%08x\n",
				(req->td_data->status & PCH_UDC_RXTX_STS),
				(int)(ep->epsts));
			return;
		}
		/* Completed final descriptor carries the byte count. */
		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
			if (td->status & PCH_UDC_DMA_LAST) {
				count = td->status & PCH_UDC_RXTX_BYTES;
				break;
			}
		if (td == req->td_data_last) {
			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
			return;
		}
		addr = (dma_addr_t)td->next;
		td = phys_to_virt(addr);
	}
	/* A zero count on a max-length request means a full buffer. */
	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
		count = UDC_DMA_MAXPACKET;
	req->td_data->status |= PCH_UDC_DMA_LAST;
	td->status |= PCH_UDC_BS_HST_BSY;

	req->dma_going = 0;
	req->req.actual = count;
	complete_req(ep, req, 0);
	/* If there is a new/failed requests try that now */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
		pch_udc_start_rxrequest(ep, req);
	}
}
2244
2245
2246
2247
2248
2249
2250
/**
 * pch_udc_svc_data_in() - Service an IN endpoint interrupt
 * @dev:	device context
 * @ep_num:	endpoint number (0-based)
 *
 * Consumes the saved endpoint status (ep->epsts), handles stall set/clear
 * requests, completes a finished transfer on TDC, and kicks the next
 * queued transfer on a plain IN-token event.
 */
static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
	epsts = ep->epsts;
	ep->epsts = 0;

	/* Nothing of interest latched for this endpoint. */
	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
		return;
	/* Buffer-not-available and host-error events are ignored here. */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	/* Host asked SET_FEATURE(HALT): stall the endpoint. */
	if (epsts & UDC_EPSTS_RSS) {
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	/* Host asked CLEAR_FEATURE(HALT): honour unless protocol-stalled. */
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	/* Transmit DMA complete: finish the head request. */
	if (epsts & UDC_EPSTS_TDC)
		pch_udc_complete_transfer(ep);
	/* On IN token demand data, if no TDC status and TXEMPTY happened,
	 * start the next transfer.
	 */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2289
2290
2291
2292
2293
2294
/**
 * pch_udc_svc_data_out() - Service an OUT endpoint interrupt
 * @dev:	device context
 * @ep_num:	endpoint number (0-based)
 *
 * Consumes the saved endpoint status, restarts a stalled-by-BNA request,
 * handles stall set/clear, and completes received data when the status
 * field indicates an OUT-data event.  Re-enables RX DMA once the queue
 * runs empty.
 */
static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_request *req = NULL;

	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
	epsts = ep->epsts;
	ep->epsts = 0;

	/* Buffer-not-available with a queued request: restart it. */
	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
		/* next request */
		req = list_entry(ep->queue.next, struct pch_udc_request,
				 queue);
		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
		     PCH_UDC_BS_DMA_DONE) {
			if (!req->dma_going)
				pch_udc_start_rxrequest(ep, req);
			return;
		}
	}
	if (epsts & UDC_EPSTS_HE)
		return;
	/* Host asked SET_FEATURE(HALT): stall the endpoint. */
	if (epsts & UDC_EPSTS_RSS) {
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	/* Host asked CLEAR_FEATURE(HALT): honour unless protocol-stalled. */
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	/* OUT-data received: complete it, or stall if protocol-stalled. */
	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_DATA) {
		if (ep->dev->prot_stall == 1) {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_complete_receiver(ep);
		}
	}
	if (list_empty(&ep->queue))
		pch_udc_set_dma(dev, DMA_DIR_RX);
}
2345
2346
2347
2348
2349
/**
 * pch_udc_svc_control_in() - Service an ep0-IN interrupt
 * @dev:	device context
 *
 * On transmit-complete (and no pending stall), finishes the IN data stage
 * and re-arms ep0-OUT for the status/next setup stage.  On a plain
 * IN-token event, starts the next queued ep0-IN transfer.
 */
static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_ep *ep_out;

	ep = &dev->ep[UDC_EP0IN_IDX];
	ep_out = &dev->ep[UDC_EP0OUT_IDX];
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_XFERDONE)))
		return;
	/* Buffer-not-available and host-error events are ignored here. */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
		pch_udc_complete_transfer(ep);
		/* Re-arm ep0-OUT: ready the descriptor and restart RX DMA. */
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		ep_out->td_data->status = (ep_out->td_data->status &
					   ~PCH_UDC_BUFF_STS) |
					  PCH_UDC_BS_HST_RDY;
		pch_udc_ep_clear_nak(ep_out);
		pch_udc_set_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_rrdy(ep_out);
	}
	/* On IN token received, stall if host is faster than device */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
	    !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2384
2385
2386
2387
2388
2389
/**
 * pch_udc_svc_control_out() - Service an ep0-OUT interrupt
 * @dev:	device context
 *
 * Handles the two ep0-OUT events latched in ep->epsts:
 *  - SETUP received: copies the setup packet, resets stall state, picks
 *    the ep0 direction for the gadget core, dispatches to the gadget
 *    driver's setup() (dropping dev->lock around the callback), then
 *    programs the hardware according to the driver's answer (proceed,
 *    stall, or wait for a ZLP ack).
 *  - OUT data: forwards to the data-out service path if requests are
 *    queued.
 * Always re-arms receive-ready on exit.
 */
static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	u32 stat;
	int setup_supported;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EP0OUT_IDX];
	stat = ep->epsts;
	ep->epsts = 0;

	/* If setup data */
	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_SETUP) {
		dev->stall = 0;
		dev->ep[UDC_EP0IN_IDX].halted = 0;
		dev->ep[UDC_EP0OUT_IDX].halted = 0;
		dev->setup_data = ep->td_stp->request;
		pch_udc_init_setup_buff(ep->td_stp);
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
				      dev->ep[UDC_EP0IN_IDX].in);
		/* ep0 direction for this transfer follows bRequestType. */
		if ((dev->setup_data.bRequestType & USB_DIR_IN))
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
		else /* OUT */
			dev->gadget.ep0 = &ep->ep;
		spin_lock(&dev->lock);
		/* If Mass storage Reset */
		if ((dev->setup_data.bRequestType == 0x21) &&
		    (dev->setup_data.bRequest == 0xFF))
			dev->prot_stall = 0;
		/* call gadget with setup data received */
		setup_supported = dev->driver->setup(&dev->gadget,
						     &dev->setup_data);
		spin_unlock(&dev->lock);

		if (dev->setup_data.bRequestType & USB_DIR_IN) {
			ep->td_data->status = (ep->td_data->status &
					       ~PCH_UDC_BUFF_STS) |
					      PCH_UDC_BS_HST_RDY;
			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
		}
		/* ep0 in returns data on IN phase */
		if (setup_supported >= 0 && setup_supported <
		    UDC_EP0IN_MAX_PKT_SIZE) {
			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
			/* Gadget would have queued a request when
			 * we called the setup */
			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
				pch_udc_set_dma(dev, DMA_DIR_RX);
				pch_udc_ep_clear_nak(ep);
			}
		} else if (setup_supported < 0) {
			/* Driver rejected the request: stall ep0. */
			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
			dev->stall = 0;
			pch_udc_set_dma(dev, DMA_DIR_RX);
		} else {
			dev->waiting_zlp_ack = 1;
		}
	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
		    UDC_EPSTS_OUT_DATA) && !dev->stall) {
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_ddptr(ep, 0);
		if (!list_empty(&ep->queue)) {
			ep->epsts = stat;
			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
		}
		pch_udc_set_dma(dev, DMA_DIR_RX);
	}
	pch_udc_ep_set_rrdy(ep);
}
2465
2466
2467
2468
2469
2470
2471
2472
2473static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2474{
2475 struct pch_udc_ep *ep;
2476 struct pch_udc_request *req;
2477
2478 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2479 if (!list_empty(&ep->queue)) {
2480 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2481 pch_udc_enable_ep_interrupts(ep->dev,
2482 PCH_UDC_EPINT(ep->in, ep->num));
2483 pch_udc_ep_clear_nak(ep);
2484 }
2485}
2486
2487
2488
2489
2490
2491
2492static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2493{
2494 int i;
2495 struct pch_udc_ep *ep;
2496
2497 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2498
2499 if (ep_intr & (0x1 << i)) {
2500 ep = &dev->ep[UDC_EPIN_IDX(i)];
2501 ep->epsts = pch_udc_read_ep_status(ep);
2502 pch_udc_clear_ep_status(ep, ep->epsts);
2503 }
2504
2505 if (ep_intr & (0x10000 << i)) {
2506 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2507 ep->epsts = pch_udc_read_ep_status(ep);
2508 pch_udc_clear_ep_status(ep, ep->epsts);
2509 }
2510 }
2511}
2512
2513
2514
2515
2516
2517
/**
 * pch_udc_activate_control_ep() - Configure ep0-IN and ep0-OUT hardware
 * @dev:	device context
 *
 * Resets both control endpoints, programs their buffer and max-packet
 * sizes, installs the setup and data descriptor pointers for ep0-OUT,
 * and leaves ep0-OUT un-NAKed so the first SETUP can arrive.  Called
 * from the enumeration-done interrupt path.
 */
static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	u32 val;

	/* Setup the IN endpoint */
	ep = &dev->ep[UDC_EP0IN_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
	/* Initialize the IN EP Descriptor */
	ep->td_data = NULL;
	ep->td_stp = NULL;
	ep->td_data_phys = 0;
	ep->td_stp_phys = 0;

	/* Setup the OUT endpoint */
	ep = &dev->ep[UDC_EP0OUT_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);

	/* Initialize the SETUP buffer */
	pch_udc_init_setup_buff(ep->td_stp);
	/* Write the pointer address of dma descriptor */
	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
	/* Write the pointer address of Setup descriptor */
	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);

	/* Initialize the dma descriptor */
	ep->td_data->status = PCH_UDC_DMA_LAST;
	ep->td_data->dataptr = dev->dma_addr;
	ep->td_data->next = ep->td_data_phys;

	pch_udc_ep_clear_nak(ep);
}
2558
2559
2560
2561
2562
2563
/**
 * pch_udc_svc_ur_interrupt() - Handle a USB reset interrupt
 * @dev:	device context
 *
 * Stops all DMA, masks and clears every endpoint interrupt, resets every
 * endpoint's control/status/descriptor state, clears the driver's stall
 * and handshake flags, drains all request queues with -ESHUTDOWN, and
 * finally notifies the gadget core of the reset.
 */
static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	int i;

	pch_udc_clear_dma(dev, DMA_DIR_TX);
	pch_udc_clear_dma(dev, DMA_DIR_RX);
	/* Mask all endpoint interrupts */
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	/* clear all endpoint interrupts */
	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		ep = &dev->ep[i];
		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
		pch_udc_clear_ep_control(ep);
		pch_udc_ep_set_ddptr(ep, 0);
		pch_udc_write_csr(ep->dev, 0x00, i);
	}
	dev->stall = 0;
	dev->prot_stall = 0;
	dev->waiting_zlp_ack = 0;
	dev->set_cfg_not_acked = 0;

	/* disable ep to empty req queue. Skip the control EP's */
	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
		ep = &dev->ep[i];
		pch_udc_ep_set_nak(ep);
		pch_udc_ep_fifo_flush(ep, ep->in);
		/* Complete request queue */
		empty_req_queue(ep);
	}
	if (dev->driver) {
		spin_lock(&dev->lock);
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_unlock(&dev->lock);
	}
}
2602
2603
2604
2605
2606
2607
/**
 * pch_udc_svc_enum_interrupt() - Handle the enumeration-done interrupt
 * @dev:	device context
 *
 * Reads the negotiated bus speed from the device status register, records
 * it in the gadget, activates the control endpoints, enables ep0
 * interrupts and both DMA directions, and re-enables the device-level
 * interrupt set.  An unrecognised speed field triggers BUG().
 */
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;

	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
		    UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		/* Hardware reported a speed value this driver can't map. */
		BUG();
	}
	dev->gadget.speed = speed;
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));

	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}
2641
2642
2643
2644
2645
2646
2647static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2648{
2649 u32 reg, dev_stat = 0;
2650 int i, ret;
2651
2652 dev_stat = pch_udc_read_device_status(dev);
2653 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2654 UDC_DEVSTS_INTF_SHIFT;
2655 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2656 UDC_DEVSTS_ALT_SHIFT;
2657 dev->set_cfg_not_acked = 1;
2658
2659 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2660 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2661 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2662 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2663 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2664
2665
2666 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2667 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2668 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2669 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2670 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2671 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2672 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2673
2674 pch_udc_ep_clear_stall(&(dev->ep[i]));
2675 dev->ep[i].halted = 0;
2676 }
2677 dev->stall = 0;
2678 spin_lock(&dev->lock);
2679 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2680 spin_unlock(&dev->lock);
2681}
2682
2683
2684
2685
2686
2687
2688static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2689{
2690 int i, ret;
2691 u32 reg, dev_stat = 0;
2692
2693 dev_stat = pch_udc_read_device_status(dev);
2694 dev->set_cfg_not_acked = 1;
2695 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2696 UDC_DEVSTS_CFG_SHIFT;
2697
2698 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2699 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2700 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2701
2702
2703 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2704 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2705 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2706 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2707 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2708
2709 pch_udc_ep_clear_stall(&(dev->ep[i]));
2710 dev->ep[i].halted = 0;
2711 }
2712 dev->stall = 0;
2713
2714
2715 spin_lock(&dev->lock);
2716 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2717 spin_unlock(&dev->lock);
2718}
2719
2720
2721
2722
2723
2724
2725
/**
 * pch_udc_dev_isr() - Dispatch device-level interrupt causes
 * @dev:	device context
 * @dev_intr:	latched device interrupt bits
 *
 * Handles, in order: USB reset, enumeration done, SET_INTERFACE,
 * SET_CONFIGURATION, suspend (with VBUS-based disconnect detection),
 * and logs SOF/ES/RWKP events.  Driver callbacks are invoked under
 * dev->lock.
 */
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;

	/* USB Reset Interrupt */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration Done Interrupt */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}
	/* Set Interface Interrupt */
	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);
	/* Set Config Interrupt */
	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);
	/* USB Suspend interrupt */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			spin_lock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_unlock(&dev->lock);
		}

		/*
		 * Suspend with VBUS gone means a real disconnect; tell the
		 * gadget driver and reconnect the controller.
		 */
		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			if (dev->driver && dev->driver->disconnect) {
				spin_lock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_unlock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			/* No VBUS IRQ line: poll via the workqueue. */
			schedule_work(&dev->vbus_gpio.irq_work_fall);

		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}
	/* Clear the SOF interrupt, if enabled */
	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");
	/* idle suspend interrupt */
	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");
	/* RWKP interrupt */
	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}
2781
2782
2783
2784
2785
2786
/**
 * pch_udc_isr() - Top-level interrupt handler
 * @irq:	interrupt number (unused)
 * @pdev:	the pch_udc_dev passed at request_irq time
 *
 * Reads and acknowledges the device and endpoint interrupt registers,
 * detects a hung controller (all three registers reading the same value)
 * and soft-resets it, then dispatches device-level causes and per-endpoint
 * IN/OUT service routines under dev->lock.
 *
 * Return: IRQ_HANDLED if any interrupt was serviced (or the hang reset was
 * issued), IRQ_NONE otherwise.
 */
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;

	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);

	/* For a hung-bus detection: identical reads across unrelated
	 * registers indicate the controller has wedged.
	 */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			/* The controller is reset */
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear ep interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process Control In interrupts, if present */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process Control Out interrupts, if present */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data in end point interrupts */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 <<  i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data out end point interrupts */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						 PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
							 UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
2842
2843
2844
2845
2846
/**
 * pch_udc_setup_ep0() - Enable ep0 and device-level interrupts
 * @dev:	device context
 *
 * Unmasks the ep0 IN/OUT endpoint interrupts and the device interrupt set
 * (reset, suspend, idle suspend, enumeration, SET_INTERFACE, SET_CONFIG).
 */
static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
{
	/* enable ep0 interrupts */
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
						UDC_EPINT_OUT_EP0);
	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
				       UDC_DEVINT_SI | UDC_DEVINT_SC);
}
2857
2858
2859
2860
2861
/*
 * gadget_release() - Free the controller structure once the gadget
 *		      device's last reference is dropped.
 * @pdev:	struct device of the gadget
 *
 * Installed via usb_add_gadget_udc_release(); the pch_udc_dev was
 * stored as driver data, so simply free it.
 */
static void gadget_release(struct device *pdev)
{
	kfree(dev_get_drvdata(pdev));
}
2868
2869
2870
2871
2872
2873static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2874{
2875 const char *const ep_string[] = {
2876 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2877 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2878 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2879 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2880 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2881 "ep15in", "ep15out",
2882 };
2883 int i;
2884
2885 dev->gadget.speed = USB_SPEED_UNKNOWN;
2886 INIT_LIST_HEAD(&dev->gadget.ep_list);
2887
2888
2889 memset(dev->ep, 0, sizeof dev->ep);
2890 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2891 struct pch_udc_ep *ep = &dev->ep[i];
2892 ep->dev = dev;
2893 ep->halted = 1;
2894 ep->num = i / 2;
2895 ep->in = ~i & 1;
2896 ep->ep.name = ep_string[i];
2897 ep->ep.ops = &pch_udc_ep_ops;
2898 if (ep->in) {
2899 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2900 ep->ep.caps.dir_in = true;
2901 } else {
2902 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2903 UDC_EP_REG_SHIFT;
2904 ep->ep.caps.dir_out = true;
2905 }
2906 if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2907 ep->ep.caps.type_control = true;
2908 } else {
2909 ep->ep.caps.type_iso = true;
2910 ep->ep.caps.type_bulk = true;
2911 ep->ep.caps.type_int = true;
2912 }
2913
2914 usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2915 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2916 INIT_LIST_HEAD(&ep->queue);
2917 }
2918 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2919 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2920
2921
2922 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2923 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2924
2925 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2926 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2927}
2928
2929
2930
2931
2932
2933
2934
2935
/*
 * pch_udc_pcd_init() - One-time peripheral controller init from probe.
 * @dev:	reference to the UDC device structure
 *
 * Order matters: the hardware is initialised first, then the software
 * endpoint state is rebuilt, then optional VBUS-GPIO monitoring is set
 * up (module parameter vbus_gpio_port, -1 = disabled).
 *
 * Return: always 0 (the pch_vbus_gpio_init() result is not checked).
 */
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);
	pch_vbus_gpio_init(dev, vbus_gpio_port);
	return 0;
}
2943
2944
2945
2946
2947
2948static int init_dma_pools(struct pch_udc_dev *dev)
2949{
2950 struct pch_udc_stp_dma_desc *td_stp;
2951 struct pch_udc_data_dma_desc *td_data;
2952
2953
2954 dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2955 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2956 if (!dev->data_requests) {
2957 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2958 __func__);
2959 return -ENOMEM;
2960 }
2961
2962
2963 dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2964 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2965 if (!dev->stp_requests) {
2966 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2967 __func__);
2968 return -ENOMEM;
2969 }
2970
2971 td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2972 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2973 if (!td_stp) {
2974 dev_err(&dev->pdev->dev,
2975 "%s: can't allocate setup dma descriptor\n", __func__);
2976 return -ENOMEM;
2977 }
2978 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2979
2980
2981 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2982 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2983 if (!td_data) {
2984 dev_err(&dev->pdev->dev,
2985 "%s: can't allocate data dma descriptor\n", __func__);
2986 return -ENOMEM;
2987 }
2988 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2989 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2990 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2991 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2992 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2993
2994 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2995 if (!dev->ep0out_buf)
2996 return -ENOMEM;
2997 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2998 UDC_EP0OUT_BUFF_SIZE * 4,
2999 DMA_FROM_DEVICE);
3000 return 0;
3001}
3002
3003static int pch_udc_start(struct usb_gadget *g,
3004 struct usb_gadget_driver *driver)
3005{
3006 struct pch_udc_dev *dev = to_pch_udc(g);
3007
3008 driver->driver.bus = NULL;
3009 dev->driver = driver;
3010
3011
3012 pch_udc_setup_ep0(dev);
3013
3014
3015 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
3016 pch_udc_clear_disconnect(dev);
3017
3018 dev->connected = 1;
3019 return 0;
3020}
3021
/*
 * pch_udc_stop() - usb_gadget_ops .udc_stop: unbind the gadget driver.
 * @g:	the gadget being stopped
 *
 * Masks all device interrupts, drops the driver/connected state, then
 * forces a soft disconnect so the host sees the device go away.  The
 * interrupt mask must come first so the ISR stops invoking driver
 * callbacks before dev->driver is cleared.
 *
 * Return: always 0.
 */
static int pch_udc_stop(struct usb_gadget *g)
{
	struct pch_udc_dev *dev = to_pch_udc(g);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);

	/* Assures that there are no pending requests with this driver */
	dev->driver = NULL;
	dev->connected = 0;

	/* set SD (soft disconnect) */
	pch_udc_set_disconnect(dev);

	return 0;
}
3037
/*
 * pch_udc_shutdown() - PCI .shutdown hook (reboot/poweroff path).
 * @pdev:	PCI device being shut down
 *
 * Masks every device and endpoint interrupt, then forces a soft
 * disconnect so the controller is quiescent before the system goes
 * down.  No memory is freed here — only quiescing.
 */
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* disable the pullup so the host will think we're gone */
	pch_udc_set_disconnect(dev);
}
3048
/*
 * pch_udc_remove() - PCI .remove hook; also the error path of probe.
 * @pdev:	PCI device being removed
 *
 * Tears down everything pch_udc_probe()/init_dma_pools() set up.  Each
 * resource is guarded by its own pointer/flag check because this is
 * also reached from probe with a partially initialised device.  The
 * teardown order mirrors the setup order in reverse: gadget, DMA
 * descriptors/pools, EP0 buffer mapping, VBUS GPIO, hardware, IRQ,
 * MMIO mapping, memory region, PCI device, and finally dev itself.
 *
 * NOTE(review): dev is kfree()d here while gadget_release() also frees
 * its drvdata once the gadget device's refcount drops — verify this
 * cannot double-free.
 */
static void pch_udc_remove(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* gadget driver must not be registered */
	if (dev->driver)
		dev_err(&pdev->dev,
			"%s: gadget driver still bound!!!\n", __func__);
	/* dma pool cleanup */
	if (dev->data_requests)
		pci_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0in */
		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
			pci_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_stp,
				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
		}
		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
			pci_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_data,
				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
		}
		pci_pool_destroy(dev->stp_requests);
	}

	/* unmap the EP0-OUT bounce buffer before freeing it */
	if (dev->dma_addr)
		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
	kfree(dev->ep0out_buf);

	pch_vbus_gpio_free(dev);

	pch_udc_exit(dev);

	if (dev->irq_registered)
		free_irq(pdev->irq, dev);
	if (dev->base_addr)
		iounmap(dev->base_addr);
	if (dev->mem_region)
		release_mem_region(dev->phys_addr,
				   pci_resource_len(pdev, dev->bar));
	if (dev->active)
		pci_disable_device(pdev);
	kfree(dev);
}
3098
3099#ifdef CONFIG_PM
/*
 * pch_udc_suspend() - PCI .suspend hook (legacy PM, CONFIG_PM only).
 * @pdev:	PCI device being suspended
 * @state:	target system sleep state
 *
 * Masks all interrupts first so the ISR goes quiet, disables the PCI
 * device, disarms wakeup, saves config space and drops to the chosen
 * low-power state.  The call order follows the PCI PM sequence and
 * must not be rearranged.
 *
 * Return: 0 on success, -ENOMEM if the PCI config state could not be
 * saved (note: pci_save_state()'s own error code is discarded).
 */
static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	pci_disable_device(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 0);

	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev,
			"%s: could not save PCI config state\n", __func__);
		return -ENOMEM;
	}
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3118
/*
 * pch_udc_resume() - PCI .resume hook (legacy PM, CONFIG_PM only).
 * @pdev:	PCI device being resumed
 *
 * Restores D0 power, restores the saved config space, re-enables the
 * device and disarms wakeup — the standard PCI resume order.  No
 * controller re-initialisation is performed here.
 *
 * Return: 0 on success, or the pci_enable_device() error.
 */
static int pch_udc_resume(struct pci_dev *pdev)
{
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
		return ret;
	}
	pci_enable_wake(pdev, PCI_D3hot, 0);
	return 0;
}
3133#else
3134#define pch_udc_suspend NULL
3135#define pch_udc_resume NULL
3136#endif
3137
/*
 * pch_udc_probe() - PCI probe: map the controller, initialise the PCD
 *		     and register the gadget.
 * @pdev:	PCI device being probed
 * @id:		matched entry of pch_udc_pcidev_id
 *
 * After drvdata is set, every failure jumps to "finished", which calls
 * pch_udc_remove(); remove() tolerates a partially initialised dev
 * (each resource there is guarded by its own flag/pointer check).
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int pch_udc_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	unsigned long resource;
	unsigned long len;
	int retval;
	struct pch_udc_dev *dev;

	/* init */
	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev) {
		pr_err("%s: no memory for device structure\n", __func__);
		return -ENOMEM;
	}
	/* pci setup */
	if (pci_enable_device(pdev) < 0) {
		kfree(dev);
		pr_err("%s: pci_enable_device failed\n", __func__);
		return -ENODEV;
	}
	dev->active = 1;	/* lets remove() know pci_disable is needed */
	pci_set_drvdata(pdev, dev);

	/* Quark X1000 exposes the UDC registers on a different BAR */
	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
		dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
	else
		dev->bar = PCH_UDC_PCI_BAR;

	/* PCI resource allocation */
	resource = pci_resource_start(pdev, dev->bar);
	len = pci_resource_len(pdev, dev->bar);

	if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
		dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
		retval = -EBUSY;
		goto finished;
	}
	dev->phys_addr = resource;
	dev->mem_region = 1;	/* lets remove() know to release the region */

	dev->base_addr = ioremap_nocache(resource, len);
	if (!dev->base_addr) {
		pr_err("%s: device memory cannot be mapped\n", __func__);
		retval = -ENOMEM;
		goto finished;
	}
	if (!pdev->irq) {
		dev_err(&pdev->dev, "%s: irq not set\n", __func__);
		retval = -ENODEV;
		goto finished;
	}
	/* initialize the hardware and the software endpoint state */
	if (pch_udc_pcd_init(dev)) {
		retval = -ENODEV;
		goto finished;
	}
	if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
			dev)) {
		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
			pdev->irq);
		retval = -ENODEV;
		goto finished;
	}
	dev->irq = pdev->irq;
	dev->irq_registered = 1;	/* lets remove() know to free_irq() */

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* device struct setup */
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &pch_udc_ops;

	retval = init_dma_pools(dev);
	if (retval)
		goto finished;

	dev->gadget.name = KBUILD_MODNAME;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* stay disconnected until a gadget driver binds in udc_start() */
	pch_udc_set_disconnect(dev);
	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
					    gadget_release);
	if (retval)
		goto finished;
	return 0;

finished:
	pch_udc_remove(pdev);	/* handles partially initialised dev */
	return retval;
}
3232
/*
 * PCI IDs handled by this driver.  All entries additionally match on
 * class: USB serial-bus class with programming interface 0xfe.
 */
static const struct pci_device_id pch_udc_pcidev_id[] = {
	{
		/* Intel Quark X1000 UDC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{
		/* Intel EG20T (Topcliff) UDC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{
		/* ROHM/LAPIS ML7213 IOH UDC */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{
		/* ROHM/LAPIS ML7831 IOH UDC */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{ 0 },	/* terminator */
};

MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3259
/* PCI driver glue; suspend/resume are NULL when !CONFIG_PM (see above). */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.suspend =	pch_udc_suspend,
	.resume =	pch_udc_resume,
	.shutdown =	pch_udc_shutdown,
};

module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");
3275