/*
 * USB device controller (UDC) driver for the Intel EG20T PCH,
 * Intel Quark X1000 and ROHM ML7213/ML7831 IOH
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/gpio.h>
#include <linux/irq.h>

/* GPIO port for VBUS detection (-1: not used) */
static int vbus_gpio_port = -1;

#define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
#define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */

/* Address offsets of registers */
#define UDC_EP_REG_SHIFT	0x20	/* offset to the next endpoint's registers */

#define UDC_EPCTL_ADDR			0x00
#define UDC_EPSTS_ADDR			0x04
#define UDC_BUFIN_FRAMENUM_ADDR		0x08
#define UDC_BUFOUT_MAXPKT_ADDR		0x0C
#define UDC_SUBPTR_ADDR			0x10
#define UDC_DESPTR_ADDR			0x14
#define UDC_CONFIRM_ADDR		0x18

#define UDC_DEVCFG_ADDR			0x400
#define UDC_DEVCTL_ADDR			0x404
#define UDC_DEVSTS_ADDR			0x408
#define UDC_DEVIRQSTS_ADDR		0x40C
#define UDC_DEVIRQMSK_ADDR		0x410
#define UDC_EPIRQSTS_ADDR		0x414
#define UDC_EPIRQMSK_ADDR		0x418
#define UDC_DEVLPM_ADDR			0x41C
#define UDC_CSR_BUSY_ADDR		0x4f0
#define UDC_SRST_ADDR			0x4fc
#define UDC_CSR_ADDR			0x500
49
50
51
52#define UDC_EPCTL_MRXFLUSH (1 << 12)
53#define UDC_EPCTL_RRDY (1 << 9)
54#define UDC_EPCTL_CNAK (1 << 8)
55#define UDC_EPCTL_SNAK (1 << 7)
56#define UDC_EPCTL_NAK (1 << 6)
57#define UDC_EPCTL_P (1 << 3)
58#define UDC_EPCTL_F (1 << 1)
59#define UDC_EPCTL_S (1 << 0)
60#define UDC_EPCTL_ET_SHIFT 4
61
62#define UDC_EPCTL_ET_MASK 0x00000030
63
64#define UDC_EPCTL_ET_CONTROL 0
65#define UDC_EPCTL_ET_ISO 1
66#define UDC_EPCTL_ET_BULK 2
67#define UDC_EPCTL_ET_INTERRUPT 3
68
69
70
71#define UDC_EPSTS_XFERDONE (1 << 27)
72#define UDC_EPSTS_RSS (1 << 26)
73#define UDC_EPSTS_RCS (1 << 25)
74#define UDC_EPSTS_TXEMPTY (1 << 24)
75#define UDC_EPSTS_TDC (1 << 10)
76#define UDC_EPSTS_HE (1 << 9)
77#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
78#define UDC_EPSTS_BNA (1 << 7)
79#define UDC_EPSTS_IN (1 << 6)
80#define UDC_EPSTS_OUT_SHIFT 4
81
82#define UDC_EPSTS_OUT_MASK 0x00000030
83#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
84
85#define UDC_EPSTS_OUT_SETUP 2
86#define UDC_EPSTS_OUT_DATA 1
87
88
89
90#define UDC_DEVCFG_CSR_PRG (1 << 17)
91#define UDC_DEVCFG_SP (1 << 3)
92
93#define UDC_DEVCFG_SPD_HS 0x0
94#define UDC_DEVCFG_SPD_FS 0x1
95#define UDC_DEVCFG_SPD_LS 0x2
96
97
98
99#define UDC_DEVCTL_THLEN_SHIFT 24
100#define UDC_DEVCTL_BRLEN_SHIFT 16
101#define UDC_DEVCTL_CSR_DONE (1 << 13)
102#define UDC_DEVCTL_SD (1 << 10)
103#define UDC_DEVCTL_MODE (1 << 9)
104#define UDC_DEVCTL_BREN (1 << 8)
105#define UDC_DEVCTL_THE (1 << 7)
106#define UDC_DEVCTL_DU (1 << 4)
107#define UDC_DEVCTL_TDE (1 << 3)
108#define UDC_DEVCTL_RDE (1 << 2)
109#define UDC_DEVCTL_RES (1 << 0)
110
111
112
113#define UDC_DEVSTS_TS_SHIFT 18
114#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
115#define UDC_DEVSTS_ALT_SHIFT 8
116#define UDC_DEVSTS_INTF_SHIFT 4
117#define UDC_DEVSTS_CFG_SHIFT 0
118
119#define UDC_DEVSTS_TS_MASK 0xfffc0000
120#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
121#define UDC_DEVSTS_ALT_MASK 0x00000f00
122#define UDC_DEVSTS_INTF_MASK 0x000000f0
123#define UDC_DEVSTS_CFG_MASK 0x0000000f
124
125#define UDC_DEVSTS_ENUM_SPEED_FULL 1
126#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
127#define UDC_DEVSTS_ENUM_SPEED_LOW 2
128#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
129
130
131
132#define UDC_DEVINT_RWKP (1 << 7)
133#define UDC_DEVINT_ENUM (1 << 6)
134#define UDC_DEVINT_SOF (1 << 5)
135#define UDC_DEVINT_US (1 << 4)
136#define UDC_DEVINT_UR (1 << 3)
137#define UDC_DEVINT_ES (1 << 2)
138#define UDC_DEVINT_SI (1 << 1)
139#define UDC_DEVINT_SC (1 << 0)
140
141#define UDC_DEVINT_MSK 0x7f
142
143
144
145#define UDC_EPINT_IN_SHIFT 0
146#define UDC_EPINT_OUT_SHIFT 16
147#define UDC_EPINT_IN_EP0 (1 << 0)
148#define UDC_EPINT_OUT_EP0 (1 << 16)
149
150#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
151
152
153
154#define UDC_CSR_BUSY (1 << 0)
155
156
157
158#define UDC_PSRST (1 << 1)
159#define UDC_SRST (1 << 0)
160
161
162
163#define UDC_CSR_NE_NUM_SHIFT 0
164#define UDC_CSR_NE_DIR_SHIFT 4
165#define UDC_CSR_NE_TYPE_SHIFT 5
166#define UDC_CSR_NE_CFG_SHIFT 7
167#define UDC_CSR_NE_INTF_SHIFT 11
168#define UDC_CSR_NE_ALT_SHIFT 15
169#define UDC_CSR_NE_MAX_PKT_SHIFT 19
170
171#define UDC_CSR_NE_NUM_MASK 0x0000000f
172#define UDC_CSR_NE_DIR_MASK 0x00000010
173#define UDC_CSR_NE_TYPE_MASK 0x00000060
174#define UDC_CSR_NE_CFG_MASK 0x00000780
175#define UDC_CSR_NE_INTF_MASK 0x00007800
176#define UDC_CSR_NE_ALT_MASK 0x00078000
177#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
178
#define PCH_UDC_CSR(ep)		(UDC_CSR_ADDR + (ep) * 4)
#define PCH_UDC_EPINT(in, num)\
		(1 << ((num) + ((in) ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
182
183
#define UDC_EP0IN_IDX		0
#define UDC_EP0OUT_IDX		1
#define UDC_EPIN_IDX(ep)	((ep) * 2)
#define UDC_EPOUT_IDX(ep)	((ep) * 2 + 1)
188#define PCH_UDC_EP0 0
189#define PCH_UDC_EP1 1
190#define PCH_UDC_EP2 2
191#define PCH_UDC_EP3 3
192
193
194#define PCH_UDC_EP_NUM 32
195#define PCH_UDC_USED_EP_NUM 4
196
197#define PCH_UDC_BRLEN 0x0F
198#define PCH_UDC_THLEN 0x1F
199
200#define UDC_EP0IN_BUFF_SIZE 16
201#define UDC_EPIN_BUFF_SIZE 256
202#define UDC_EP0OUT_BUFF_SIZE 16
203#define UDC_EPOUT_BUFF_SIZE 256
204
205#define UDC_EP0IN_MAX_PKT_SIZE 64
206#define UDC_EP0OUT_MAX_PKT_SIZE 64
207#define UDC_BULK_MAX_PKT_SIZE 512
208
209
210#define DMA_DIR_RX 1
211#define DMA_DIR_TX 2
212#define DMA_ADDR_INVALID (~(dma_addr_t)0)
213#define UDC_DMA_MAXPACKET 65536
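
/**
 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
 *                                for data
 * @status:	Status quadlet of the descriptor
 * @reserved:	Reserved
 * @dataptr:	DMA address of the data buffer
 * @next:	DMA address of the next descriptor in the chain
 */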
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
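
/**
 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
 *                               for control (SETUP) data
 * @status:	Status quadlet of the descriptor
 * @reserved:	Reserved
 * @request:	Control request received from the host
 */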
struct pch_udc_stp_dma_desc {
	u32 status;
	u32 reserved;
	struct usb_ctrlrequest request;
} __packed;
243
244
245
246#define PCH_UDC_BUFF_STS 0xC0000000
247#define PCH_UDC_BS_HST_RDY 0x00000000
248#define PCH_UDC_BS_DMA_BSY 0x40000000
249#define PCH_UDC_BS_DMA_DONE 0x80000000
250#define PCH_UDC_BS_HST_BSY 0xC0000000
251
252#define PCH_UDC_RXTX_STS 0x30000000
253#define PCH_UDC_RTS_SUCC 0x00000000
254#define PCH_UDC_RTS_DESERR 0x10000000
255#define PCH_UDC_RTS_BUFERR 0x30000000
256
257#define PCH_UDC_DMA_LAST 0x08000000
258
259#define PCH_UDC_RXTX_BYTES 0x0000ffff
260
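
/**
 * struct pch_udc_cfg_data - Structure to hold current configuration,
 *                           interface and alternate setting information
 * @cur_cfg:	current configuration in use
 * @cur_intf:	current interface in use
 * @cur_alt:	current alternate setting in use
 */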
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
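
/**
 * struct pch_udc_ep - Structure holding a PCH USB device endpoint
 * @ep:			embedded usb_ep structure
 * @td_stp_phys:	DMA address of the setup descriptor
 * @td_data_phys:	DMA address of the data descriptor
 * @td_stp:		setup descriptor
 * @td_data:		data descriptor
 * @dev:		reference to the device structure
 * @offset_addr:	offset of this endpoint's register block
 * @queue:		queue of pending requests
 * @num:		endpoint number
 * @in:			endpoint direction is IN
 * @halted:		endpoint is halted
 * @epsts:		last read endpoint status
 */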
struct pch_udc_ep {
	struct usb_ep			ep;
	dma_addr_t			td_stp_phys;
	dma_addr_t			td_data_phys;
	struct pch_udc_stp_dma_desc	*td_stp;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_dev		*dev;
	unsigned long			offset_addr;
	struct list_head		queue;
	unsigned			num:5,
					in:1,
					halted:1;
	unsigned long			epsts;
};
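
/**
 * struct pch_vbus_gpio_data - Structure holding GPIO information
 *                             for detecting VBUS
 * @port:		gpio port number
 * @intr:		gpio interrupt number (0 if polling is used)
 * @irq_work_fall:	work item scheduled when VBUS falls
 * @irq_work_rise:	work item scheduled when VBUS rises
 */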
struct pch_vbus_gpio_data {
	int			port;
	int			intr;
	struct work_struct	irq_work_fall;
	struct work_struct	irq_work_rise;
};
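
/**
 * struct pch_udc_dev - Structure holding the complete information
 *                      of the PCH USB device controller
 * @gadget:		gadget driver data
 * @driver:		reference to the bound gadget driver
 * @pdev:		reference to the PCI device
 * @ep:			array of endpoints
 * @lock:		protects all state
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @suspended:		driver is suspended
 * @connected:		gadget is connected to the host
 * @vbus_session:	required VBUS session state
 * @set_cfg_not_acked:	pending acknowledgement for a SET_CONFIGURATION
 * @waiting_zlp_ack:	pending acknowledgement for a zero-length packet
 * @data_requests:	DMA pool for data descriptors
 * @stp_requests:	DMA pool for setup descriptors
 * @dma_addr:		DMA address of the EP0 OUT receive buffer
 * @setup_data:		received setup data
 * @base_addr:		mapped device register space
 * @cfg_data:		current cfg, intf, and alt in use
 * @vbus_gpio:		GPIO information for detecting VBUS
 */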
struct pch_udc_dev {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct pci_dev			*pdev;
	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
	spinlock_t			lock;
	unsigned
			stall:1,
			prot_stall:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct dma_pool			*data_requests;
	struct dma_pool			*stp_requests;
	dma_addr_t			dma_addr;
	struct usb_ctrlrequest		setup_data;
	void __iomem			*base_addr;
	struct pch_udc_cfg_data		cfg_data;
	struct pch_vbus_gpio_data	vbus_gpio;
};
#define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))

#define PCH_UDC_PCI_BAR_QUARK_X1000	0
#define PCH_UDC_PCI_BAR			1

#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
#define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808

#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808

static const char	ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock);
static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
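
/**
 * struct pch_udc_request - Structure holding a PCH USB device request packet
 * @req:		embedded usb_request
 * @td_data_phys:	DMA address of the first data descriptor
 * @td_data:		first descriptor of the DMA chain
 * @td_data_last:	last descriptor of the DMA chain
 * @queue:		list entry in the endpoint queue
 * @dma_going:		DMA in progress for this request
 * @dma_mapped:		buffer has been DMA mapped
 * @dma_done:		DMA completed for this request
 * @chain_len:		number of descriptors in the chain
 * @buf:		bounce buffer used for alignment adjustment
 * @dma:		DMA address of the bounce buffer
 */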
struct pch_udc_request {
	struct usb_request		req;
	dma_addr_t			td_data_phys;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_data_dma_desc	*td_data_last;
	struct list_head		queue;
	unsigned			dma_going:1,
					dma_mapped:1,
					dma_done:1;
	unsigned			chain_len;
	void				*buf;
	dma_addr_t			dma;
};

static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}

static inline void pch_udc_writel(struct pch_udc_dev *dev,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}

static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}

static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}

static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}

static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}

static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}

static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
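
/**
 * pch_udc_csr_busy() - Wait till the CSR interface is idle.
 * @dev:	Reference to pch_udc_dev structure
 */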
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int count = 200;

	/* Wait till idle */
	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
		&& --count)
		cpu_relax();
	if (!count)
		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
}
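
/**
 * pch_udc_write_csr() - Write to an endpoint's UDC CSR register.
 * @dev:	Reference to pch_udc_dev structure
 * @val:	value to be written
 * @ep:		endpoint (CSR index) to write
 */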
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}
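
/**
 * pch_udc_read_csr() - Read an endpoint's UDC CSR register.
 * @dev:	Reference to pch_udc_dev structure
 * @ep:		endpoint (CSR index) to read
 *
 * Return: content of the CSR register
 */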
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
510
511
512
513
514
515static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
516{
517 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
518 mdelay(1);
519 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
520}
521
522
523
524
525
526
527static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
528{
529 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
530 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
531}
532
533
534
535
536
537static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
538{
539 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
540}
541
542
543
544
545
546static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
547{
548 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
549}
550
551
552
553
554
555static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
556{
557 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
558}
559
560
561
562
563
564static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
565{
566
567 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
568 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
569 mdelay(1);
570
571 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
572}

static void pch_udc_init(struct pch_udc_dev *dev);
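
/**
 * pch_udc_reconnect() - Re-initialize the controller, re-enable the USB
 *			 reset and enumeration interrupts and clear the
 *			 soft disconnect.
 * @dev:	Reference to pch_udc_dev structure
 */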
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* enable the USB reset and speed-enumeration done interrupts */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* clear the soft disconnect while signalling resume */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* stop resume signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
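
/**
 * pch_udc_vbus_session() - Handle a VBUS session change.
 * @dev:	Reference to pch_udc_dev structure
 * @is_active:	VBUS is present (1) or has gone away (0)
 *
 * On VBUS loss the gadget driver's disconnect callback is invoked and the
 * controller is put into the soft-disconnect state.
 */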
604static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
605 int is_active)
606{
607 if (is_active) {
608 pch_udc_reconnect(dev);
609 dev->vbus_session = 1;
610 } else {
611 if (dev->driver && dev->driver->disconnect) {
612 spin_lock(&dev->lock);
613 dev->driver->disconnect(&dev->gadget);
614 spin_unlock(&dev->lock);
615 }
616 pch_udc_set_disconnect(dev);
617 dev->vbus_session = 0;
618 }
619}
620
621
622
623
624
625static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
626{
627 if (ep->in) {
628 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
629 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
630 } else {
631 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
632 }
633}
634
635
636
637
638
639static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
640{
641
642 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
643
644 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
645}
646
647
648
649
650
651
652static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
653 u8 type)
654{
655 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
656 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
657}
658
659
660
661
662
663
664static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
665 u32 buf_size, u32 ep_in)
666{
667 u32 data;
668 if (ep_in) {
669 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
670 data = (data & 0xffff0000) | (buf_size & 0xffff);
671 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
672 } else {
673 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
674 data = (buf_size << 16) | (data & 0xffff);
675 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
676 }
677}
678
679
680
681
682
683
684static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
685{
686 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
687 data = (data & 0xffff0000) | (pkt_size & 0xffff);
688 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
689}
690
691
692
693
694
695
696static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
697{
698 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
699}
700
701
702
703
704
705
706static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
707{
708 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
709}
710
711
712
713
714
715static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
716{
717 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
718}
719
720
721
722
723
724static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
725{
726 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
727}
728
729
730
731
732
733static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
734{
735 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
736}
737
738
739
740
741
742
743
744
745
746static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
747{
748 if (dir == DMA_DIR_RX)
749 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
750 else if (dir == DMA_DIR_TX)
751 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
752}
753
754
755
756
757
758
759
760
761
762static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
763{
764 if (dir == DMA_DIR_RX)
765 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
766 else if (dir == DMA_DIR_TX)
767 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
768}
769
770
771
772
773
774
775static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
776{
777 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
778}
779
780
781
782
783
784
785static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
786 u32 mask)
787{
788 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
789}
790
791
792
793
794
795
796static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
797 u32 mask)
798{
799 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
800}
801
802
803
804
805
806
807static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
808 u32 mask)
809{
810 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
811}
812
813
814
815
816
817
818static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
819 u32 mask)
820{
821 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
822}
823
824
825
826
827
828
829static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
830{
831 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
832}
833
834
835
836
837
838
839static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
840 u32 val)
841{
842 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
843}
844
845
846
847
848
849
850static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
851{
852 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
853}
854
855
856
857
858
859
860static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
861 u32 val)
862{
863 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
864}
865
866
867
868
869
870
871static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
872{
873 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
874}
875
876
877
878
879
880
881static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
882{
883 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
884}
885
886
887
888
889
890
static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
{
	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
}
895
896
897
898
899
900
901static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
902{
903 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
904}
905
906
907
908
909
910
static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
					   u32 stat)
{
	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
}
916
917
918
919
920
921
922static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
923{
924 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
925}
926
927
928
929
930
931
932static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
933{
934 unsigned int loopcnt = 0;
935 struct pch_udc_dev *dev = ep->dev;
936
937 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
938 return;
939 if (!ep->in) {
940 loopcnt = 10000;
941 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
942 --loopcnt)
943 udelay(5);
944 if (!loopcnt)
945 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
946 __func__);
947 }
948 loopcnt = 10000;
949 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
950 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
951 udelay(5);
952 }
953 if (!loopcnt)
954 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
955 __func__, ep->num, (ep->in ? "in" : "out"));
956}
957
958
959
960
961
962
963
964
965static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
966{
967 if (dir) {
968 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
969 return;
970 }
971}
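
/**
 * pch_udc_ep_enable() - Enable an endpoint and program it into the UDC CSR.
 * @ep:		Reference to the endpoint structure
 * @cfg:	current configuration/interface/alternate setting information
 * @desc:	endpoint descriptor to program
 */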
978static void pch_udc_ep_enable(struct pch_udc_ep *ep,
979 struct pch_udc_cfg_data *cfg,
980 const struct usb_endpoint_descriptor *desc)
981{
982 u32 val = 0;
983 u32 buff_size = 0;
984
985 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
986 if (ep->in)
987 buff_size = UDC_EPIN_BUFF_SIZE;
988 else
989 buff_size = UDC_EPOUT_BUFF_SIZE;
990 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
991 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
992 pch_udc_ep_set_nak(ep);
993 pch_udc_ep_fifo_flush(ep, ep->in);
994
995 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
996 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
997 UDC_CSR_NE_TYPE_SHIFT) |
998 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
999 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1000 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1001 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1002
1003 if (ep->in)
1004 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1005 else
1006 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1007}
1008
1009
1010
1011
1012
1013static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1014{
1015 if (ep->in) {
1016
1017 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1018
1019 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1020 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1021 } else {
1022
1023 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1024 }
1025
1026 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1027}
1028
1029
1030
1031
1032
1033static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1034{
1035 unsigned int count = 10000;
1036
1037
1038 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1039 udelay(5);
1040 if (!count)
1041 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1042}
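
/**
 * pch_udc_init() - Soft-reset and initialize the USB device controller.
 * @dev:	Reference to pch_udc_dev structure
 */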
1048static void pch_udc_init(struct pch_udc_dev *dev)
1049{
1050 if (NULL == dev) {
1051 pr_err("%s: Invalid address\n", __func__);
1052 return;
1053 }
1054
1055 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1056 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1057 mdelay(1);
1058 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1059 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1060 mdelay(1);
1061
1062 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1063 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1064
1065
1066 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1067 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1068
1069
1070 if (speed_fs)
1071 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1072 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1073 else
1074 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1075 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1076 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1077 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1078 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1079 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1080 UDC_DEVCTL_THE);
1081}
1082
1083
1084
1085
1086
1087static void pch_udc_exit(struct pch_udc_dev *dev)
1088{
1089
1090 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1091
1092 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1093
1094 pch_udc_set_disconnect(dev);
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1106{
1107 struct pch_udc_dev *dev;
1108
1109 if (!gadget)
1110 return -EINVAL;
1111 dev = container_of(gadget, struct pch_udc_dev, gadget);
1112 return pch_udc_get_frame(dev);
1113}
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1124{
1125 struct pch_udc_dev *dev;
1126 unsigned long flags;
1127
1128 if (!gadget)
1129 return -EINVAL;
1130 dev = container_of(gadget, struct pch_udc_dev, gadget);
1131 spin_lock_irqsave(&dev->lock, flags);
1132 pch_udc_rmt_wakeup(dev);
1133 spin_unlock_irqrestore(&dev->lock, flags);
1134 return 0;
1135}
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1148{
1149 struct pch_udc_dev *dev;
1150
1151 if (!gadget)
1152 return -EINVAL;
1153 gadget->is_selfpowered = (value != 0);
1154 dev = container_of(gadget, struct pch_udc_dev, gadget);
1155 if (value)
1156 pch_udc_set_selfpowered(dev);
1157 else
1158 pch_udc_clear_selfpowered(dev);
1159 return 0;
1160}
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1173{
1174 struct pch_udc_dev *dev;
1175
1176 if (!gadget)
1177 return -EINVAL;
1178 dev = container_of(gadget, struct pch_udc_dev, gadget);
1179 if (is_on) {
1180 pch_udc_reconnect(dev);
1181 } else {
1182 if (dev->driver && dev->driver->disconnect) {
1183 spin_lock(&dev->lock);
1184 dev->driver->disconnect(&dev->gadget);
1185 spin_unlock(&dev->lock);
1186 }
1187 pch_udc_set_disconnect(dev);
1188 }
1189
1190 return 0;
1191}
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1205{
1206 struct pch_udc_dev *dev;
1207
1208 if (!gadget)
1209 return -EINVAL;
1210 dev = container_of(gadget, struct pch_udc_dev, gadget);
1211 pch_udc_vbus_session(dev, is_active);
1212 return 0;
1213}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1227{
1228 return -EOPNOTSUPP;
1229}
1230
1231static int pch_udc_start(struct usb_gadget *g,
1232 struct usb_gadget_driver *driver);
1233static int pch_udc_stop(struct usb_gadget *g);
1234
1235static const struct usb_gadget_ops pch_udc_ops = {
1236 .get_frame = pch_udc_pcd_get_frame,
1237 .wakeup = pch_udc_pcd_wakeup,
1238 .set_selfpowered = pch_udc_pcd_selfpowered,
1239 .pullup = pch_udc_pcd_pullup,
1240 .vbus_session = pch_udc_pcd_vbus_session,
1241 .vbus_draw = pch_udc_pcd_vbus_draw,
1242 .udc_start = pch_udc_start,
1243 .udc_stop = pch_udc_stop,
1244};
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1256{
1257 int vbus = 0;
1258
1259 if (dev->vbus_gpio.port)
1260 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1261 else
1262 vbus = -1;
1263
1264 return vbus;
1265}
1266
1267
1268
1269
1270
1271
1272
1273static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1274{
1275 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1276 struct pch_vbus_gpio_data, irq_work_fall);
1277 struct pch_udc_dev *dev =
1278 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1279 int vbus_saved = -1;
1280 int vbus;
1281 int count;
1282
1283 if (!dev->vbus_gpio.port)
1284 return;
1285
1286 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1287 count++) {
1288 vbus = pch_vbus_gpio_get_value(dev);
1289
1290 if ((vbus_saved == vbus) && (vbus == 0)) {
1291 dev_dbg(&dev->pdev->dev, "VBUS fell");
1292 if (dev->driver
1293 && dev->driver->disconnect) {
1294 dev->driver->disconnect(
1295 &dev->gadget);
1296 }
1297 if (dev->vbus_gpio.intr)
1298 pch_udc_init(dev);
1299 else
1300 pch_udc_reconnect(dev);
1301 return;
1302 }
1303 vbus_saved = vbus;
1304 mdelay(PCH_VBUS_INTERVAL);
1305 }
1306}
1307
1308
1309
1310
1311
1312
1313
1314static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1315{
1316 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1317 struct pch_vbus_gpio_data, irq_work_rise);
1318 struct pch_udc_dev *dev =
1319 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1320 int vbus;
1321
1322 if (!dev->vbus_gpio.port)
1323 return;
1324
1325 mdelay(PCH_VBUS_INTERVAL);
1326 vbus = pch_vbus_gpio_get_value(dev);
1327
1328 if (vbus == 1) {
1329 dev_dbg(&dev->pdev->dev, "VBUS rose");
1330 pch_udc_reconnect(dev);
1331 return;
1332 }
1333}
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1345{
1346 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1347
1348 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1349 return IRQ_NONE;
1350
1351 if (pch_vbus_gpio_get_value(dev))
1352 schedule_work(&dev->vbus_gpio.irq_work_rise);
1353 else
1354 schedule_work(&dev->vbus_gpio.irq_work_fall);
1355
1356 return IRQ_HANDLED;
1357}
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1369{
1370 int err;
1371 int irq_num = 0;
1372
1373 dev->vbus_gpio.port = 0;
1374 dev->vbus_gpio.intr = 0;
1375
1376 if (vbus_gpio_port <= -1)
1377 return -EINVAL;
1378
1379 err = gpio_is_valid(vbus_gpio_port);
1380 if (!err) {
1381 pr_err("%s: gpio port %d is invalid\n",
1382 __func__, vbus_gpio_port);
1383 return -EINVAL;
1384 }
1385
1386 err = gpio_request(vbus_gpio_port, "pch_vbus");
1387 if (err) {
1388 pr_err("%s: can't request gpio port %d, err: %d\n",
1389 __func__, vbus_gpio_port, err);
1390 return -EINVAL;
1391 }
1392
1393 dev->vbus_gpio.port = vbus_gpio_port;
1394 gpio_direction_input(vbus_gpio_port);
1395 INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1396
1397 irq_num = gpio_to_irq(vbus_gpio_port);
1398 if (irq_num > 0) {
1399 irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1400 err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1401 "vbus_detect", dev);
1402 if (!err) {
1403 dev->vbus_gpio.intr = irq_num;
1404 INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1405 pch_vbus_gpio_work_rise);
1406 } else {
1407 pr_err("%s: can't request irq %d, err: %d\n",
1408 __func__, irq_num, err);
1409 }
1410 }
1411
1412 return 0;
1413}
1414
1415
1416
1417
1418
1419static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1420{
1421 if (dev->vbus_gpio.intr)
1422 free_irq(dev->vbus_gpio.intr, dev);
1423
1424 if (dev->vbus_gpio.port)
1425 gpio_free(dev->vbus_gpio.port);
1426}
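
/**
 * complete_req() - Complete a request: unmap its buffer, remove it from the
 *		    endpoint queue and give it back to the gadget driver.
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request structure
 * @status:	completion status to report (if still -EINPROGRESS)
 *
 * The device lock is dropped around the gadget completion callback.
 */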
1435static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1436 int status)
1437 __releases(&dev->lock)
1438 __acquires(&dev->lock)
1439{
1440 struct pch_udc_dev *dev;
1441 unsigned halted = ep->halted;
1442
1443 list_del_init(&req->queue);
1444
1445
1446 if (req->req.status == -EINPROGRESS)
1447 req->req.status = status;
1448 else
1449 status = req->req.status;
1450
1451 dev = ep->dev;
1452 if (req->dma_mapped) {
1453 if (req->dma == DMA_ADDR_INVALID) {
1454 if (ep->in)
1455 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1456 req->req.length,
1457 DMA_TO_DEVICE);
1458 else
1459 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1460 req->req.length,
1461 DMA_FROM_DEVICE);
1462 req->req.dma = DMA_ADDR_INVALID;
1463 } else {
1464 if (ep->in)
1465 dma_unmap_single(&dev->pdev->dev, req->dma,
1466 req->req.length,
1467 DMA_TO_DEVICE);
1468 else {
1469 dma_unmap_single(&dev->pdev->dev, req->dma,
1470 req->req.length,
1471 DMA_FROM_DEVICE);
1472 memcpy(req->req.buf, req->buf, req->req.length);
1473 }
1474 kfree(req->buf);
1475 req->dma = DMA_ADDR_INVALID;
1476 }
1477 req->dma_mapped = 0;
1478 }
1479 ep->halted = 1;
1480 spin_unlock(&dev->lock);
1481 if (!ep->in)
1482 pch_udc_ep_clear_rrdy(ep);
1483 usb_gadget_giveback_request(&ep->ep, &req->req);
1484 spin_lock(&dev->lock);
1485 ep->halted = halted;
1486}
1487
1488
1489
1490
1491
1492static void empty_req_queue(struct pch_udc_ep *ep)
1493{
1494 struct pch_udc_request *req;
1495
1496 ep->halted = 1;
1497 while (!list_empty(&ep->queue)) {
1498 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1499 complete_req(ep, req, -ESHUTDOWN);
1500 }
1501}
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1513 struct pch_udc_request *req)
1514{
1515 struct pch_udc_data_dma_desc *td = req->td_data;
1516 unsigned i = req->chain_len;
1517
1518 dma_addr_t addr2;
1519 dma_addr_t addr = (dma_addr_t)td->next;
1520 td->next = 0x00;
1521 for (; i > 1; --i) {
1522
1523 td = phys_to_virt(addr);
1524 addr2 = (dma_addr_t)td->next;
1525 dma_pool_free(dev->data_requests, td, addr);
1526 td->next = 0x00;
1527 addr = addr2;
1528 }
1529 req->chain_len = 1;
1530}
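
/**
 * pch_udc_create_dma_chain() - Create (or re-create) the DMA descriptor
 *				chain for a request.
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request
 * @buf_len:	number of bytes covered by each descriptor
 * @gfp_flags:	allocation flags for the descriptor pool
 *
 * Return: 0 on success, -ENOMEM if a descriptor could not be allocated
 */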
1544static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1545 struct pch_udc_request *req,
1546 unsigned long buf_len,
1547 gfp_t gfp_flags)
1548{
1549 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1550 unsigned long bytes = req->req.length, i = 0;
1551 dma_addr_t dma_addr;
1552 unsigned len = 1;
1553
1554 if (req->chain_len > 1)
1555 pch_udc_free_dma_chain(ep->dev, req);
1556
1557 if (req->dma == DMA_ADDR_INVALID)
1558 td->dataptr = req->req.dma;
1559 else
1560 td->dataptr = req->dma;
1561
1562 td->status = PCH_UDC_BS_HST_BSY;
1563 for (; ; bytes -= buf_len, ++len) {
1564 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1565 if (bytes <= buf_len)
1566 break;
1567 last = td;
1568 td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
1569 &dma_addr);
1570 if (!td)
1571 goto nomem;
1572 i += buf_len;
1573 td->dataptr = req->td_data->dataptr + i;
1574 last->next = dma_addr;
1575 }
1576
1577 req->td_data_last = td;
1578 td->status |= PCH_UDC_DMA_LAST;
1579 td->next = req->td_data_phys;
1580 req->chain_len = len;
1581 return 0;
1582
1583nomem:
1584 if (len > 1) {
1585 req->chain_len = len;
1586 pch_udc_free_dma_chain(ep->dev, req);
1587 }
1588 req->chain_len = 1;
1589 return -ENOMEM;
1590}
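
/**
 * prepare_dma() - Create the DMA chain for a request and mark the first
 *		   descriptor host-ready for IN transfers.
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request
 * @gfp:	allocation flags
 *
 * Return: 0 on success, linux error number on failure
 */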
1603static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1604 gfp_t gfp)
1605{
1606 int retval;
1607
1608
1609 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1610 if (retval) {
1611 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1612 return retval;
1613 }
1614 if (ep->in)
1615 req->td_data->status = (req->td_data->status &
1616 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1617 return 0;
1618}
1619
1620
1621
1622
1623
1624
1625
1626static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1627{
1628 struct pch_udc_dev *dev = ep->dev;
1629
1630
1631 complete_req(ep, req, 0);
1632
1633
1634
1635
1636 if (dev->set_cfg_not_acked) {
1637 pch_udc_set_csr_done(dev);
1638 dev->set_cfg_not_acked = 0;
1639 }
1640
1641 if (!dev->stall && dev->waiting_zlp_ack) {
1642 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1643 dev->waiting_zlp_ack = 0;
1644 }
1645}
1646
1647
1648
1649
1650
1651
1652static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1653 struct pch_udc_request *req)
1654{
1655 struct pch_udc_data_dma_desc *td_data;
1656
1657 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1658 td_data = req->td_data;
1659
1660 while (1) {
1661 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1662 PCH_UDC_BS_HST_RDY;
1663 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1664 break;
1665 td_data = phys_to_virt(td_data->next);
1666 }
1667
1668 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1669 req->dma_going = 1;
1670 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1671 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1672 pch_udc_ep_clear_nak(ep);
1673 pch_udc_ep_set_rrdy(ep);
1674}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1688 const struct usb_endpoint_descriptor *desc)
1689{
1690 struct pch_udc_ep *ep;
1691 struct pch_udc_dev *dev;
1692 unsigned long iflags;
1693
1694 if (!usbep || (usbep->name == ep0_string) || !desc ||
1695 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1696 return -EINVAL;
1697
1698 ep = container_of(usbep, struct pch_udc_ep, ep);
1699 dev = ep->dev;
1700 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1701 return -ESHUTDOWN;
1702 spin_lock_irqsave(&dev->lock, iflags);
1703 ep->ep.desc = desc;
1704 ep->halted = 0;
1705 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1706 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1707 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1708 spin_unlock_irqrestore(&dev->lock, iflags);
1709 return 0;
1710}
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1722{
1723 struct pch_udc_ep *ep;
1724 unsigned long iflags;
1725
1726 if (!usbep)
1727 return -EINVAL;
1728
1729 ep = container_of(usbep, struct pch_udc_ep, ep);
1730 if ((usbep->name == ep0_string) || !ep->ep.desc)
1731 return -EINVAL;
1732
1733 spin_lock_irqsave(&ep->dev->lock, iflags);
1734 empty_req_queue(ep);
1735 ep->halted = 1;
1736 pch_udc_ep_disable(ep);
1737 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1738 ep->ep.desc = NULL;
1739 INIT_LIST_HEAD(&ep->queue);
1740 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1741 return 0;
1742}
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1755 gfp_t gfp)
1756{
1757 struct pch_udc_request *req;
1758 struct pch_udc_ep *ep;
1759 struct pch_udc_data_dma_desc *dma_desc;
1760
1761 if (!usbep)
1762 return NULL;
1763 ep = container_of(usbep, struct pch_udc_ep, ep);
1764 req = kzalloc(sizeof *req, gfp);
1765 if (!req)
1766 return NULL;
1767 req->req.dma = DMA_ADDR_INVALID;
1768 req->dma = DMA_ADDR_INVALID;
1769 INIT_LIST_HEAD(&req->queue);
1770 if (!ep->dev->dma_addr)
1771 return &req->req;
1772
1773 dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1774 &req->td_data_phys);
1775 if (NULL == dma_desc) {
1776 kfree(req);
1777 return NULL;
1778 }
1779
1780 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1781 dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1782 req->td_data = dma_desc;
1783 req->td_data_last = dma_desc;
1784 req->chain_len = 1;
1785 return &req->req;
1786}
1787
1788
1789
1790
1791
1792
1793
1794static void pch_udc_free_request(struct usb_ep *usbep,
1795 struct usb_request *usbreq)
1796{
1797 struct pch_udc_ep *ep;
1798 struct pch_udc_request *req;
1799 struct pch_udc_dev *dev;
1800
1801 if (!usbep || !usbreq)
1802 return;
1803 ep = container_of(usbep, struct pch_udc_ep, ep);
1804 req = container_of(usbreq, struct pch_udc_request, req);
1805 dev = ep->dev;
1806 if (!list_empty(&req->queue))
1807 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1808 __func__, usbep->name, req);
1809 if (req->td_data != NULL) {
1810 if (req->chain_len > 1)
1811 pch_udc_free_dma_chain(ep->dev, req);
1812 dma_pool_free(ep->dev->data_requests, req->td_data,
1813 req->td_data_phys);
1814 }
1815 kfree(req);
1816}
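
/**
 * pch_udc_pcd_queue() - Queue a request. Called by the gadget driver to
 *			 transmit or receive data.
 * @usbep:	Reference to the USB endpoint structure
 * @usbreq:	Reference to the USB request
 * @gfp:	allocation flags used when a bounce buffer is needed
 *
 * Return: 0 on success, linux error number on failure
 */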
1829static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1830 gfp_t gfp)
1831{
1832 int retval = 0;
1833 struct pch_udc_ep *ep;
1834 struct pch_udc_dev *dev;
1835 struct pch_udc_request *req;
1836 unsigned long iflags;
1837
1838 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1839 return -EINVAL;
1840 ep = container_of(usbep, struct pch_udc_ep, ep);
1841 dev = ep->dev;
1842 if (!ep->ep.desc && ep->num)
1843 return -EINVAL;
1844 req = container_of(usbreq, struct pch_udc_request, req);
1845 if (!list_empty(&req->queue))
1846 return -EINVAL;
1847 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1848 return -ESHUTDOWN;
1849 spin_lock_irqsave(&dev->lock, iflags);
1850
1851 if (usbreq->length &&
1852 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1853 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1854 if (ep->in)
1855 usbreq->dma = dma_map_single(&dev->pdev->dev,
1856 usbreq->buf,
1857 usbreq->length,
1858 DMA_TO_DEVICE);
1859 else
1860 usbreq->dma = dma_map_single(&dev->pdev->dev,
1861 usbreq->buf,
1862 usbreq->length,
1863 DMA_FROM_DEVICE);
1864 } else {
1865 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1866 if (!req->buf) {
1867 retval = -ENOMEM;
1868 goto probe_end;
1869 }
1870 if (ep->in) {
1871 memcpy(req->buf, usbreq->buf, usbreq->length);
1872 req->dma = dma_map_single(&dev->pdev->dev,
1873 req->buf,
1874 usbreq->length,
1875 DMA_TO_DEVICE);
1876 } else
1877 req->dma = dma_map_single(&dev->pdev->dev,
1878 req->buf,
1879 usbreq->length,
1880 DMA_FROM_DEVICE);
1881 }
1882 req->dma_mapped = 1;
1883 }
1884 if (usbreq->length > 0) {
1885 retval = prepare_dma(ep, req, GFP_ATOMIC);
1886 if (retval)
1887 goto probe_end;
1888 }
1889 usbreq->actual = 0;
1890 usbreq->status = -EINPROGRESS;
1891 req->dma_done = 0;
1892 if (list_empty(&ep->queue) && !ep->halted) {
1893
1894 if (!usbreq->length) {
1895 process_zlp(ep, req);
1896 retval = 0;
1897 goto probe_end;
1898 }
1899 if (!ep->in) {
1900 pch_udc_start_rxrequest(ep, req);
1901 } else {
1902
1903
1904
1905
1906
1907 pch_udc_wait_ep_stall(ep);
1908 pch_udc_ep_clear_nak(ep);
1909 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1910 }
1911 }
1912
1913 if (req != NULL)
1914 list_add_tail(&req->queue, &ep->queue);
1915
1916probe_end:
1917 spin_unlock_irqrestore(&dev->lock, iflags);
1918 return retval;
1919}
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1932 struct usb_request *usbreq)
1933{
1934 struct pch_udc_ep *ep;
1935 struct pch_udc_request *req;
1936 unsigned long flags;
1937 int ret = -EINVAL;
1938
1939 ep = container_of(usbep, struct pch_udc_ep, ep);
1940 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1941 return ret;
1942 req = container_of(usbreq, struct pch_udc_request, req);
1943 spin_lock_irqsave(&ep->dev->lock, flags);
1944
1945 list_for_each_entry(req, &ep->queue, queue) {
1946 if (&req->req == usbreq) {
1947 pch_udc_ep_set_nak(ep);
1948 if (!list_empty(&req->queue))
1949 complete_req(ep, req, -ECONNRESET);
1950 ret = 0;
1951 break;
1952 }
1953 }
1954 spin_unlock_irqrestore(&ep->dev->lock, flags);
1955 return ret;
1956}
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1969{
1970 struct pch_udc_ep *ep;
1971 unsigned long iflags;
1972 int ret;
1973
1974 if (!usbep)
1975 return -EINVAL;
1976 ep = container_of(usbep, struct pch_udc_ep, ep);
1977 if (!ep->ep.desc && !ep->num)
1978 return -EINVAL;
1979 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1980 return -ESHUTDOWN;
1981 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1982 if (list_empty(&ep->queue)) {
1983 if (halt) {
1984 if (ep->num == PCH_UDC_EP0)
1985 ep->dev->stall = 1;
1986 pch_udc_ep_set_stall(ep);
1987 pch_udc_enable_ep_interrupts(
1988 ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1989 } else {
1990 pch_udc_ep_clear_stall(ep);
1991 }
1992 ret = 0;
1993 } else {
1994 ret = -EAGAIN;
1995 }
1996 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1997 return ret;
1998}
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2011{
2012 struct pch_udc_ep *ep;
2013 unsigned long iflags;
2014 int ret;
2015
2016 if (!usbep)
2017 return -EINVAL;
2018 ep = container_of(usbep, struct pch_udc_ep, ep);
2019 if (!ep->ep.desc && !ep->num)
2020 return -EINVAL;
2021 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2022 return -ESHUTDOWN;
2023 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2024 if (!list_empty(&ep->queue)) {
2025 ret = -EAGAIN;
2026 } else {
2027 if (ep->num == PCH_UDC_EP0)
2028 ep->dev->stall = 1;
2029 pch_udc_ep_set_stall(ep);
2030 pch_udc_enable_ep_interrupts(ep->dev,
2031 PCH_UDC_EPINT(ep->in, ep->num));
2032 ep->dev->prot_stall = 1;
2033 ret = 0;
2034 }
2035 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2036 return ret;
2037}
2038
2039
2040
2041
2042
2043static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2044{
2045 struct pch_udc_ep *ep;
2046
2047 if (!usbep)
2048 return;
2049
2050 ep = container_of(usbep, struct pch_udc_ep, ep);
2051 if (ep->ep.desc || !ep->num)
2052 pch_udc_ep_fifo_flush(ep, ep->in);
2053}
2054
2055static const struct usb_ep_ops pch_udc_ep_ops = {
2056 .enable = pch_udc_pcd_ep_enable,
2057 .disable = pch_udc_pcd_ep_disable,
2058 .alloc_request = pch_udc_alloc_request,
2059 .free_request = pch_udc_free_request,
2060 .queue = pch_udc_pcd_queue,
2061 .dequeue = pch_udc_pcd_dequeue,
2062 .set_halt = pch_udc_pcd_set_halt,
2063 .set_wedge = pch_udc_pcd_set_wedge,
2064 .fifo_status = NULL,
2065 .fifo_flush = pch_udc_pcd_fifo_flush,
2066};
2067
2068
2069
2070
2071
2072static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2073{
2074 static u32 pky_marker;
2075
2076 if (!td_stp)
2077 return;
2078 td_stp->reserved = ++pky_marker;
2079 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2080 td_stp->status = PCH_UDC_BS_HST_RDY;
2081}
2082
2083
2084
2085
2086
2087
2088static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2089{
2090 struct pch_udc_request *req;
2091 struct pch_udc_data_dma_desc *td_data;
2092
2093 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2094 return;
2095
2096 if (list_empty(&ep->queue))
2097 return;
2098
2099
2100 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2101 if (req->dma_going)
2102 return;
2103 if (!req->td_data)
2104 return;
2105 pch_udc_wait_ep_stall(ep);
2106 req->dma_going = 1;
2107 pch_udc_ep_set_ddptr(ep, 0);
2108 td_data = req->td_data;
2109 while (1) {
2110 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2111 PCH_UDC_BS_HST_RDY;
2112 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2113 break;
2114 td_data = phys_to_virt(td_data->next);
2115 }
2116 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2117 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2118 pch_udc_ep_set_pd(ep);
2119 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2120 pch_udc_ep_clear_nak(ep);
2121}
2122
2123
2124
2125
2126
2127static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2128{
2129 struct pch_udc_request *req;
2130 struct pch_udc_dev *dev = ep->dev;
2131
2132 if (list_empty(&ep->queue))
2133 return;
2134 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2135 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2136 PCH_UDC_BS_DMA_DONE)
2137 return;
2138 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2139 PCH_UDC_RTS_SUCC) {
2140 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2141 "epstatus=0x%08x\n",
2142 (req->td_data_last->status & PCH_UDC_RXTX_STS),
2143 (int)(ep->epsts));
2144 return;
2145 }
2146
2147 req->req.actual = req->req.length;
2148 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2149 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2150 complete_req(ep, req, 0);
2151 req->dma_going = 0;
2152 if (!list_empty(&ep->queue)) {
2153 pch_udc_wait_ep_stall(ep);
2154 pch_udc_ep_clear_nak(ep);
2155 pch_udc_enable_ep_interrupts(ep->dev,
2156 PCH_UDC_EPINT(ep->in, ep->num));
2157 } else {
2158 pch_udc_disable_ep_interrupts(ep->dev,
2159 PCH_UDC_EPINT(ep->in, ep->num));
2160 }
2161}
2162
2163
2164
2165
2166
2167static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2168{
2169 struct pch_udc_request *req;
2170 struct pch_udc_dev *dev = ep->dev;
2171 unsigned int count;
2172 struct pch_udc_data_dma_desc *td;
2173 dma_addr_t addr;
2174
2175 if (list_empty(&ep->queue))
2176 return;
2177
2178 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2179 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2180 pch_udc_ep_set_ddptr(ep, 0);
2181 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2182 PCH_UDC_BS_DMA_DONE)
2183 td = req->td_data_last;
2184 else
2185 td = req->td_data;
2186
2187 while (1) {
2188 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2189 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2190 "epstatus=0x%08x\n",
2191 (req->td_data->status & PCH_UDC_RXTX_STS),
2192 (int)(ep->epsts));
2193 return;
2194 }
2195 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2196 if (td->status & PCH_UDC_DMA_LAST) {
2197 count = td->status & PCH_UDC_RXTX_BYTES;
2198 break;
2199 }
2200 if (td == req->td_data_last) {
2201 dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2202 return;
2203 }
2204 addr = (dma_addr_t)td->next;
2205 td = phys_to_virt(addr);
2206 }
2207
2208 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2209 count = UDC_DMA_MAXPACKET;
2210 req->td_data->status |= PCH_UDC_DMA_LAST;
2211 td->status |= PCH_UDC_BS_HST_BSY;
2212
2213 req->dma_going = 0;
2214 req->req.actual = count;
2215 complete_req(ep, req, 0);
2216
2217 if (!list_empty(&ep->queue)) {
2218 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2219 pch_udc_start_rxrequest(ep, req);
2220 }
2221}
2222
2223
2224
2225
2226
2227
2228
2229static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2230{
2231 u32 epsts;
2232 struct pch_udc_ep *ep;
2233
2234 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2235 epsts = ep->epsts;
2236 ep->epsts = 0;
2237
2238 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2239 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2240 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2241 return;
2242 if ((epsts & UDC_EPSTS_BNA))
2243 return;
2244 if (epsts & UDC_EPSTS_HE)
2245 return;
2246 if (epsts & UDC_EPSTS_RSS) {
2247 pch_udc_ep_set_stall(ep);
2248 pch_udc_enable_ep_interrupts(ep->dev,
2249 PCH_UDC_EPINT(ep->in, ep->num));
2250 }
2251 if (epsts & UDC_EPSTS_RCS) {
2252 if (!dev->prot_stall) {
2253 pch_udc_ep_clear_stall(ep);
2254 } else {
2255 pch_udc_ep_set_stall(ep);
2256 pch_udc_enable_ep_interrupts(ep->dev,
2257 PCH_UDC_EPINT(ep->in, ep->num));
2258 }
2259 }
2260 if (epsts & UDC_EPSTS_TDC)
2261 pch_udc_complete_transfer(ep);
2262
2263 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2264 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2265 pch_udc_start_next_txrequest(ep);
2266}
2267
2268
2269
2270
2271
2272
2273static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2274{
2275 u32 epsts;
2276 struct pch_udc_ep *ep;
2277 struct pch_udc_request *req = NULL;
2278
2279 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2280 epsts = ep->epsts;
2281 ep->epsts = 0;
2282
2283 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2284
2285 req = list_entry(ep->queue.next, struct pch_udc_request,
2286 queue);
2287 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2288 PCH_UDC_BS_DMA_DONE) {
2289 if (!req->dma_going)
2290 pch_udc_start_rxrequest(ep, req);
2291 return;
2292 }
2293 }
2294 if (epsts & UDC_EPSTS_HE)
2295 return;
2296 if (epsts & UDC_EPSTS_RSS) {
2297 pch_udc_ep_set_stall(ep);
2298 pch_udc_enable_ep_interrupts(ep->dev,
2299 PCH_UDC_EPINT(ep->in, ep->num));
2300 }
2301 if (epsts & UDC_EPSTS_RCS) {
2302 if (!dev->prot_stall) {
2303 pch_udc_ep_clear_stall(ep);
2304 } else {
2305 pch_udc_ep_set_stall(ep);
2306 pch_udc_enable_ep_interrupts(ep->dev,
2307 PCH_UDC_EPINT(ep->in, ep->num));
2308 }
2309 }
2310 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2311 UDC_EPSTS_OUT_DATA) {
2312 if (ep->dev->prot_stall == 1) {
2313 pch_udc_ep_set_stall(ep);
2314 pch_udc_enable_ep_interrupts(ep->dev,
2315 PCH_UDC_EPINT(ep->in, ep->num));
2316 } else {
2317 pch_udc_complete_receiver(ep);
2318 }
2319 }
2320 if (list_empty(&ep->queue))
2321 pch_udc_set_dma(dev, DMA_DIR_RX);
2322}
2323
2324
2325
2326
2327
2328static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2329{
2330 u32 epsts;
2331 struct pch_udc_ep *ep;
2332 struct pch_udc_ep *ep_out;
2333
2334 ep = &dev->ep[UDC_EP0IN_IDX];
2335 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2336 epsts = ep->epsts;
2337 ep->epsts = 0;
2338
2339 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2340 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2341 UDC_EPSTS_XFERDONE)))
2342 return;
2343 if ((epsts & UDC_EPSTS_BNA))
2344 return;
2345 if (epsts & UDC_EPSTS_HE)
2346 return;
2347 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2348 pch_udc_complete_transfer(ep);
2349 pch_udc_clear_dma(dev, DMA_DIR_RX);
2350 ep_out->td_data->status = (ep_out->td_data->status &
2351 ~PCH_UDC_BUFF_STS) |
2352 PCH_UDC_BS_HST_RDY;
2353 pch_udc_ep_clear_nak(ep_out);
2354 pch_udc_set_dma(dev, DMA_DIR_RX);
2355 pch_udc_ep_set_rrdy(ep_out);
2356 }
2357
2358 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2359 !(epsts & UDC_EPSTS_TXEMPTY))
2360 pch_udc_start_next_txrequest(ep);
2361}
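
/**
 * pch_udc_svc_control_out() - Handle an interrupt on the control OUT
 *			       endpoint (SETUP and data-out phases of EP0).
 * @dev:	Reference to the device structure
 */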
2368static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2369 __releases(&dev->lock)
2370 __acquires(&dev->lock)
2371{
2372 u32 stat;
2373 int setup_supported;
2374 struct pch_udc_ep *ep;
2375
2376 ep = &dev->ep[UDC_EP0OUT_IDX];
2377 stat = ep->epsts;
2378 ep->epsts = 0;
2379
2380
2381 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2382 UDC_EPSTS_OUT_SETUP) {
2383 dev->stall = 0;
2384 dev->ep[UDC_EP0IN_IDX].halted = 0;
2385 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2386 dev->setup_data = ep->td_stp->request;
2387 pch_udc_init_setup_buff(ep->td_stp);
2388 pch_udc_clear_dma(dev, DMA_DIR_RX);
2389 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2390 dev->ep[UDC_EP0IN_IDX].in);
2391 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2392 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2393 else
2394 dev->gadget.ep0 = &ep->ep;
2395 spin_lock(&dev->lock);
2396
2397 if ((dev->setup_data.bRequestType == 0x21) &&
2398 (dev->setup_data.bRequest == 0xFF))
2399 dev->prot_stall = 0;
2400
2401 setup_supported = dev->driver->setup(&dev->gadget,
2402 &dev->setup_data);
2403 spin_unlock(&dev->lock);
2404
2405 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2406 ep->td_data->status = (ep->td_data->status &
2407 ~PCH_UDC_BUFF_STS) |
2408 PCH_UDC_BS_HST_RDY;
2409 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2410 }
2411
2412 if (setup_supported >= 0 && setup_supported <
2413 UDC_EP0IN_MAX_PKT_SIZE) {
2414 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2415
2416
2417 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2418 pch_udc_set_dma(dev, DMA_DIR_RX);
2419 pch_udc_ep_clear_nak(ep);
2420 }
2421 } else if (setup_supported < 0) {
2422
2423 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2424 pch_udc_enable_ep_interrupts(ep->dev,
2425 PCH_UDC_EPINT(ep->in, ep->num));
2426 dev->stall = 0;
2427 pch_udc_set_dma(dev, DMA_DIR_RX);
2428 } else {
2429 dev->waiting_zlp_ack = 1;
2430 }
2431 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2432 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2433 pch_udc_clear_dma(dev, DMA_DIR_RX);
2434 pch_udc_ep_set_ddptr(ep, 0);
2435 if (!list_empty(&ep->queue)) {
2436 ep->epsts = stat;
2437 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2438 }
2439 pch_udc_set_dma(dev, DMA_DIR_RX);
2440 }
2441 pch_udc_ep_set_rrdy(ep);
2442}
2443
2444
2445
2446
2447
2448
2449
2450
2451static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2452{
2453 struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2454 if (list_empty(&ep->queue))
2455 return;
2456 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2457 pch_udc_ep_clear_nak(ep);
2458}
2459
2460
2461
2462
2463
2464
2465static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2466{
2467 int i;
2468 struct pch_udc_ep *ep;
2469
2470 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2471
2472 if (ep_intr & (0x1 << i)) {
2473 ep = &dev->ep[UDC_EPIN_IDX(i)];
2474 ep->epsts = pch_udc_read_ep_status(ep);
2475 pch_udc_clear_ep_status(ep, ep->epsts);
2476 }
2477
2478 if (ep_intr & (0x10000 << i)) {
2479 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2480 ep->epsts = pch_udc_read_ep_status(ep);
2481 pch_udc_clear_ep_status(ep, ep->epsts);
2482 }
2483 }
2484}
2485
2486
2487
2488
2489
2490
2491static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2492{
2493 struct pch_udc_ep *ep;
2494 u32 val;
2495
2496
2497 ep = &dev->ep[UDC_EP0IN_IDX];
2498 pch_udc_clear_ep_control(ep);
2499 pch_udc_ep_fifo_flush(ep, ep->in);
2500 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2501 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2502
2503 ep->td_data = NULL;
2504 ep->td_stp = NULL;
2505 ep->td_data_phys = 0;
2506 ep->td_stp_phys = 0;
2507
2508
2509 ep = &dev->ep[UDC_EP0OUT_IDX];
2510 pch_udc_clear_ep_control(ep);
2511 pch_udc_ep_fifo_flush(ep, ep->in);
2512 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2513 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2514 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2515 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2516
2517
2518 pch_udc_init_setup_buff(ep->td_stp);
2519
2520 pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2521
2522 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2523
2524
2525 ep->td_data->status = PCH_UDC_DMA_LAST;
2526 ep->td_data->dataptr = dev->dma_addr;
2527 ep->td_data->next = ep->td_data_phys;
2528
2529 pch_udc_ep_clear_nak(ep);
2530}
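
/**
 * pch_udc_svc_ur_interrupt() - Handle a USB reset interrupt: disable DMA and
 *				endpoint interrupts, clear all endpoint state
 *				and notify the gadget core of the reset.
 * @dev:	Reference to the device structure
 */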
2537static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2538{
2539 struct pch_udc_ep *ep;
2540 int i;
2541
2542 pch_udc_clear_dma(dev, DMA_DIR_TX);
2543 pch_udc_clear_dma(dev, DMA_DIR_RX);
2544
2545 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2546
2547 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2548
2549 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2550 ep = &dev->ep[i];
2551 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2552 pch_udc_clear_ep_control(ep);
2553 pch_udc_ep_set_ddptr(ep, 0);
2554 pch_udc_write_csr(ep->dev, 0x00, i);
2555 }
2556 dev->stall = 0;
2557 dev->prot_stall = 0;
2558 dev->waiting_zlp_ack = 0;
2559 dev->set_cfg_not_acked = 0;
2560
2561
2562 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2563 ep = &dev->ep[i];
2564 pch_udc_ep_set_nak(ep);
2565 pch_udc_ep_fifo_flush(ep, ep->in);
2566
2567 empty_req_queue(ep);
2568 }
2569 if (dev->driver) {
2570 spin_unlock(&dev->lock);
2571 usb_gadget_udc_reset(&dev->gadget, dev->driver);
2572 spin_lock(&dev->lock);
2573 }
2574}
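
/**
 * pch_udc_svc_enum_interrupt() - Handle a speed enumeration done interrupt
 * @dev:	Reference to the device structure
 */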
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;

	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
						UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		BUG();
	}
	dev->gadget.speed = speed;
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));

	/* Re-enable the device interrupts of interest */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}
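
/**
 * pch_udc_svc_intf_interrupt() - Handle a SET_INTERFACE interrupt by passing
 *				  an equivalent setup packet to the gadget driver
 * @dev:	Reference to the device structure
 */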
static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
{
	u32 reg, dev_stat = 0;
	int i;

	dev_stat = pch_udc_read_device_status(dev);
	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
						UDC_DEVSTS_INTF_SHIFT;
	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
						UDC_DEVSTS_ALT_SHIFT;
	dev->set_cfg_not_acked = 1;

	/* Construct a SET_INTERFACE request for the gadget driver */
	memset(&dev->setup_data, 0, sizeof(dev->setup_data));
	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);

	/* Program the interface and alternate setting into the EP0 CSR */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* Clear any stall and halt state left on the endpoints */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;
	/* The gadget driver may already have been unbound */
	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->setup(&dev->gadget, &dev->setup_data);
		spin_lock(&dev->lock);
	}
}
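
/**
 * pch_udc_svc_cfg_interrupt() - Handle a SET_CONFIGURATION interrupt by
 *				 passing an equivalent setup packet to the
 *				 gadget driver
 * @dev:	Reference to the device structure
 */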
static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
{
	int i;
	u32 reg, dev_stat = 0;

	dev_stat = pch_udc_read_device_status(dev);
	dev->set_cfg_not_acked = 1;
	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
						UDC_DEVSTS_CFG_SHIFT;

	/* Construct a SET_CONFIGURATION request for the gadget driver */
	memset(&dev->setup_data, 0, sizeof(dev->setup_data));
	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);

	/* Program the configuration number into the EP0 CSR */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* Clear any stall and halt state left on the endpoints */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;

	/* The gadget driver may already have been unbound */
	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->setup(&dev->gadget, &dev->setup_data);
		spin_lock(&dev->lock);
	}
}
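
/**
 * pch_udc_dev_isr() - Dispatch the device (non-endpoint) interrupt sources
 * @dev:	Reference to the device structure
 * @dev_intr:	Device interrupt status bits
 */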
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;

	/* USB Reset interrupt */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration Done interrupt */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}
	/* Set Interface interrupt */
	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);
	/* Set Configuration interrupt */
	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);
	/* USB Suspend interrupt */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}

		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			if (dev->driver && dev->driver->disconnect) {
				spin_unlock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_lock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			schedule_work(&dev->vbus_gpio.irq_work_fall);

		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}

	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");

	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");

	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}
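
/**
 * pch_udc_isr() - Top-level interrupt handler: acknowledge and dispatch
 *		   device and endpoint interrupts
 * @irq:	IRQ number
 * @pdev:	Pointer to the pch_udc_dev structure passed at request_irq time
 */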
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;

	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);

	/*
	 * If all three registers read back the same value, the controller is
	 * assumed to be hung up; issue a soft reset to recover.
	 */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear endpoint interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process control IN interrupts, if present */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process control OUT interrupts, if present */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data IN interrupts, if present */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 << i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data OUT interrupts, if present */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
							UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
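
/**
 * pch_udc_setup_ep0() - Enable the interrupts needed to service endpoint 0
 * @dev:	Reference to the device structure
 */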
static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
{
	/* Enable ep0 interrupts */
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
						UDC_EPINT_OUT_EP0);
	/* Enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}
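
/**
 * pch_udc_pcd_reinit() - (Re-)initialise the gadget endpoint list and the
 *			  software state of every hardware endpoint
 * @dev:	Reference to the device structure
 */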
static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
{
	const char *const ep_string[] = {
		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
		"ep15in", "ep15out",
	};
	int i;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* Initialize the endpoint software structures */
	memset(dev->ep, 0, sizeof(dev->ep));
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		struct pch_udc_ep *ep = &dev->ep[i];
		ep->dev = dev;
		ep->halted = 1;
		ep->num = i / 2;
		ep->in = ~i & 1;
		ep->ep.name = ep_string[i];
		ep->ep.ops = &pch_udc_ep_ops;
		if (ep->in) {
			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_in = true;
		} else {
			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
					  UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_out = true;
		}
		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}

		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
		INIT_LIST_HEAD(&ep->queue);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep,
				   UDC_EP0IN_MAX_PKT_SIZE);
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep,
				   UDC_EP0OUT_MAX_PKT_SIZE);

	/* ep0 in and out are reached via gadget.ep0; remove them from ep_list */
	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);

	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}
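
/**
 * pch_udc_pcd_init() - Initialise the controller hardware, the endpoint
 *			bookkeeping and the optional VBUS sense GPIO
 * @dev:	Reference to the device structure
 */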
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);
	pch_vbus_gpio_init(dev, vbus_gpio_port);
	return 0;
}
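
/**
 * init_dma_pools() - Create the DMA pools and EP0 descriptors/buffer
 * @dev:	Reference to the device structure
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure.
 */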
static int init_dma_pools(struct pch_udc_dev *dev)
{
	struct pch_udc_stp_dma_desc	*td_stp;
	struct pch_udc_data_dma_desc	*td_data;
	void				*ep0out_buf;

	/* DMA pool for data transfer descriptors */
	dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
		sizeof(struct pch_udc_data_dma_desc), 0, 0);
	if (!dev->data_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
			__func__);
		return -ENOMEM;
	}

	/* DMA pool for SETUP transfer descriptors */
	dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
	if (!dev->stp_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
			__func__);
		return -ENOMEM;
	}
	/* Pre-allocate the EP0 OUT SETUP descriptor */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
	if (!td_stp) {
		dev_err(&dev->pdev->dev,
			"%s: can't allocate setup dma descriptor\n", __func__);
		return -ENOMEM;
	}
	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;

	/* Pre-allocate the EP0 OUT data descriptor */
	td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
	if (!td_data) {
		dev_err(&dev->pdev->dev,
			"%s: can't allocate data dma descriptor\n", __func__);
		return -ENOMEM;
	}
	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
	/* EP0 IN does not use pre-allocated descriptors */
	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;

	/* Buffer receiving EP0 OUT data, mapped for device-to-memory DMA */
	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
				  GFP_KERNEL);
	if (!ep0out_buf)
		return -ENOMEM;
	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
				       UDC_EP0OUT_BUFF_SIZE * 4,
				       DMA_FROM_DEVICE);
	/* dma_map_single() can fail; don't hand the hardware a bad address */
	if (dma_mapping_error(&dev->pdev->dev, dev->dma_addr))
		return -ENOMEM;
	return 0;
}
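
/* Gadget-core .udc_start callback: bind @driver and connect to the bus */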
static int pch_udc_start(struct usb_gadget *g,
			 struct usb_gadget_driver *driver)
{
	struct pch_udc_dev	*dev = to_pch_udc(g);

	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for ep0 traffic */
	pch_udc_setup_ep0(dev);

	/* Clear soft disconnect if VBUS is present or not being monitored */
	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
		pch_udc_clear_disconnect(dev);

	dev->connected = 1;
	return 0;
}
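
/* Gadget-core .udc_stop callback: mask interrupts and drop off the bus */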
static int pch_udc_stop(struct usb_gadget *g)
{
	struct pch_udc_dev	*dev = to_pch_udc(g);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);

	/* Detach the gadget driver */
	dev->driver = NULL;
	dev->connected = 0;

	/* Assert soft disconnect so the host sees the device go away */
	pch_udc_set_disconnect(dev);

	return 0;
}
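
/* PCI .shutdown: silence the controller and force a disconnect */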
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* Disable the pullup so the host will think we're gone */
	pch_udc_set_disconnect(dev);
}
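
/* PCI .remove: unregister the UDC and release DMA resources */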
static void pch_udc_remove(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* The gadget driver should already have been unbound */
	if (dev->driver)
		dev_err(&pdev->dev,
			"%s: gadget driver still bound!!!\n", __func__);

	/*
	 * Return the EP0 OUT descriptors to the pools they were allocated
	 * from, then destroy the pools.
	 */
	if (dev->ep[UDC_EP0OUT_IDX].td_data)
		dma_pool_free(dev->data_requests,
			      dev->ep[UDC_EP0OUT_IDX].td_data,
			      dev->ep[UDC_EP0OUT_IDX].td_data_phys);
	dma_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		if (dev->ep[UDC_EP0OUT_IDX].td_stp)
			dma_pool_free(dev->stp_requests,
				      dev->ep[UDC_EP0OUT_IDX].td_stp,
				      dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
		dma_pool_destroy(dev->stp_requests);
	}

	if (dev->dma_addr)
		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);

	pch_vbus_gpio_free(dev);

	pch_udc_exit(dev);
}

#ifdef CONFIG_PM_SLEEP
static int pch_udc_suspend(struct device *d)
{
	struct pci_dev *pdev = to_pci_dev(d);
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	return 0;
}

static int pch_udc_resume(struct device *d)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
#define PCH_UDC_PM_OPS		(&pch_udc_pm)
#else
#define PCH_UDC_PM_OPS		NULL
#endif
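
/* PCI .probe: map the register BAR, set up the controller and register the UDC */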
static int pch_udc_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int			bar;
	int			retval;
	struct pch_udc_dev	*dev;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* PCI setup */
	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	pci_set_drvdata(pdev, dev);

	/* Determine the register BAR from the PCI ID */
	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
	else
		bar = PCH_UDC_PCI_BAR;

	/* PCI resource allocation */
	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
	if (retval)
		return retval;

	dev->base_addr = pcim_iomap_table(pdev)[bar];

	/* Initialize the hardware */
	if (pch_udc_pcd_init(dev))
		return -ENODEV;

	pci_enable_msi(pdev);

	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
				  IRQF_SHARED, KBUILD_MODNAME, dev);
	if (retval) {
		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
			pdev->irq);
		goto finished;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Device struct setup */
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &pch_udc_ops;

	retval = init_dma_pools(dev);
	if (retval)
		goto finished;

	dev->gadget.name = KBUILD_MODNAME;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* Stay disconnected until a gadget driver binds */
	pch_udc_set_disconnect(dev);
	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (retval)
		goto finished;
	return 0;

finished:
	pch_udc_remove(pdev);
	return retval;
}

static const struct pci_device_id pch_udc_pcidev_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);

static struct pci_driver pch_udc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_udc_pcidev_id,
	.probe = pch_udc_probe,
	.remove = pch_udc_remove,
	.shutdown = pch_udc_shutdown,
	.driver = {
		.pm = PCH_UDC_PM_OPS,
	},
};

module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");