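/*
 * pch_udc: USB device controller (UDC) driver for the Intel EG20T and
 * Quark X1000 PCH and the ROHM/LAPIS ML7213/ML7831 IOH (see the PCI
 * device IDs defined below).
 */
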
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/gpio.h>
#include <linux/irq.h>

/* GPIO port used for VBUS detection */
static int vbus_gpio_port = -1;		/* GPIO port number (-1 means not used) */

#define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
#define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */

/* Register address offsets */
#define UDC_EP_REG_SHIFT	0x20	/* stride between per-endpoint register banks */

#define UDC_EPCTL_ADDR		0x00	/* endpoint control */
#define UDC_EPSTS_ADDR		0x04	/* endpoint status */
#define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size (IN) / frame number */
#define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size (OUT) / max packet size */
#define UDC_SUBPTR_ADDR		0x10	/* SETUP buffer pointer */
#define UDC_DESPTR_ADDR		0x14	/* data descriptor pointer */
#define UDC_CONFIRM_ADDR	0x18

#define UDC_DEVCFG_ADDR		0x400	/* device configuration */
#define UDC_DEVCTL_ADDR		0x404	/* device control */
#define UDC_DEVSTS_ADDR		0x408	/* device status */
#define UDC_DEVIRQSTS_ADDR	0x40C	/* device interrupt status */
#define UDC_DEVIRQMSK_ADDR	0x410	/* device interrupt mask */
#define UDC_EPIRQSTS_ADDR	0x414	/* endpoint interrupt status */
#define UDC_EPIRQMSK_ADDR	0x418	/* endpoint interrupt mask */
#define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
#define UDC_CSR_BUSY_ADDR	0x4f0	/* CSR busy status */
#define UDC_SRST_ADDR		0x4fc	/* soft reset */
#define UDC_CSR_ADDR		0x500	/* base of per-endpoint CSR (NE) registers */

/* Endpoint control register bits */
#define UDC_EPCTL_MRXFLUSH	(1 << 12)	/* flush receive FIFO */
#define UDC_EPCTL_RRDY		(1 << 9)	/* receive ready */
#define UDC_EPCTL_CNAK		(1 << 8)	/* clear NAK */
#define UDC_EPCTL_SNAK		(1 << 7)	/* set NAK */
#define UDC_EPCTL_NAK		(1 << 6)	/* NAK status */
#define UDC_EPCTL_P		(1 << 3)	/* poll demand */
#define UDC_EPCTL_F		(1 << 1)	/* flush transmit FIFO */
#define UDC_EPCTL_S		(1 << 0)	/* stall */
#define UDC_EPCTL_ET_SHIFT	4		/* endpoint type */

#define UDC_EPCTL_ET_MASK	0x00000030

/* Endpoint types */
#define UDC_EPCTL_ET_CONTROL	0
#define UDC_EPCTL_ET_ISO	1
#define UDC_EPCTL_ET_BULK	2
#define UDC_EPCTL_ET_INTERRUPT	3

/* Endpoint status register bits */
#define UDC_EPSTS_XFERDONE	(1 << 27)	/* transfer done */
#define UDC_EPSTS_RSS		(1 << 26)	/* received set-stall request */
#define UDC_EPSTS_RCS		(1 << 25)	/* received clear-stall request */
#define UDC_EPSTS_TXEMPTY	(1 << 24)	/* transmit FIFO empty */
#define UDC_EPSTS_TDC		(1 << 10)	/* transmit DMA complete */
#define UDC_EPSTS_HE		(1 << 9)	/* host error */
#define UDC_EPSTS_MRXFIFO_EMP	(1 << 8)	/* receive FIFO empty */
#define UDC_EPSTS_BNA		(1 << 7)	/* buffer not available */
#define UDC_EPSTS_IN		(1 << 6)	/* IN token received */
#define UDC_EPSTS_OUT_SHIFT	4		/* OUT packet status */

#define UDC_EPSTS_OUT_MASK	0x00000030
#define UDC_EPSTS_ALL_CLR_MASK	0x1F0006F0

/* OUT packet status values */
#define UDC_EPSTS_OUT_SETUP	2	/* SETUP packet received */
#define UDC_EPSTS_OUT_DATA	1	/* data packet received */

/* Device configuration register bits */
#define UDC_DEVCFG_CSR_PRG	(1 << 17)	/* CSR programming */
#define UDC_DEVCFG_SP		(1 << 3)	/* self powered */
/* Device speed selection */
#define UDC_DEVCFG_SPD_HS	0x0	/* high speed */
#define UDC_DEVCFG_SPD_FS	0x1	/* full speed */
#define UDC_DEVCFG_SPD_LS	0x2	/* low speed */

/* Device control register bits */
#define UDC_DEVCTL_THLEN_SHIFT		24	/* threshold length */
#define UDC_DEVCTL_BRLEN_SHIFT		16	/* burst length */
#define UDC_DEVCTL_CSR_DONE		(1 << 13)	/* CSR programming done */
#define UDC_DEVCTL_SD			(1 << 10)	/* soft disconnect */
#define UDC_DEVCTL_MODE			(1 << 9)	/* device mode */
#define UDC_DEVCTL_BREN			(1 << 8)	/* burst enable */
#define UDC_DEVCTL_THE			(1 << 7)	/* threshold enable */
#define UDC_DEVCTL_DU			(1 << 4)	/* descriptor update */
#define UDC_DEVCTL_TDE			(1 << 3)	/* transmit DMA enable */
#define UDC_DEVCTL_RDE			(1 << 2)	/* receive DMA enable */
#define UDC_DEVCTL_RES			(1 << 0)	/* resume signalling */

/* Device status register fields */
#define UDC_DEVSTS_TS_SHIFT		18	/* frame number */
#define UDC_DEVSTS_ENUM_SPEED_SHIFT	13	/* enumerated speed */
#define UDC_DEVSTS_ALT_SHIFT		8	/* alternate setting */
#define UDC_DEVSTS_INTF_SHIFT		4	/* interface */
#define UDC_DEVSTS_CFG_SHIFT		0	/* configuration */

#define UDC_DEVSTS_TS_MASK		0xfffc0000
#define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
#define UDC_DEVSTS_ALT_MASK		0x00000f00
#define UDC_DEVSTS_INTF_MASK		0x000000f0
#define UDC_DEVSTS_CFG_MASK		0x0000000f

/* Enumerated speed values */
#define UDC_DEVSTS_ENUM_SPEED_FULL	1
#define UDC_DEVSTS_ENUM_SPEED_HIGH	0
#define UDC_DEVSTS_ENUM_SPEED_LOW	2
#define UDC_DEVSTS_ENUM_SPEED_FULLX	3

/* Device interrupt bits */
#define UDC_DEVINT_RWKP		(1 << 7)	/* remote wakeup */
#define UDC_DEVINT_ENUM		(1 << 6)	/* enumeration done */
#define UDC_DEVINT_SOF		(1 << 5)	/* start of frame */
#define UDC_DEVINT_US		(1 << 4)	/* USB suspend */
#define UDC_DEVINT_UR		(1 << 3)	/* USB reset */
#define UDC_DEVINT_ES		(1 << 2)	/* idle state detected */
#define UDC_DEVINT_SI		(1 << 1)	/* set interface */
#define UDC_DEVINT_SC		(1 << 0)	/* set configuration */

#define UDC_DEVINT_MSK		0x7f	/* mask of all device interrupts */

/* Endpoint interrupt bits: IN endpoints use bits 0-15, OUT use bits 16-31 */
#define UDC_EPINT_IN_SHIFT	0
#define UDC_EPINT_OUT_SHIFT	16
#define UDC_EPINT_IN_EP0	(1 << 0)
#define UDC_EPINT_OUT_EP0	(1 << 16)

#define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff	/* mask all endpoint interrupts */

/* UDC_CSR_BUSY status register bit */
#define UDC_CSR_BUSY		(1 << 0)

/* Soft reset register bits */
#define UDC_PSRST		(1 << 1)	/* PHY soft reset */
#define UDC_SRST		(1 << 0)	/* UDC soft reset */

/* UDC CSR per-endpoint 'NE' field shifts and masks */
#define UDC_CSR_NE_NUM_SHIFT		0	/* endpoint number */
#define UDC_CSR_NE_DIR_SHIFT		4	/* direction (1 = IN) */
#define UDC_CSR_NE_TYPE_SHIFT		5	/* endpoint type */
#define UDC_CSR_NE_CFG_SHIFT		7	/* configuration */
#define UDC_CSR_NE_INTF_SHIFT		11	/* interface */
#define UDC_CSR_NE_ALT_SHIFT		15	/* alternate setting */
#define UDC_CSR_NE_MAX_PKT_SHIFT	19	/* maximum packet size */

#define UDC_CSR_NE_NUM_MASK		0x0000000f
#define UDC_CSR_NE_DIR_MASK		0x00000010
#define UDC_CSR_NE_TYPE_MASK		0x00000060
#define UDC_CSR_NE_CFG_MASK		0x00000780
#define UDC_CSR_NE_INTF_MASK		0x00007800
#define UDC_CSR_NE_ALT_MASK		0x00078000
#define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000

#define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
#define PCH_UDC_EPINT(in, num)\
		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))

/* Endpoint indexes into the dev->ep[] array */
#define UDC_EP0IN_IDX		0
#define UDC_EP0OUT_IDX		1
#define UDC_EPIN_IDX(ep)	(ep * 2)
#define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
#define PCH_UDC_EP0		0
#define PCH_UDC_EP1		1
#define PCH_UDC_EP2		2
#define PCH_UDC_EP3		3

/* Number of endpoints */
#define PCH_UDC_EP_NUM		32	/* total number of endpoints */
#define PCH_UDC_USED_EP_NUM	4	/* endpoint pairs actually used by the driver */
/* Burst and threshold length values */
#define PCH_UDC_BRLEN		0x0F	/* burst length */
#define PCH_UDC_THLEN		0x1F	/* threshold length */
/* Endpoint buffer sizes */
#define UDC_EP0IN_BUFF_SIZE	16
#define UDC_EPIN_BUFF_SIZE	256
#define UDC_EP0OUT_BUFF_SIZE	16
#define UDC_EPOUT_BUFF_SIZE	256
/* Endpoint maximum packet sizes */
#define UDC_EP0IN_MAX_PKT_SIZE	64
#define UDC_EP0OUT_MAX_PKT_SIZE	64
#define UDC_BULK_MAX_PKT_SIZE	512

/* DMA directions */
#define DMA_DIR_RX		1	/* DMA for data receive */
#define DMA_DIR_TX		2	/* DMA for data transmit */
#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
#define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */

/**
 * struct pch_udc_data_dma_desc - DMA descriptor for a data transfer
 * @status:	status quadlet (buffer status, byte count, last flag)
 * @reserved:	reserved
 * @dataptr:	physical address of the data buffer
 * @next:	physical address of the next descriptor in the chain
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};

/**
 * struct pch_udc_stp_dma_desc - DMA descriptor for a SETUP packet
 * @status:	status quadlet
 * @reserved:	reserved
 * @request:	received USB control request
 */
struct pch_udc_stp_dma_desc {
	u32 status;
	u32 reserved;
	struct usb_ctrlrequest request;
} __attribute((packed));

/* DMA descriptor status definitions */
/* Buffer status */
#define PCH_UDC_BUFF_STS	0xC0000000
#define PCH_UDC_BS_HST_RDY	0x00000000
#define PCH_UDC_BS_DMA_BSY	0x40000000
#define PCH_UDC_BS_DMA_DONE	0x80000000
#define PCH_UDC_BS_HST_BSY	0xC0000000
/* Rx/Tx status */
#define PCH_UDC_RXTX_STS	0x30000000
#define PCH_UDC_RTS_SUCC	0x00000000
#define PCH_UDC_RTS_DESERR	0x10000000
#define PCH_UDC_RTS_BUFERR	0x30000000
/* last descriptor indication */
#define PCH_UDC_DMA_LAST	0x08000000
/* number of Rx/Tx bytes mask */
#define PCH_UDC_RXTX_BYTES	0x0000ffff

/**
 * struct pch_udc_cfg_data - current configuration and interface information
 * @cur_cfg:	current configuration in use
 * @cur_intf:	current interface in use
 * @cur_alt:	current alternate setting in use
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};

/**
 * struct pch_udc_ep - driver state of one device endpoint
 * @ep:			embedded usb_ep
 * @td_stp_phys:	physical address of the SETUP descriptor
 * @td_data_phys:	physical address of the data descriptor
 * @td_stp:		SETUP descriptor
 * @td_data:		data descriptor
 * @dev:		reference to the device structure
 * @offset_addr:	offset of this endpoint's register bank
 * @queue:		queue of pending requests
 * @num:		endpoint number
 * @in:			endpoint direction is IN
 * @halted:		endpoint is halted
 * @epsts:		last read endpoint status
 */
struct pch_udc_ep {
	struct usb_ep			ep;
	dma_addr_t			td_stp_phys;
	dma_addr_t			td_data_phys;
	struct pch_udc_stp_dma_desc	*td_stp;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_dev		*dev;
	unsigned long			offset_addr;
	struct list_head		queue;
	unsigned			num:5,
					in:1,
					halted:1;
	unsigned long			epsts;
};

/**
 * struct pch_vbus_gpio_data - GPIO information used for VBUS detection
 * @port:		GPIO port number
 * @intr:		GPIO interrupt number
 * @irq_work_fall:	work item scheduled on a falling edge
 * @irq_work_rise:	work item scheduled on a rising edge
 */
struct pch_vbus_gpio_data {
	int			port;
	int			intr;
	struct work_struct	irq_work_fall;
	struct work_struct	irq_work_rise;
};

/**
 * struct pch_udc_dev - complete driver state of the PCH USB device controller
 * @gadget:		gadget driver data
 * @driver:		gadget driver currently bound
 * @pdev:		reference to the PCI device
 * @ep:			array of endpoints
 * @lock:		protects all state
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @suspended:		driver is suspended
 * @connected:		gadget driver associated
 * @vbus_session:	required VBUS session state
 * @set_cfg_not_acked:	pending acknowledgement for a SET_CONFIGURATION
 * @waiting_zlp_ack:	pending acknowledgement for a ZLP
 * @data_requests:	DMA pool for data descriptors
 * @stp_requests:	DMA pool for SETUP descriptors
 * @dma_addr:		DMA address of the EP0 OUT receive buffer
 * @setup_data:		received SETUP data
 * @base_addr:		mapped device register space
 * @cfg_data:		current configuration, interface and alternate setting
 * @vbus_gpio:		GPIO information for VBUS detection
 */
struct pch_udc_dev {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct pci_dev			*pdev;
	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
	spinlock_t			lock;	/* protects all state */
	unsigned
			stall:1,
			prot_stall:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct pci_pool			*data_requests;
	struct pci_pool			*stp_requests;
	dma_addr_t			dma_addr;
	struct usb_ctrlrequest		setup_data;
	void __iomem			*base_addr;
	struct pch_udc_cfg_data		cfg_data;
	struct pch_vbus_gpio_data	vbus_gpio;
};
#define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))

#define PCH_UDC_PCI_BAR_QUARK_X1000	0
#define PCH_UDC_PCI_BAR			1

#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
#define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808

#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808

static const char	ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock);
static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");

/**
 * struct pch_udc_request - driver state of one queued request
 * @req:		embedded usb_request
 * @td_data_phys:	physical address of the first data descriptor
 * @td_data:		first data descriptor of the chain
 * @td_data_last:	last data descriptor of the chain
 * @queue:		list head for the endpoint queue
 * @dma_going:		DMA is in progress for this request
 * @dma_mapped:		DMA memory is mapped for this request
 * @dma_done:		DMA completed for this request
 * @chain_len:		number of descriptors in the chain
 * @buf:		bounce buffer used for alignment adjustment
 * @dma:		DMA address of the bounce buffer
 */
struct pch_udc_request {
	struct usb_request		req;
	dma_addr_t			td_data_phys;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_data_dma_desc	*td_data_last;
	struct list_head		queue;
	unsigned			dma_going:1,
					dma_mapped:1,
					dma_done:1;
	unsigned			chain_len;
	void				*buf;
	dma_addr_t			dma;
};

static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}

static inline void pch_udc_writel(struct pch_udc_dev *dev,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}

static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}

static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}

static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}

static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}

static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}

static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}

/**
 * pch_udc_csr_busy() - Wait until the CSR bank is no longer busy
 * @dev:	Reference to pch_udc_dev structure
 */
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int count = 200;

	/* Wait till idle */
	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
		&& --count)
		cpu_relax();
	if (!count)
		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
}

/**
 * pch_udc_write_csr() - Write an endpoint's CSR (NE) register
 * @dev:	Reference to pch_udc_dev structure
 * @val:	value to be written
 * @ep:	endpoint index
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}

/**
 * pch_udc_read_csr() - Read an endpoint's CSR (NE) register
 * @dev:	Reference to pch_udc_dev structure
 * @ep:	endpoint index
 *
 * Return: content of the CSR register
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
510
511
512
513
514
515static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
516{
517 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
518 mdelay(1);
519 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
520}
521
522
523
524
525
526
527static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
528{
529 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
530 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
531}
532
533
534
535
536
537static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
538{
539 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
540}
541
542
543
544
545
546static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
547{
548 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
549}
550
551
552
553
554
555static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
556{
557 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
558}

/**
 * pch_udc_clear_disconnect() - Clear the soft disconnect (connect to the host)
 * @dev:	Reference to pch_udc_dev structure
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Assert resume signalling while clearing soft disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Stop driving resume signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}

/**
 * pch_udc_reconnect() - Re-initialize the device controller and reconnect
 *			 to the host.
 * @dev:	Reference to pch_udc_dev structure
 */
static void pch_udc_init(struct pch_udc_dev *dev);
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* enable reset and enumeration-done interrupts */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* clear the soft disconnect while asserting resume signalling */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* stop driving resume signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}

/**
 * pch_udc_vbus_session() - Handle a VBUS session change: reconnect when
 *			    VBUS becomes active, otherwise disconnect and
 *			    notify the gadget driver.
 * @dev:	Reference to pch_udc_dev structure
 * @is_active:	non-zero if VBUS is active
 */
604static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
605 int is_active)
606{
607 if (is_active) {
608 pch_udc_reconnect(dev);
609 dev->vbus_session = 1;
610 } else {
611 if (dev->driver && dev->driver->disconnect) {
612 spin_lock(&dev->lock);
613 dev->driver->disconnect(&dev->gadget);
614 spin_unlock(&dev->lock);
615 }
616 pch_udc_set_disconnect(dev);
617 dev->vbus_session = 0;
618 }
619}
620
621
622
623
624
625static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
626{
627 if (ep->in) {
628 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
629 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
630 } else {
631 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
632 }
633}
634
635
636
637
638
639static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
640{
641
642 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
643
644 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
645}
646
647
648
649
650
651
652static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
653 u8 type)
654{
655 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
656 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
657}
658
659
660
661
662
663
664static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
665 u32 buf_size, u32 ep_in)
666{
667 u32 data;
668 if (ep_in) {
669 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
670 data = (data & 0xffff0000) | (buf_size & 0xffff);
671 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
672 } else {
673 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
674 data = (buf_size << 16) | (data & 0xffff);
675 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
676 }
677}
678
679
680
681
682
683
684static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
685{
686 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
687 data = (data & 0xffff0000) | (pkt_size & 0xffff);
688 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
689}
690
691
692
693
694
695
696static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
697{
698 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
699}
700
701
702
703
704
705
706static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
707{
708 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
709}
710
711
712
713
714
715static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
716{
717 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
718}
719
720
721
722
723
724static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
725{
726 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
727}
728
729
730
731
732
733static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
734{
735 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
736}
737
738
739
740
741
742
743
744
745
746static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
747{
748 if (dir == DMA_DIR_RX)
749 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
750 else if (dir == DMA_DIR_TX)
751 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
752}
753
754
755
756
757
758
759
760
761
762static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
763{
764 if (dir == DMA_DIR_RX)
765 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
766 else if (dir == DMA_DIR_TX)
767 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
768}
769
770
771
772
773
774
775static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
776{
777 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
778}
779
780
781
782
783
784
785static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
786 u32 mask)
787{
788 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
789}
790
791
792
793
794
795
796static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
797 u32 mask)
798{
799 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
800}
801
802
803
804
805
806
807static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
808 u32 mask)
809{
810 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
811}
812
813
814
815
816
817
818static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
819 u32 mask)
820{
821 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
822}
823
824
825
826
827
828
829static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
830{
831 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
832}
833
834
835
836
837
838
839static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
840 u32 val)
841{
842 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
843}
844
845
846
847
848
849
850static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
851{
852 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
853}
854
855
856
857
858
859
860static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
861 u32 val)
862{
863 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
864}
865
866
867
868
869
870
871static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
872{
873 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
874}
875
876
877
878
879
880
881static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
882{
883 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
884}
885
886
887
888
889
890
891static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
892{
893 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
894}
895
896
897
898
899
900
901static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
902{
903 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
904}
905
906
907
908
909
910
911static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
912 u32 stat)
913{
914 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
915}
916
917
918
919
920
921
922static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
923{
924 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
925}

/**
 * pch_udc_ep_clear_nak() - Clear the NAK condition on an endpoint
 * @ep:	Reference to the endpoint structure
 *
 * The hardware may need repeated CNAK writes before the NAK bit clears.
 */
932static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
933{
934 unsigned int loopcnt = 0;
935 struct pch_udc_dev *dev = ep->dev;
936
937 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
938 return;
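	/* for OUT endpoints, wait for the receive FIFO to drain before issuing CNAK */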
939 if (!ep->in) {
940 loopcnt = 10000;
941 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
942 --loopcnt)
943 udelay(5);
944 if (!loopcnt)
945 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
946 __func__);
947 }
948 loopcnt = 10000;
949 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
950 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
951 udelay(5);
952 }
953 if (!loopcnt)
954 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
955 __func__, ep->num, (ep->in ? "in" : "out"));
956}
957
958
959
960
961
962
963
964
965static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
966{
967 if (dir) {
968 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
969 return;
970 }
971}

/**
 * pch_udc_ep_enable() - Configure and enable an endpoint
 * @ep:		Reference to the endpoint structure
 * @cfg:	current configuration/interface/alternate setting
 * @desc:	USB endpoint descriptor to program from
 */
978static void pch_udc_ep_enable(struct pch_udc_ep *ep,
979 struct pch_udc_cfg_data *cfg,
980 const struct usb_endpoint_descriptor *desc)
981{
982 u32 val = 0;
983 u32 buff_size = 0;
984
985 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
986 if (ep->in)
987 buff_size = UDC_EPIN_BUFF_SIZE;
988 else
989 buff_size = UDC_EPOUT_BUFF_SIZE;
990 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
991 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
992 pch_udc_ep_set_nak(ep);
993 pch_udc_ep_fifo_flush(ep, ep->in);
994
995 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
996 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
997 UDC_CSR_NE_TYPE_SHIFT) |
998 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
999 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1000 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1001 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1002
1003 if (ep->in)
1004 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1005 else
1006 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1007}
1008
1009
1010
1011
1012
1013static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1014{
1015 if (ep->in) {
1016
1017 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1018
1019 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1020 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1021 } else {
1022
1023 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1024 }
1025
1026 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1027}
1028
1029
1030
1031
1032
1033static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1034{
1035 unsigned int count = 10000;
1036
1037
1038 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1039 udelay(5);
1040 if (!count)
1041 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1042}

/**
 * pch_udc_init() - Soft-reset the device controller and program its basic
 *		    configuration (interrupt masks, speed, burst/threshold).
 * @dev:	Reference to pch_udc_dev structure
 */
1048static void pch_udc_init(struct pch_udc_dev *dev)
1049{
1050 if (NULL == dev) {
1051 pr_err("%s: Invalid address\n", __func__);
1052 return;
1053 }
1054
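	/* assert the UDC and (presumably, from the PSRST name) PHY soft resets, then release them */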
1055 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1056 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1057 mdelay(1);
1058 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1059 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1060 mdelay(1);
1061
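	/* mask and clear (write 1) all device interrupts */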
1062 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1063 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1064
1065
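	/* mask and clear all endpoint interrupts */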
1066 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1067 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1068
1069
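	/* enable CSR programming, set self-powered and select the device speed */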
1070 if (speed_fs)
1071 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1072 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1073 else
1074 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1075 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1076 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1077 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1078 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1079 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1080 UDC_DEVCTL_THE);
1081}
1082
1083
1084
1085
1086
1087static void pch_udc_exit(struct pch_udc_dev *dev)
1088{
1089
1090 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1091
1092 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1093
1094 pch_udc_set_disconnect(dev);
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1106{
1107 struct pch_udc_dev *dev;
1108
1109 if (!gadget)
1110 return -EINVAL;
1111 dev = container_of(gadget, struct pch_udc_dev, gadget);
1112 return pch_udc_get_frame(dev);
1113}
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1124{
1125 struct pch_udc_dev *dev;
1126 unsigned long flags;
1127
1128 if (!gadget)
1129 return -EINVAL;
1130 dev = container_of(gadget, struct pch_udc_dev, gadget);
1131 spin_lock_irqsave(&dev->lock, flags);
1132 pch_udc_rmt_wakeup(dev);
1133 spin_unlock_irqrestore(&dev->lock, flags);
1134 return 0;
1135}
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1148{
1149 struct pch_udc_dev *dev;
1150
1151 if (!gadget)
1152 return -EINVAL;
1153 gadget->is_selfpowered = (value != 0);
1154 dev = container_of(gadget, struct pch_udc_dev, gadget);
1155 if (value)
1156 pch_udc_set_selfpowered(dev);
1157 else
1158 pch_udc_clear_selfpowered(dev);
1159 return 0;
1160}
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1173{
1174 struct pch_udc_dev *dev;
1175
1176 if (!gadget)
1177 return -EINVAL;
1178 dev = container_of(gadget, struct pch_udc_dev, gadget);
1179 if (is_on) {
1180 pch_udc_reconnect(dev);
1181 } else {
1182 if (dev->driver && dev->driver->disconnect) {
1183 spin_lock(&dev->lock);
1184 dev->driver->disconnect(&dev->gadget);
1185 spin_unlock(&dev->lock);
1186 }
1187 pch_udc_set_disconnect(dev);
1188 }
1189
1190 return 0;
1191}
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1205{
1206 struct pch_udc_dev *dev;
1207
1208 if (!gadget)
1209 return -EINVAL;
1210 dev = container_of(gadget, struct pch_udc_dev, gadget);
1211 pch_udc_vbus_session(dev, is_active);
1212 return 0;
1213}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1227{
1228 return -EOPNOTSUPP;
1229}
1230
1231static int pch_udc_start(struct usb_gadget *g,
1232 struct usb_gadget_driver *driver);
1233static int pch_udc_stop(struct usb_gadget *g);
1234
1235static const struct usb_gadget_ops pch_udc_ops = {
1236 .get_frame = pch_udc_pcd_get_frame,
1237 .wakeup = pch_udc_pcd_wakeup,
1238 .set_selfpowered = pch_udc_pcd_selfpowered,
1239 .pullup = pch_udc_pcd_pullup,
1240 .vbus_session = pch_udc_pcd_vbus_session,
1241 .vbus_draw = pch_udc_pcd_vbus_draw,
1242 .udc_start = pch_udc_start,
1243 .udc_stop = pch_udc_stop,
1244};
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1256{
1257 int vbus = 0;
1258
1259 if (dev->vbus_gpio.port)
1260 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1261 else
1262 vbus = -1;
1263
1264 return vbus;
1265}

/**
 * pch_vbus_gpio_work_fall() - Work item scheduled on a falling VBUS GPIO
 *			       edge; debounces the line and disconnects from
 *			       the host if VBUS really went away.
 * @irq_work:	work structure embedded in pch_vbus_gpio_data
 */
1273static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1274{
1275 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1276 struct pch_vbus_gpio_data, irq_work_fall);
1277 struct pch_udc_dev *dev =
1278 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1279 int vbus_saved = -1;
1280 int vbus;
1281 int count;
1282
1283 if (!dev->vbus_gpio.port)
1284 return;
1285
1286 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1287 count++) {
1288 vbus = pch_vbus_gpio_get_value(dev);
1289
1290 if ((vbus_saved == vbus) && (vbus == 0)) {
1291 dev_dbg(&dev->pdev->dev, "VBUS fell");
1292 if (dev->driver
1293 && dev->driver->disconnect) {
1294 dev->driver->disconnect(
1295 &dev->gadget);
1296 }
1297 if (dev->vbus_gpio.intr)
1298 pch_udc_init(dev);
1299 else
1300 pch_udc_reconnect(dev);
1301 return;
1302 }
1303 vbus_saved = vbus;
1304 mdelay(PCH_VBUS_INTERVAL);
1305 }
1306}
1307
1308
1309
1310
1311
1312
1313
1314static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1315{
1316 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1317 struct pch_vbus_gpio_data, irq_work_rise);
1318 struct pch_udc_dev *dev =
1319 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1320 int vbus;
1321
1322 if (!dev->vbus_gpio.port)
1323 return;
1324
1325 mdelay(PCH_VBUS_INTERVAL);
1326 vbus = pch_vbus_gpio_get_value(dev);
1327
1328 if (vbus == 1) {
1329 dev_dbg(&dev->pdev->dev, "VBUS rose");
1330 pch_udc_reconnect(dev);
1331 return;
1332 }
1333}
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1345{
1346 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1347
1348 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1349 return IRQ_NONE;
1350
1351 if (pch_vbus_gpio_get_value(dev))
1352 schedule_work(&dev->vbus_gpio.irq_work_rise);
1353 else
1354 schedule_work(&dev->vbus_gpio.irq_work_fall);
1355
1356 return IRQ_HANDLED;
1357}

/**
 * pch_vbus_gpio_init() - Request the VBUS-detect GPIO and, when possible,
 *			  its interrupt.
 * @dev:		Reference to pch_udc_dev structure
 * @vbus_gpio_port:	GPIO port number to use for VBUS detection
 *
 * Return: 0 on success, -EINVAL if the GPIO is not usable.
 */
1368static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1369{
1370 int err;
1371 int irq_num = 0;
1372
1373 dev->vbus_gpio.port = 0;
1374 dev->vbus_gpio.intr = 0;
1375
1376 if (vbus_gpio_port <= -1)
1377 return -EINVAL;
1378
1379 err = gpio_is_valid(vbus_gpio_port);
1380 if (!err) {
1381 pr_err("%s: gpio port %d is invalid\n",
1382 __func__, vbus_gpio_port);
1383 return -EINVAL;
1384 }
1385
1386 err = gpio_request(vbus_gpio_port, "pch_vbus");
1387 if (err) {
1388 pr_err("%s: can't request gpio port %d, err: %d\n",
1389 __func__, vbus_gpio_port, err);
1390 return -EINVAL;
1391 }
1392
1393 dev->vbus_gpio.port = vbus_gpio_port;
1394 gpio_direction_input(vbus_gpio_port);
1395 INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1396
1397 irq_num = gpio_to_irq(vbus_gpio_port);
1398 if (irq_num > 0) {
1399 irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1400 err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1401 "vbus_detect", dev);
1402 if (!err) {
1403 dev->vbus_gpio.intr = irq_num;
1404 INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1405 pch_vbus_gpio_work_rise);
1406 } else {
1407 pr_err("%s: can't request irq %d, err: %d\n",
1408 __func__, irq_num, err);
1409 }
1410 }
1411
1412 return 0;
1413}
1414
1415
1416
1417
1418
1419static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1420{
1421 if (dev->vbus_gpio.intr)
1422 free_irq(dev->vbus_gpio.intr, dev);
1423
1424 if (dev->vbus_gpio.port)
1425 gpio_free(dev->vbus_gpio.port);
1426}

/**
 * complete_req() - Retire a request: unmap its DMA buffer and call the
 *		    gadget driver's completion handler with the device lock
 *		    temporarily dropped.
 * @ep:		endpoint the request was queued on
 * @req:	request to complete
 * @status:	status to report if the request is still -EINPROGRESS
 */
1435static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1436 int status)
1437 __releases(&dev->lock)
1438 __acquires(&dev->lock)
1439{
1440 struct pch_udc_dev *dev;
1441 unsigned halted = ep->halted;
1442
1443 list_del_init(&req->queue);
1444
1445
1446 if (req->req.status == -EINPROGRESS)
1447 req->req.status = status;
1448 else
1449 status = req->req.status;
1450
1451 dev = ep->dev;
1452 if (req->dma_mapped) {
1453 if (req->dma == DMA_ADDR_INVALID) {
1454 if (ep->in)
1455 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1456 req->req.length,
1457 DMA_TO_DEVICE);
1458 else
1459 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1460 req->req.length,
1461 DMA_FROM_DEVICE);
1462 req->req.dma = DMA_ADDR_INVALID;
1463 } else {
1464 if (ep->in)
1465 dma_unmap_single(&dev->pdev->dev, req->dma,
1466 req->req.length,
1467 DMA_TO_DEVICE);
1468 else {
1469 dma_unmap_single(&dev->pdev->dev, req->dma,
1470 req->req.length,
1471 DMA_FROM_DEVICE);
1472 memcpy(req->req.buf, req->buf, req->req.length);
1473 }
1474 kfree(req->buf);
1475 req->dma = DMA_ADDR_INVALID;
1476 }
1477 req->dma_mapped = 0;
1478 }
1479 ep->halted = 1;
1480 spin_unlock(&dev->lock);
1481 if (!ep->in)
1482 pch_udc_ep_clear_rrdy(ep);
1483 usb_gadget_giveback_request(&ep->ep, &req->req);
1484 spin_lock(&dev->lock);
1485 ep->halted = halted;
1486}
1487
1488
1489
1490
1491
1492static void empty_req_queue(struct pch_udc_ep *ep)
1493{
1494 struct pch_udc_request *req;
1495
1496 ep->halted = 1;
1497 while (!list_empty(&ep->queue)) {
1498 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1499 complete_req(ep, req, -ESHUTDOWN);
1500 }
1501}
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1513 struct pch_udc_request *req)
1514{
1515 struct pch_udc_data_dma_desc *td = req->td_data;
1516 unsigned i = req->chain_len;
1517
1518 dma_addr_t addr2;
1519 dma_addr_t addr = (dma_addr_t)td->next;
1520 td->next = 0x00;
1521 for (; i > 1; --i) {
1522
1523 td = phys_to_virt(addr);
1524 addr2 = (dma_addr_t)td->next;
1525 pci_pool_free(dev->data_requests, td, addr);
1526 addr = addr2;
1527 }
1528 req->chain_len = 1;
1529}

/**
 * pch_udc_create_dma_chain() - Build a chain of DMA data descriptors for a
 *				request, each covering at most @buf_len bytes.
 * @ep:		endpoint the request belongs to
 * @req:	request to build the chain for
 * @buf_len:	maximum number of bytes per descriptor
 * @gfp_flags:	allocation flags
 *
 * Return: 0 on success, -ENOMEM if a descriptor allocation fails.
 */
1543static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1544 struct pch_udc_request *req,
1545 unsigned long buf_len,
1546 gfp_t gfp_flags)
1547{
1548 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1549 unsigned long bytes = req->req.length, i = 0;
1550 dma_addr_t dma_addr;
1551 unsigned len = 1;
1552
1553 if (req->chain_len > 1)
1554 pch_udc_free_dma_chain(ep->dev, req);
1555
1556 if (req->dma == DMA_ADDR_INVALID)
1557 td->dataptr = req->req.dma;
1558 else
1559 td->dataptr = req->dma;
1560
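	/* the first descriptor reuses req->td_data; extra ones come from the DMA pool */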
1561 td->status = PCH_UDC_BS_HST_BSY;
1562 for (; ; bytes -= buf_len, ++len) {
1563 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1564 if (bytes <= buf_len)
1565 break;
1566 last = td;
1567 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1568 &dma_addr);
1569 if (!td)
1570 goto nomem;
1571 i += buf_len;
1572 td->dataptr = req->td_data->dataptr + i;
1573 last->next = dma_addr;
1574 }
1575
1576 req->td_data_last = td;
1577 td->status |= PCH_UDC_DMA_LAST;
1578 td->next = req->td_data_phys;
1579 req->chain_len = len;
1580 return 0;
1581
1582nomem:
1583 if (len > 1) {
1584 req->chain_len = len;
1585 pch_udc_free_dma_chain(ep->dev, req);
1586 }
1587 req->chain_len = 1;
1588 return -ENOMEM;
1589}

/**
 * prepare_dma() - Create the DMA descriptor chain for a request and, for IN
 *		   endpoints, mark the first descriptor host-ready.
 * @ep:		endpoint the request is queued on
 * @req:	request being prepared
 * @gfp:	allocation flags
 *
 * Return: 0 on success, a negative error code otherwise.
 */
1602static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1603 gfp_t gfp)
1604{
1605 int retval;
1606
1607
1608 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1609 if (retval) {
1610 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1611 return retval;
1612 }
1613 if (ep->in)
1614 req->td_data->status = (req->td_data->status &
1615 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1616 return 0;
1617}
1618
1619
1620
1621
1622
1623
1624
1625static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1626{
1627 struct pch_udc_dev *dev = ep->dev;
1628
1629
1630 complete_req(ep, req, 0);
1631
1632
1633
1634
1635 if (dev->set_cfg_not_acked) {
1636 pch_udc_set_csr_done(dev);
1637 dev->set_cfg_not_acked = 0;
1638 }
1639
1640 if (!dev->stall && dev->waiting_zlp_ack) {
1641 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1642 dev->waiting_zlp_ack = 0;
1643 }
1644}

/**
 * pch_udc_start_rxrequest() - Hand an OUT request's descriptor chain to the
 *			       receive DMA engine and re-enable RX DMA.
 * @ep:		OUT endpoint to start the transfer on
 * @req:	request to receive into
 */
1651static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1652 struct pch_udc_request *req)
1653{
1654 struct pch_udc_data_dma_desc *td_data;
1655
1656 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1657 td_data = req->td_data;
1658
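	/* mark each descriptor in the chain host-ready before starting DMA */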
1659 while (1) {
1660 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1661 PCH_UDC_BS_HST_RDY;
1662 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1663 break;
1664 td_data = phys_to_virt(td_data->next);
1665 }
1666
1667 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1668 req->dma_going = 1;
1669 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1670 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1671 pch_udc_ep_clear_nak(ep);
1672 pch_udc_ep_set_rrdy(ep);
1673}
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1687 const struct usb_endpoint_descriptor *desc)
1688{
1689 struct pch_udc_ep *ep;
1690 struct pch_udc_dev *dev;
1691 unsigned long iflags;
1692
1693 if (!usbep || (usbep->name == ep0_string) || !desc ||
1694 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1695 return -EINVAL;
1696
1697 ep = container_of(usbep, struct pch_udc_ep, ep);
1698 dev = ep->dev;
1699 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1700 return -ESHUTDOWN;
1701 spin_lock_irqsave(&dev->lock, iflags);
1702 ep->ep.desc = desc;
1703 ep->halted = 0;
1704 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1705 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1706 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1707 spin_unlock_irqrestore(&dev->lock, iflags);
1708 return 0;
1709}
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1721{
1722 struct pch_udc_ep *ep;
1723 unsigned long iflags;
1724
1725 if (!usbep)
1726 return -EINVAL;
1727
1728 ep = container_of(usbep, struct pch_udc_ep, ep);
1729 if ((usbep->name == ep0_string) || !ep->ep.desc)
1730 return -EINVAL;
1731
1732 spin_lock_irqsave(&ep->dev->lock, iflags);
1733 empty_req_queue(ep);
1734 ep->halted = 1;
1735 pch_udc_ep_disable(ep);
1736 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1737 ep->ep.desc = NULL;
1738 INIT_LIST_HEAD(&ep->queue);
1739 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1740 return 0;
1741}
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1754 gfp_t gfp)
1755{
1756 struct pch_udc_request *req;
1757 struct pch_udc_ep *ep;
1758 struct pch_udc_data_dma_desc *dma_desc;
1759
1760 if (!usbep)
1761 return NULL;
1762 ep = container_of(usbep, struct pch_udc_ep, ep);
1763 req = kzalloc(sizeof *req, gfp);
1764 if (!req)
1765 return NULL;
1766 req->req.dma = DMA_ADDR_INVALID;
1767 req->dma = DMA_ADDR_INVALID;
1768 INIT_LIST_HEAD(&req->queue);
1769 if (!ep->dev->dma_addr)
1770 return &req->req;
1771
1772 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1773 &req->td_data_phys);
1774 if (NULL == dma_desc) {
1775 kfree(req);
1776 return NULL;
1777 }
1778
1779 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1780 dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1781 req->td_data = dma_desc;
1782 req->td_data_last = dma_desc;
1783 req->chain_len = 1;
1784 return &req->req;
1785}
1786
1787
1788
1789
1790
1791
1792
1793static void pch_udc_free_request(struct usb_ep *usbep,
1794 struct usb_request *usbreq)
1795{
1796 struct pch_udc_ep *ep;
1797 struct pch_udc_request *req;
1798 struct pch_udc_dev *dev;
1799
1800 if (!usbep || !usbreq)
1801 return;
1802 ep = container_of(usbep, struct pch_udc_ep, ep);
1803 req = container_of(usbreq, struct pch_udc_request, req);
1804 dev = ep->dev;
1805 if (!list_empty(&req->queue))
1806 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1807 __func__, usbep->name, req);
1808 if (req->td_data != NULL) {
1809 if (req->chain_len > 1)
1810 pch_udc_free_dma_chain(ep->dev, req);
1811 pci_pool_free(ep->dev->data_requests, req->td_data,
1812 req->td_data_phys);
1813 }
1814 kfree(req);
1815}

/**
 * pch_udc_pcd_queue() - usb_ep_ops queue callback: map the request buffer
 *			 for DMA, build the descriptor chain and start the
 *			 transfer if the endpoint is idle.
 * @usbep:	endpoint to queue the request on
 * @usbreq:	request to queue
 * @gfp:	allocation flags
 *
 * Return: 0 on success, a negative error code otherwise.
 */
1828static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1829 gfp_t gfp)
1830{
1831 int retval = 0;
1832 struct pch_udc_ep *ep;
1833 struct pch_udc_dev *dev;
1834 struct pch_udc_request *req;
1835 unsigned long iflags;
1836
1837 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1838 return -EINVAL;
1839 ep = container_of(usbep, struct pch_udc_ep, ep);
1840 dev = ep->dev;
1841 if (!ep->ep.desc && ep->num)
1842 return -EINVAL;
1843 req = container_of(usbreq, struct pch_udc_request, req);
1844 if (!list_empty(&req->queue))
1845 return -EINVAL;
1846 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1847 return -ESHUTDOWN;
1848 spin_lock_irqsave(&dev->lock, iflags);
1849
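	/* map the buffer for DMA; buffers that are not 32-bit aligned are bounced
	 * through a driver-allocated buffer */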
1850 if (usbreq->length &&
1851 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1852 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1853 if (ep->in)
1854 usbreq->dma = dma_map_single(&dev->pdev->dev,
1855 usbreq->buf,
1856 usbreq->length,
1857 DMA_TO_DEVICE);
1858 else
1859 usbreq->dma = dma_map_single(&dev->pdev->dev,
1860 usbreq->buf,
1861 usbreq->length,
1862 DMA_FROM_DEVICE);
1863 } else {
1864 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1865 if (!req->buf) {
1866 retval = -ENOMEM;
1867 goto probe_end;
1868 }
1869 if (ep->in) {
1870 memcpy(req->buf, usbreq->buf, usbreq->length);
1871 req->dma = dma_map_single(&dev->pdev->dev,
1872 req->buf,
1873 usbreq->length,
1874 DMA_TO_DEVICE);
1875 } else
1876 req->dma = dma_map_single(&dev->pdev->dev,
1877 req->buf,
1878 usbreq->length,
1879 DMA_FROM_DEVICE);
1880 }
1881 req->dma_mapped = 1;
1882 }
1883 if (usbreq->length > 0) {
1884 retval = prepare_dma(ep, req, GFP_ATOMIC);
1885 if (retval)
1886 goto probe_end;
1887 }
1888 usbreq->actual = 0;
1889 usbreq->status = -EINPROGRESS;
1890 req->dma_done = 0;
1891 if (list_empty(&ep->queue) && !ep->halted) {
1892
1893 if (!usbreq->length) {
1894 process_zlp(ep, req);
1895 retval = 0;
1896 goto probe_end;
1897 }
1898 if (!ep->in) {
1899 pch_udc_start_rxrequest(ep, req);
1900 } else {
1901
1902
1903
1904
1905
1906 pch_udc_wait_ep_stall(ep);
1907 pch_udc_ep_clear_nak(ep);
1908 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1909 }
1910 }
1911
1912 if (req != NULL)
1913 list_add_tail(&req->queue, &ep->queue);
1914
1915probe_end:
1916 spin_unlock_irqrestore(&dev->lock, iflags);
1917 return retval;
1918}
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1931 struct usb_request *usbreq)
1932{
1933 struct pch_udc_ep *ep;
1934 struct pch_udc_request *req;
1935 unsigned long flags;
1936 int ret = -EINVAL;
1937
1938 ep = container_of(usbep, struct pch_udc_ep, ep);
1939 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1940 return ret;
1941 req = container_of(usbreq, struct pch_udc_request, req);
1942 spin_lock_irqsave(&ep->dev->lock, flags);
1943
1944 list_for_each_entry(req, &ep->queue, queue) {
1945 if (&req->req == usbreq) {
1946 pch_udc_ep_set_nak(ep);
1947 if (!list_empty(&req->queue))
1948 complete_req(ep, req, -ECONNRESET);
1949 ret = 0;
1950 break;
1951 }
1952 }
1953 spin_unlock_irqrestore(&ep->dev->lock, flags);
1954 return ret;
1955}
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1968{
1969 struct pch_udc_ep *ep;
1970 unsigned long iflags;
1971 int ret;
1972
1973 if (!usbep)
1974 return -EINVAL;
1975 ep = container_of(usbep, struct pch_udc_ep, ep);
1976 if (!ep->ep.desc && !ep->num)
1977 return -EINVAL;
1978 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1979 return -ESHUTDOWN;
1980 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1981 if (list_empty(&ep->queue)) {
1982 if (halt) {
1983 if (ep->num == PCH_UDC_EP0)
1984 ep->dev->stall = 1;
1985 pch_udc_ep_set_stall(ep);
1986 pch_udc_enable_ep_interrupts(
1987 ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1988 } else {
1989 pch_udc_ep_clear_stall(ep);
1990 }
1991 ret = 0;
1992 } else {
1993 ret = -EAGAIN;
1994 }
1995 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1996 return ret;
1997}
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2010{
2011 struct pch_udc_ep *ep;
2012 unsigned long iflags;
2013 int ret;
2014
2015 if (!usbep)
2016 return -EINVAL;
2017 ep = container_of(usbep, struct pch_udc_ep, ep);
2018 if (!ep->ep.desc && !ep->num)
2019 return -EINVAL;
2020 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2021 return -ESHUTDOWN;
2022 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2023 if (!list_empty(&ep->queue)) {
2024 ret = -EAGAIN;
2025 } else {
2026 if (ep->num == PCH_UDC_EP0)
2027 ep->dev->stall = 1;
2028 pch_udc_ep_set_stall(ep);
2029 pch_udc_enable_ep_interrupts(ep->dev,
2030 PCH_UDC_EPINT(ep->in, ep->num));
2031 ep->dev->prot_stall = 1;
2032 ret = 0;
2033 }
2034 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2035 return ret;
2036}
2037
2038
2039
2040
2041
2042static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2043{
2044 struct pch_udc_ep *ep;
2045
2046 if (!usbep)
2047 return;
2048
2049 ep = container_of(usbep, struct pch_udc_ep, ep);
2050 if (ep->ep.desc || !ep->num)
2051 pch_udc_ep_fifo_flush(ep, ep->in);
2052}
2053
2054static const struct usb_ep_ops pch_udc_ep_ops = {
2055 .enable = pch_udc_pcd_ep_enable,
2056 .disable = pch_udc_pcd_ep_disable,
2057 .alloc_request = pch_udc_alloc_request,
2058 .free_request = pch_udc_free_request,
2059 .queue = pch_udc_pcd_queue,
2060 .dequeue = pch_udc_pcd_dequeue,
2061 .set_halt = pch_udc_pcd_set_halt,
2062 .set_wedge = pch_udc_pcd_set_wedge,
2063 .fifo_status = NULL,
2064 .fifo_flush = pch_udc_pcd_fifo_flush,
2065};
2066
2067
2068
2069
2070
2071static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2072{
2073 static u32 pky_marker;
2074
2075 if (!td_stp)
2076 return;
2077 td_stp->reserved = ++pky_marker;
2078 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2079 td_stp->status = PCH_UDC_BS_HST_RDY;
2080}
2081
2082
2083
2084
2085
2086
2087static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2088{
2089 struct pch_udc_request *req;
2090 struct pch_udc_data_dma_desc *td_data;
2091
2092 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2093 return;
2094
2095 if (list_empty(&ep->queue))
2096 return;
2097
2098
2099 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2100 if (req->dma_going)
2101 return;
2102 if (!req->td_data)
2103 return;
2104 pch_udc_wait_ep_stall(ep);
2105 req->dma_going = 1;
2106 pch_udc_ep_set_ddptr(ep, 0);
2107 td_data = req->td_data;
2108 while (1) {
2109 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2110 PCH_UDC_BS_HST_RDY;
2111 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2112 break;
2113 td_data = phys_to_virt(td_data->next);
2114 }
2115 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2116 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2117 pch_udc_ep_set_pd(ep);
2118 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2119 pch_udc_ep_clear_nak(ep);
2120}
2121
2122
2123
2124
2125
2126static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2127{
2128 struct pch_udc_request *req;
2129 struct pch_udc_dev *dev = ep->dev;
2130
2131 if (list_empty(&ep->queue))
2132 return;
2133 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2134 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2135 PCH_UDC_BS_DMA_DONE)
2136 return;
2137 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2138 PCH_UDC_RTS_SUCC) {
2139 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2140 "epstatus=0x%08x\n",
2141 (req->td_data_last->status & PCH_UDC_RXTX_STS),
2142 (int)(ep->epsts));
2143 return;
2144 }
2145
2146 req->req.actual = req->req.length;
2147 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2148 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2149 complete_req(ep, req, 0);
2150 req->dma_going = 0;
2151 if (!list_empty(&ep->queue)) {
2152 pch_udc_wait_ep_stall(ep);
2153 pch_udc_ep_clear_nak(ep);
2154 pch_udc_enable_ep_interrupts(ep->dev,
2155 PCH_UDC_EPINT(ep->in, ep->num));
2156 } else {
2157 pch_udc_disable_ep_interrupts(ep->dev,
2158 PCH_UDC_EPINT(ep->in, ep->num));
2159 }
2160}

/**
 * pch_udc_complete_receiver() - Walk the descriptor chain of the current OUT
 *				 request, account the received bytes and
 *				 complete the request.
 * @ep:	Reference to the OUT endpoint
 */
2166static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2167{
2168 struct pch_udc_request *req;
2169 struct pch_udc_dev *dev = ep->dev;
2170 unsigned int count;
2171 struct pch_udc_data_dma_desc *td;
2172 dma_addr_t addr;
2173
2174 if (list_empty(&ep->queue))
2175 return;
2176
2177 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2178 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2179 pch_udc_ep_set_ddptr(ep, 0);
2180 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2181 PCH_UDC_BS_DMA_DONE)
2182 td = req->td_data_last;
2183 else
2184 td = req->td_data;
2185
2186 while (1) {
2187 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2188 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2189 "epstatus=0x%08x\n",
2190 (req->td_data->status & PCH_UDC_RXTX_STS),
2191 (int)(ep->epsts));
2192 return;
2193 }
2194 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2195 if (td->status & PCH_UDC_DMA_LAST) {
2196 count = td->status & PCH_UDC_RXTX_BYTES;
2197 break;
2198 }
2199 if (td == req->td_data_last) {
2200 dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2201 return;
2202 }
2203 addr = (dma_addr_t)td->next;
2204 td = phys_to_virt(addr);
2205 }
2206
2207 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2208 count = UDC_DMA_MAXPACKET;
2209 req->td_data->status |= PCH_UDC_DMA_LAST;
2210 td->status |= PCH_UDC_BS_HST_BSY;
2211
2212 req->dma_going = 0;
2213 req->req.actual = count;
2214 complete_req(ep, req, 0);
2215
2216 if (!list_empty(&ep->queue)) {
2217 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2218 pch_udc_start_rxrequest(ep, req);
2219 }
2220}

/**
 * pch_udc_svc_data_in() - Service an IN endpoint interrupt (stall,
 *			   transmit-DMA-complete and IN-token events).
 * @dev:	Reference to pch_udc_dev structure
 * @ep_num:	endpoint number
 */
2228static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2229{
2230 u32 epsts;
2231 struct pch_udc_ep *ep;
2232
2233 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2234 epsts = ep->epsts;
2235 ep->epsts = 0;
2236
2237 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2238 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2239 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2240 return;
2241 if ((epsts & UDC_EPSTS_BNA))
2242 return;
2243 if (epsts & UDC_EPSTS_HE)
2244 return;
2245 if (epsts & UDC_EPSTS_RSS) {
2246 pch_udc_ep_set_stall(ep);
2247 pch_udc_enable_ep_interrupts(ep->dev,
2248 PCH_UDC_EPINT(ep->in, ep->num));
2249 }
2250 if (epsts & UDC_EPSTS_RCS) {
2251 if (!dev->prot_stall) {
2252 pch_udc_ep_clear_stall(ep);
2253 } else {
2254 pch_udc_ep_set_stall(ep);
2255 pch_udc_enable_ep_interrupts(ep->dev,
2256 PCH_UDC_EPINT(ep->in, ep->num));
2257 }
2258 }
2259 if (epsts & UDC_EPSTS_TDC)
2260 pch_udc_complete_transfer(ep);
2261
2262 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2263 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2264 pch_udc_start_next_txrequest(ep);
2265}

/**
 * pch_udc_svc_data_out() - Service an OUT endpoint interrupt (buffer not
 *			    available, stall and received-data events).
 * @dev:	Reference to pch_udc_dev structure
 * @ep_num:	endpoint number
 */
2272static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2273{
2274 u32 epsts;
2275 struct pch_udc_ep *ep;
2276 struct pch_udc_request *req = NULL;
2277
2278 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2279 epsts = ep->epsts;
2280 ep->epsts = 0;
2281
2282 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2283
2284 req = list_entry(ep->queue.next, struct pch_udc_request,
2285 queue);
2286 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2287 PCH_UDC_BS_DMA_DONE) {
2288 if (!req->dma_going)
2289 pch_udc_start_rxrequest(ep, req);
2290 return;
2291 }
2292 }
2293 if (epsts & UDC_EPSTS_HE)
2294 return;
2295 if (epsts & UDC_EPSTS_RSS) {
2296 pch_udc_ep_set_stall(ep);
2297 pch_udc_enable_ep_interrupts(ep->dev,
2298 PCH_UDC_EPINT(ep->in, ep->num));
2299 }
2300 if (epsts & UDC_EPSTS_RCS) {
2301 if (!dev->prot_stall) {
2302 pch_udc_ep_clear_stall(ep);
2303 } else {
2304 pch_udc_ep_set_stall(ep);
2305 pch_udc_enable_ep_interrupts(ep->dev,
2306 PCH_UDC_EPINT(ep->in, ep->num));
2307 }
2308 }
2309 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2310 UDC_EPSTS_OUT_DATA) {
2311 if (ep->dev->prot_stall == 1) {
2312 pch_udc_ep_set_stall(ep);
2313 pch_udc_enable_ep_interrupts(ep->dev,
2314 PCH_UDC_EPINT(ep->in, ep->num));
2315 } else {
2316 pch_udc_complete_receiver(ep);
2317 }
2318 }
2319 if (list_empty(&ep->queue))
2320 pch_udc_set_dma(dev, DMA_DIR_RX);
2321}
2322
2323
2324
2325
2326
2327static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2328{
2329 u32 epsts;
2330 struct pch_udc_ep *ep;
2331 struct pch_udc_ep *ep_out;
2332
2333 ep = &dev->ep[UDC_EP0IN_IDX];
2334 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2335 epsts = ep->epsts;
2336 ep->epsts = 0;
2337
2338 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2339 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2340 UDC_EPSTS_XFERDONE)))
2341 return;
2342 if ((epsts & UDC_EPSTS_BNA))
2343 return;
2344 if (epsts & UDC_EPSTS_HE)
2345 return;
2346 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2347 pch_udc_complete_transfer(ep);
2348 pch_udc_clear_dma(dev, DMA_DIR_RX);
2349 ep_out->td_data->status = (ep_out->td_data->status &
2350 ~PCH_UDC_BUFF_STS) |
2351 PCH_UDC_BS_HST_RDY;
2352 pch_udc_ep_clear_nak(ep_out);
2353 pch_udc_set_dma(dev, DMA_DIR_RX);
2354 pch_udc_ep_set_rrdy(ep_out);
2355 }
2356
2357 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2358 !(epsts & UDC_EPSTS_TXEMPTY))
2359 pch_udc_start_next_txrequest(ep);
2360}

/**
 * pch_udc_svc_control_out() - Service an EP0 OUT interrupt: pass SETUP
 *			       packets to the gadget driver and handle the
 *			       control data-out stage.
 * @dev:	Reference to pch_udc_dev structure
 */
2367static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2368 __releases(&dev->lock)
2369 __acquires(&dev->lock)
2370{
2371 u32 stat;
2372 int setup_supported;
2373 struct pch_udc_ep *ep;
2374
2375 ep = &dev->ep[UDC_EP0OUT_IDX];
2376 stat = ep->epsts;
2377 ep->epsts = 0;
2378
2379
2380 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2381 UDC_EPSTS_OUT_SETUP) {
2382 dev->stall = 0;
2383 dev->ep[UDC_EP0IN_IDX].halted = 0;
2384 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2385 dev->setup_data = ep->td_stp->request;
2386 pch_udc_init_setup_buff(ep->td_stp);
2387 pch_udc_clear_dma(dev, DMA_DIR_RX);
2388 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2389 dev->ep[UDC_EP0IN_IDX].in);
2390 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2391 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2392 else
2393 dev->gadget.ep0 = &ep->ep;
2394 spin_lock(&dev->lock);
2395
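		/* a request with bRequestType 0x21 and bRequest 0xFF is treated as
		 * clearing a previously set protocol stall */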
2396 if ((dev->setup_data.bRequestType == 0x21) &&
2397 (dev->setup_data.bRequest == 0xFF))
2398 dev->prot_stall = 0;
2399
2400 setup_supported = dev->driver->setup(&dev->gadget,
2401 &dev->setup_data);
2402 spin_unlock(&dev->lock);
2403
2404 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2405 ep->td_data->status = (ep->td_data->status &
2406 ~PCH_UDC_BUFF_STS) |
2407 PCH_UDC_BS_HST_RDY;
2408 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2409 }
2410
2411 if (setup_supported >= 0 && setup_supported <
2412 UDC_EP0IN_MAX_PKT_SIZE) {
2413 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2414
2415
2416 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2417 pch_udc_set_dma(dev, DMA_DIR_RX);
2418 pch_udc_ep_clear_nak(ep);
2419 }
2420 } else if (setup_supported < 0) {
2421
2422 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2423 pch_udc_enable_ep_interrupts(ep->dev,
2424 PCH_UDC_EPINT(ep->in, ep->num));
2425 dev->stall = 0;
2426 pch_udc_set_dma(dev, DMA_DIR_RX);
2427 } else {
2428 dev->waiting_zlp_ack = 1;
2429 }
2430 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2431 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2432 pch_udc_clear_dma(dev, DMA_DIR_RX);
2433 pch_udc_ep_set_ddptr(ep, 0);
2434 if (!list_empty(&ep->queue)) {
2435 ep->epsts = stat;
2436 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2437 }
2438 pch_udc_set_dma(dev, DMA_DIR_RX);
2439 }
2440 pch_udc_ep_set_rrdy(ep);
2441}
2442
2443
2444
2445
2446
2447
2448
2449
2450static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2451{
2452 struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2453 if (list_empty(&ep->queue))
2454 return;
2455 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2456 pch_udc_ep_clear_nak(ep);
2457}
2458
2459
2460
2461
2462
2463
2464static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2465{
2466 int i;
2467 struct pch_udc_ep *ep;
2468
2469 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2470
2471 if (ep_intr & (0x1 << i)) {
2472 ep = &dev->ep[UDC_EPIN_IDX(i)];
2473 ep->epsts = pch_udc_read_ep_status(ep);
2474 pch_udc_clear_ep_status(ep, ep->epsts);
2475 }
2476
2477 if (ep_intr & (0x10000 << i)) {
2478 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2479 ep->epsts = pch_udc_read_ep_status(ep);
2480 pch_udc_clear_ep_status(ep, ep->epsts);
2481 }
2482 }
2483}

/**
 * pch_udc_activate_control_ep() - Reset and program EP0 IN/OUT (buffer
 *				   sizes, packet sizes, SETUP and data
 *				   descriptors).
 * @dev:	Reference to pch_udc_dev structure
 */
2490static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2491{
2492 struct pch_udc_ep *ep;
2493 u32 val;
2494
2495
2496 ep = &dev->ep[UDC_EP0IN_IDX];
2497 pch_udc_clear_ep_control(ep);
2498 pch_udc_ep_fifo_flush(ep, ep->in);
2499 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2500 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2501
2502 ep->td_data = NULL;
2503 ep->td_stp = NULL;
2504 ep->td_data_phys = 0;
2505 ep->td_stp_phys = 0;
2506
2507
2508 ep = &dev->ep[UDC_EP0OUT_IDX];
2509 pch_udc_clear_ep_control(ep);
2510 pch_udc_ep_fifo_flush(ep, ep->in);
2511 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2512 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2513 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2514 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2515
2516
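	/* initialize the SETUP buffer and program the setup/data descriptor pointers */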
2517 pch_udc_init_setup_buff(ep->td_stp);
2518
2519 pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2520
2521 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2522
2523
2524 ep->td_data->status = PCH_UDC_DMA_LAST;
2525 ep->td_data->dataptr = dev->dma_addr;
2526 ep->td_data->next = ep->td_data_phys;
2527
2528 pch_udc_ep_clear_nak(ep);
2529}

/**
 * pch_udc_svc_ur_interrupt() - Handle a USB reset: stop DMA, mask endpoint
 *				interrupts, reset endpoint state and notify
 *				the gadget driver.
 * @dev:	Reference to pch_udc_dev structure
 */
2536static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2537{
2538 struct pch_udc_ep *ep;
2539 int i;
2540
2541 pch_udc_clear_dma(dev, DMA_DIR_TX);
2542 pch_udc_clear_dma(dev, DMA_DIR_RX);
2543
2544 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2545
2546 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2547
2548 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2549 ep = &dev->ep[i];
2550 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2551 pch_udc_clear_ep_control(ep);
2552 pch_udc_ep_set_ddptr(ep, 0);
2553 pch_udc_write_csr(ep->dev, 0x00, i);
2554 }
2555 dev->stall = 0;
2556 dev->prot_stall = 0;
2557 dev->waiting_zlp_ack = 0;
2558 dev->set_cfg_not_acked = 0;
2559
2560
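	/* NAK the used endpoints, flush their FIFOs and fail any queued requests */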
2561 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2562 ep = &dev->ep[i];
2563 pch_udc_ep_set_nak(ep);
2564 pch_udc_ep_fifo_flush(ep, ep->in);
2565
2566 empty_req_queue(ep);
2567 }
2568 if (dev->driver) {
2569 spin_unlock(&dev->lock);
2570 usb_gadget_udc_reset(&dev->gadget, dev->driver);
2571 spin_lock(&dev->lock);
2572 }
2573}

/**
 * pch_udc_svc_enum_interrupt() - Handle the speed-enumeration-done
 *				  interrupt: record the negotiated speed and
 *				  activate the control endpoints.
 * @dev:	Reference to pch_udc_dev structure
 */
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;

	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
			UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		BUG();
	}
	dev->gadget.speed = speed;
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));

	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}

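/**
 * pch_udc_svc_intf_interrupt() - Handle a Set Interface interrupt by
 *				  synthesizing a SET_INTERFACE request for the
 *				  gadget driver
 * @dev:	Reference to the device structure
 */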
static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
{
	u32 reg, dev_stat = 0;
	int i;

	dev_stat = pch_udc_read_device_status(dev);
	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
			UDC_DEVSTS_INTF_SHIFT;
	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
			UDC_DEVSTS_ALT_SHIFT;
	dev->set_cfg_not_acked = 1;

	/* Build a SET_INTERFACE request to hand to the gadget driver */
	memset(&dev->setup_data, 0, sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);

	/* Reflect the new interface and alternate setting in the EP0 OUT CSR */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
		(dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
		(dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	/* Clear stall on all endpoints before calling the gadget driver */
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;
	spin_unlock(&dev->lock);
	dev->driver->setup(&dev->gadget, &dev->setup_data);
	spin_lock(&dev->lock);
}

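/**
 * pch_udc_svc_cfg_interrupt() - Handle a Set Configuration interrupt by
 *				 synthesizing a SET_CONFIGURATION request for
 *				 the gadget driver
 * @dev:	Reference to the device structure
 */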
static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
{
	int i;
	u32 reg, dev_stat = 0;

	dev_stat = pch_udc_read_device_status(dev);
	dev->set_cfg_not_acked = 1;
	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
			UDC_DEVSTS_CFG_SHIFT;
	/* Build a SET_CONFIGURATION request to hand to the gadget driver */
	memset(&dev->setup_data, 0, sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);

	/* Reflect the new configuration in the EP0 OUT CSR */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
		(dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	/* Clear stall on all endpoints before calling the gadget driver */
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;

	spin_unlock(&dev->lock);
	dev->driver->setup(&dev->gadget, &dev->setup_data);
	spin_lock(&dev->lock);
}

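/**
 * pch_udc_dev_isr() - Dispatch device-level interrupts: reset, enumeration
 *		       done, set interface/configuration, suspend, SOF, idle
 *		       and remote wakeup
 * @dev:	Reference to the device structure
 * @dev_intr:	Pending device interrupt bits
 */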
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;

	/* USB Reset interrupt */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration Done interrupt */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}
	/* Set Interface interrupt */
	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);
	/* Set Configuration interrupt */
	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);
	/* USB Suspend interrupt */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
		/* No VBUS means the cable was unplugged; report a disconnect */
		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			if (dev->driver && dev->driver->disconnect) {
				spin_unlock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_lock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			schedule_work(&dev->vbus_gpio.irq_work_fall);

		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}
	/* Start of Frame interrupt */
	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");
	/* Idle state interrupt */
	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");
	/* Remote wakeup interrupt */
	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}

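/**
 * pch_udc_isr() - Top-level interrupt handler: acknowledge pending device and
 *		   endpoint interrupts and service them
 * @irq:	Interrupt request number
 * @pdev:	Pointer to the device structure registered with the interrupt
 *
 * Return: IRQ_HANDLED if an interrupt was serviced, IRQ_NONE otherwise
 */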
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;

	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);

	/*
	 * If the device and endpoint interrupt registers and DEVCFG all read
	 * back the same value, the controller has hung; issue a soft reset.
	 */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear ep interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process control IN interrupts */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process control OUT interrupts */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data IN endpoint interrupts */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 << i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data OUT endpoint interrupts */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
						UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

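/**
 * pch_udc_setup_ep0() - Enable the EP0 endpoint interrupts and the device
 *			 interrupts needed to start enumeration
 * @dev:	Reference to the device structure
 */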
static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
{
	/* enable ep0 interrupts */
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
						UDC_EPINT_OUT_EP0);
	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}

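/**
 * pch_udc_pcd_reinit() - Initialize the endpoint array and rebuild the
 *			  gadget's endpoint list
 * @dev:	Reference to the device structure
 */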
static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
{
	static const char *const ep_string[] = {
		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
		"ep15in", "ep15out",
	};
	int i;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* Initialize the endpoint structures */
	memset(dev->ep, 0, sizeof dev->ep);
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		struct pch_udc_ep *ep = &dev->ep[i];
		ep->dev = dev;
		ep->halted = 1;
		ep->num = i / 2;
		ep->in = ~i & 1;
		ep->ep.name = ep_string[i];
		ep->ep.ops = &pch_udc_ep_ops;
		if (ep->in) {
			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_in = true;
		} else {
			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
					UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_out = true;
		}
		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
		INIT_LIST_HEAD(&ep->queue);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep,
				   UDC_EP0IN_MAX_PKT_SIZE);
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep,
				   UDC_EP0OUT_MAX_PKT_SIZE);

	/* EP0 IN and OUT are not on the general list; they are reached
	 * through gadget.ep0 instead.
	 */
	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);

	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}

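/**
 * pch_udc_pcd_init() - Initialize the controller hardware, the endpoint
 *			structures and the optional VBUS GPIO
 * @dev:	Reference to the device structure
 *
 * Return: 0 on success
 */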
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);
	pch_vbus_gpio_init(dev, vbus_gpio_port);
	return 0;
}

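/**
 * init_dma_pools() - Create the DMA descriptor pools and allocate the EP0 OUT
 *		      setup/data descriptors and receive buffer
 * @dev:	Reference to the device structure
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure
 */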
static int init_dma_pools(struct pch_udc_dev *dev)
{
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	void *ep0out_buf;

	/* DMA descriptor pool for data requests */
	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
		sizeof(struct pch_udc_data_dma_desc), 0, 0);
	if (!dev->data_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
			__func__);
		return -ENOMEM;
	}

	/* DMA descriptor pool for setup requests */
	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
	if (!dev->stp_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
			__func__);
		return -ENOMEM;
	}
	/* Setup descriptor for EP0 OUT */
	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
	if (!td_stp) {
		dev_err(&dev->pdev->dev,
			"%s: can't allocate setup dma descriptor\n", __func__);
		return -ENOMEM;
	}
	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;

	/* Data descriptor for EP0 OUT */
	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
				 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
	if (!td_data) {
		dev_err(&dev->pdev->dev,
			"%s: can't allocate data dma descriptor\n", __func__);
		return -ENOMEM;
	}
	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
	/* EP0 IN does not use DMA descriptors */
	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;

	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
				  GFP_KERNEL);
	if (!ep0out_buf)
		return -ENOMEM;
	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
				       UDC_EP0OUT_BUFF_SIZE * 4,
				       DMA_FROM_DEVICE);
	/* Report mapping failures instead of handing the hardware a bad address */
	if (dma_mapping_error(&dev->pdev->dev, dev->dma_addr)) {
		dev->dma_addr = 0;
		return -ENOMEM;
	}
	return 0;
}

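/*
 * pch_udc_start() - udc_start callback: bind the gadget driver, arm the EP0
 * and device interrupts, and clear soft disconnect when VBUS is present (or
 * when no VBUS GPIO interrupt is in use).
 */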
static int pch_udc_start(struct usb_gadget *g,
			 struct usb_gadget_driver *driver)
{
	struct pch_udc_dev *dev = to_pch_udc(g);

	driver->driver.bus = NULL;
	dev->driver = driver;

	/* get ready for ep0 traffic */
	pch_udc_setup_ep0(dev);

	/* clear soft disconnect */
	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
		pch_udc_clear_disconnect(dev);

	dev->connected = 1;
	return 0;
}

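/*
 * pch_udc_stop() - udc_stop callback: mask device interrupts, detach the
 * gadget driver and assert soft disconnect.
 */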
static int pch_udc_stop(struct usb_gadget *g)
{
	struct pch_udc_dev *dev = to_pch_udc(g);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);

	/* Assures that there are no pending requests with this driver */
	dev->driver = NULL;
	dev->connected = 0;

	/* set soft disconnect */
	pch_udc_set_disconnect(dev);

	return 0;
}

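/*
 * pch_udc_shutdown() - Mask all interrupts and force a disconnect so the host
 * no longer sees the device after shutdown or reboot.
 */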
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	pch_udc_set_disconnect(dev);
}

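/*
 * pch_udc_remove() - Unregister the gadget, release the EP0 OUT descriptors
 * and DMA pools, unmap the EP0 OUT buffer and shut the controller down.
 */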
static void pch_udc_remove(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* gadget driver must not be registered */
	if (dev->driver)
		dev_err(&pdev->dev,
			"%s: gadget driver still bound!!!\n", __func__);

	/* The EP0 OUT data descriptor came from data_requests, so return it
	 * there before destroying that pool.
	 */
	if (dev->data_requests) {
		if (dev->ep[UDC_EP0OUT_IDX].td_data)
			pci_pool_free(dev->data_requests,
				      dev->ep[UDC_EP0OUT_IDX].td_data,
				      dev->ep[UDC_EP0OUT_IDX].td_data_phys);
		pci_pool_destroy(dev->data_requests);
	}

	/* Likewise free the EP0 OUT setup descriptor before its pool */
	if (dev->stp_requests) {
		if (dev->ep[UDC_EP0OUT_IDX].td_stp)
			pci_pool_free(dev->stp_requests,
				      dev->ep[UDC_EP0OUT_IDX].td_stp,
				      dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
		pci_pool_destroy(dev->stp_requests);
	}

	if (dev->dma_addr)
		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);

	pch_vbus_gpio_free(dev);

	pch_udc_exit(dev);
}

#ifdef CONFIG_PM_SLEEP
static int pch_udc_suspend(struct device *d)
{
	struct pci_dev *pdev = to_pci_dev(d);
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	return 0;
}

static int pch_udc_resume(struct device *d)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
#define PCH_UDC_PM_OPS (&pch_udc_pm)
#else
#define PCH_UDC_PM_OPS NULL
#endif

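/*
 * pch_udc_probe() - Enable and map the PCI device, initialize the controller,
 * request the interrupt, set up the DMA pools and register the UDC.
 */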
static int pch_udc_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int bar;
	int retval;
	struct pch_udc_dev *dev;

	/* init */
	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* pci setup */
	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	pci_set_drvdata(pdev, dev);

	/* Determine the BAR from the PCI ID */
	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
	else
		bar = PCH_UDC_PCI_BAR;

	/* PCI resource allocation */
	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
	if (retval)
		return retval;

	dev->base_addr = pcim_iomap_table(pdev)[bar];

	/* initialize the hardware */
	if (pch_udc_pcd_init(dev))
		return -ENODEV;

	pci_enable_msi(pdev);

	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
				  IRQF_SHARED, KBUILD_MODNAME, dev);
	if (retval) {
		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
			pdev->irq);
		goto finished;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* device struct setup */
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &pch_udc_ops;

	retval = init_dma_pools(dev);
	if (retval)
		goto finished;

	dev->gadget.name = KBUILD_MODNAME;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* Stay disconnected until a gadget driver binds */
	pch_udc_set_disconnect(dev);
	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (retval)
		goto finished;
	return 0;

finished:
	pch_udc_remove(pdev);
	return retval;
}

static const struct pci_device_id pch_udc_pcidev_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);

static struct pci_driver pch_udc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_udc_pcidev_id,
	.probe = pch_udc_probe,
	.remove = pch_udc_remove,
	.shutdown = pch_udc_shutdown,
	.driver = {
		.pm = PCH_UDC_PM_OPS,
	},
};

module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");