1
2
3
4
5
6
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/list.h>
15#include <linux/interrupt.h>
16#include <linux/usb/ch9.h>
17#include <linux/usb/gadget.h>
18#include <linux/gpio.h>
19#include <linux/irq.h>
20
21
22static int vbus_gpio_port = -1;
23
24#define PCH_VBUS_PERIOD 3000
25#define PCH_VBUS_INTERVAL 10
26
27
28#define UDC_EP_REG_SHIFT 0x20
29
30#define UDC_EPCTL_ADDR 0x00
31#define UDC_EPSTS_ADDR 0x04
32#define UDC_BUFIN_FRAMENUM_ADDR 0x08
33#define UDC_BUFOUT_MAXPKT_ADDR 0x0C
34#define UDC_SUBPTR_ADDR 0x10
35#define UDC_DESPTR_ADDR 0x14
36#define UDC_CONFIRM_ADDR 0x18
37
38#define UDC_DEVCFG_ADDR 0x400
39#define UDC_DEVCTL_ADDR 0x404
40#define UDC_DEVSTS_ADDR 0x408
41#define UDC_DEVIRQSTS_ADDR 0x40C
42#define UDC_DEVIRQMSK_ADDR 0x410
43#define UDC_EPIRQSTS_ADDR 0x414
44#define UDC_EPIRQMSK_ADDR 0x418
45#define UDC_DEVLPM_ADDR 0x41C
46#define UDC_CSR_BUSY_ADDR 0x4f0
47#define UDC_SRST_ADDR 0x4fc
48#define UDC_CSR_ADDR 0x500
49
50
51
52#define UDC_EPCTL_MRXFLUSH (1 << 12)
53#define UDC_EPCTL_RRDY (1 << 9)
54#define UDC_EPCTL_CNAK (1 << 8)
55#define UDC_EPCTL_SNAK (1 << 7)
56#define UDC_EPCTL_NAK (1 << 6)
57#define UDC_EPCTL_P (1 << 3)
58#define UDC_EPCTL_F (1 << 1)
59#define UDC_EPCTL_S (1 << 0)
60#define UDC_EPCTL_ET_SHIFT 4
61
62#define UDC_EPCTL_ET_MASK 0x00000030
63
64#define UDC_EPCTL_ET_CONTROL 0
65#define UDC_EPCTL_ET_ISO 1
66#define UDC_EPCTL_ET_BULK 2
67#define UDC_EPCTL_ET_INTERRUPT 3
68
69
70
71#define UDC_EPSTS_XFERDONE (1 << 27)
72#define UDC_EPSTS_RSS (1 << 26)
73#define UDC_EPSTS_RCS (1 << 25)
74#define UDC_EPSTS_TXEMPTY (1 << 24)
75#define UDC_EPSTS_TDC (1 << 10)
76#define UDC_EPSTS_HE (1 << 9)
77#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
78#define UDC_EPSTS_BNA (1 << 7)
79#define UDC_EPSTS_IN (1 << 6)
80#define UDC_EPSTS_OUT_SHIFT 4
81
82#define UDC_EPSTS_OUT_MASK 0x00000030
83#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
84
85#define UDC_EPSTS_OUT_SETUP 2
86#define UDC_EPSTS_OUT_DATA 1
87
88
89
90#define UDC_DEVCFG_CSR_PRG (1 << 17)
91#define UDC_DEVCFG_SP (1 << 3)
92
93#define UDC_DEVCFG_SPD_HS 0x0
94#define UDC_DEVCFG_SPD_FS 0x1
95#define UDC_DEVCFG_SPD_LS 0x2
96
97
98
99#define UDC_DEVCTL_THLEN_SHIFT 24
100#define UDC_DEVCTL_BRLEN_SHIFT 16
101#define UDC_DEVCTL_CSR_DONE (1 << 13)
102#define UDC_DEVCTL_SD (1 << 10)
103#define UDC_DEVCTL_MODE (1 << 9)
104#define UDC_DEVCTL_BREN (1 << 8)
105#define UDC_DEVCTL_THE (1 << 7)
106#define UDC_DEVCTL_DU (1 << 4)
107#define UDC_DEVCTL_TDE (1 << 3)
108#define UDC_DEVCTL_RDE (1 << 2)
109#define UDC_DEVCTL_RES (1 << 0)
110
111
112
113#define UDC_DEVSTS_TS_SHIFT 18
114#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
115#define UDC_DEVSTS_ALT_SHIFT 8
116#define UDC_DEVSTS_INTF_SHIFT 4
117#define UDC_DEVSTS_CFG_SHIFT 0
118
119#define UDC_DEVSTS_TS_MASK 0xfffc0000
120#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
121#define UDC_DEVSTS_ALT_MASK 0x00000f00
122#define UDC_DEVSTS_INTF_MASK 0x000000f0
123#define UDC_DEVSTS_CFG_MASK 0x0000000f
124
125#define UDC_DEVSTS_ENUM_SPEED_FULL 1
126#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
127#define UDC_DEVSTS_ENUM_SPEED_LOW 2
128#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
129
130
131
132#define UDC_DEVINT_RWKP (1 << 7)
133#define UDC_DEVINT_ENUM (1 << 6)
134#define UDC_DEVINT_SOF (1 << 5)
135#define UDC_DEVINT_US (1 << 4)
136#define UDC_DEVINT_UR (1 << 3)
137#define UDC_DEVINT_ES (1 << 2)
138#define UDC_DEVINT_SI (1 << 1)
139#define UDC_DEVINT_SC (1 << 0)
140
141#define UDC_DEVINT_MSK 0x7f
142
143
144
145#define UDC_EPINT_IN_SHIFT 0
146#define UDC_EPINT_OUT_SHIFT 16
147#define UDC_EPINT_IN_EP0 (1 << 0)
148#define UDC_EPINT_OUT_EP0 (1 << 16)
149
150#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
151
152
153
154#define UDC_CSR_BUSY (1 << 0)
155
156
157
158#define UDC_PSRST (1 << 1)
159#define UDC_SRST (1 << 0)
160
161
162
163#define UDC_CSR_NE_NUM_SHIFT 0
164#define UDC_CSR_NE_DIR_SHIFT 4
165#define UDC_CSR_NE_TYPE_SHIFT 5
166#define UDC_CSR_NE_CFG_SHIFT 7
167#define UDC_CSR_NE_INTF_SHIFT 11
168#define UDC_CSR_NE_ALT_SHIFT 15
169#define UDC_CSR_NE_MAX_PKT_SHIFT 19
170
171#define UDC_CSR_NE_NUM_MASK 0x0000000f
172#define UDC_CSR_NE_DIR_MASK 0x00000010
173#define UDC_CSR_NE_TYPE_MASK 0x00000060
174#define UDC_CSR_NE_CFG_MASK 0x00000780
175#define UDC_CSR_NE_INTF_MASK 0x00007800
176#define UDC_CSR_NE_ALT_MASK 0x00078000
177#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
178
179#define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
180#define PCH_UDC_EPINT(in, num)\
181 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
182
183
184#define UDC_EP0IN_IDX 0
185#define UDC_EP0OUT_IDX 1
186#define UDC_EPIN_IDX(ep) (ep * 2)
187#define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
188#define PCH_UDC_EP0 0
189#define PCH_UDC_EP1 1
190#define PCH_UDC_EP2 2
191#define PCH_UDC_EP3 3
192
193
194#define PCH_UDC_EP_NUM 32
195#define PCH_UDC_USED_EP_NUM 4
196
197#define PCH_UDC_BRLEN 0x0F
198#define PCH_UDC_THLEN 0x1F
199
200#define UDC_EP0IN_BUFF_SIZE 16
201#define UDC_EPIN_BUFF_SIZE 256
202#define UDC_EP0OUT_BUFF_SIZE 16
203#define UDC_EPOUT_BUFF_SIZE 256
204
205#define UDC_EP0IN_MAX_PKT_SIZE 64
206#define UDC_EP0OUT_MAX_PKT_SIZE 64
207#define UDC_BULK_MAX_PKT_SIZE 512
208
209
210#define DMA_DIR_RX 1
211#define DMA_DIR_TX 2
212#define DMA_ADDR_INVALID (~(dma_addr_t)0)
213#define UDC_DMA_MAXPACKET 65536
214
215
216
217
218
219
220
221
222
/**
 * struct pch_udc_data_dma_desc - DMA descriptor for a data transfer
 * @status:	buffer state (PCH_UDC_BS_*), result (PCH_UDC_RTS_*),
 *		last-descriptor flag (PCH_UDC_DMA_LAST) and byte count
 *		(PCH_UDC_RXTX_BYTES)
 * @reserved:	reserved
 * @dataptr:	physical address of the data buffer this descriptor covers
 * @next:	physical address of the next descriptor in the chain
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
229
230
231
232
233
234
235
236
237
238struct pch_udc_stp_dma_desc {
239 u32 status;
240 u32 reserved;
241 struct usb_ctrlrequest request;
242} __attribute((packed));
243
244
245
246#define PCH_UDC_BUFF_STS 0xC0000000
247#define PCH_UDC_BS_HST_RDY 0x00000000
248#define PCH_UDC_BS_DMA_BSY 0x40000000
249#define PCH_UDC_BS_DMA_DONE 0x80000000
250#define PCH_UDC_BS_HST_BSY 0xC0000000
251
252#define PCH_UDC_RXTX_STS 0x30000000
253#define PCH_UDC_RTS_SUCC 0x00000000
254#define PCH_UDC_RTS_DESERR 0x10000000
255#define PCH_UDC_RTS_BUFERR 0x30000000
256
257#define PCH_UDC_DMA_LAST 0x08000000
258
259#define PCH_UDC_RXTX_BYTES 0x0000ffff
260
261
262
263
264
265
266
267
/**
 * struct pch_udc_cfg_data - current device configuration state
 * @cur_cfg:	current configuration number
 * @cur_intf:	current interface number
 * @cur_alt:	current alternate setting
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
/**
 * struct pch_udc_ep - per-endpoint state for the PCH UDC
 * @ep:			embedded generic usb_ep
 * @td_stp_phys:	physical address of the SETUP descriptor
 * @td_data_phys:	physical address of the data descriptor
 * @td_stp:		virtual address of the SETUP descriptor
 * @td_data:		virtual address of the data descriptor
 * @dev:		back-reference to the owning device
 * @offset_addr:	byte offset of this EP's register window from base
 * @queue:		list of queued pch_udc_request
 * @num:		endpoint number
 * @in:			1 if this is an IN endpoint
 * @halted:		endpoint currently halted
 * @epsts:		last read endpoint status bits
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	struct list_head queue;
	unsigned num:5,
		 in:1,
		 halted:1;
	unsigned long epsts;
};
304
305
306
307
308
309
310
311
312
/**
 * struct pch_vbus_gpio_data - GPIO info used for VBUS detection
 * @port:		GPIO port number (0 = no VBUS GPIO configured)
 * @intr:		GPIO interrupt number (0 = polling only)
 * @irq_work_fall:	work item scheduled on a VBUS falling edge
 * @irq_work_rise:	work item scheduled on a VBUS rising edge
 */
struct pch_vbus_gpio_data {
	int port;
	int intr;
	struct work_struct irq_work_fall;
	struct work_struct irq_work_rise;
};
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
/**
 * struct pch_udc_dev - complete state of the PCH USB device controller
 * @gadget:		embedded usb_gadget
 * @driver:		gadget driver bound to this controller (may be NULL)
 * @pdev:		underlying PCI device
 * @ep:			endpoint array (IN/OUT interleaved, see UDC_EPIN_IDX)
 * @lock:		protects controller and endpoint state
 * @active:		PCI device enabled
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @irq_registered:	IRQ handler registered
 * @mem_region:		device memory region claimed
 * @registered:		driver registered with the gadget core
 * @suspended:		controller suspended
 * @connected:		gadget driver associated
 * @vbus_session:	requested VBUS session state
 * @set_cfg_not_acked:	SET_CONFIGURATION awaiting CSR_DONE ack
 * @waiting_zlp_ack:	waiting for zero-length-packet acknowledgement
 * @data_requests:	pci_pool for data DMA descriptors
 * @stp_requests:	pci_pool for SETUP DMA descriptors
 * @dma_addr:		DMA address of @ep0out_buf
 * @ep0out_buf:		bounce buffer for EP0 OUT data
 * @setup_data:		last received SETUP packet
 * @phys_addr:		physical base of device registers
 * @base_addr:		ioremapped base of device registers
 * @irq:		IRQ line of the device
 * @cfg_data:		current configuration/interface/alt-setting
 * @vbus_gpio:		VBUS sensing GPIO state
 */
struct pch_udc_dev {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct pci_dev			*pdev;
	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
	spinlock_t			lock;
	unsigned	active:1,
			stall:1,
			prot_stall:1,
			irq_registered:1,
			mem_region:1,
			registered:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct pci_pool		*data_requests;
	struct pci_pool		*stp_requests;
	dma_addr_t			dma_addr;
	void				*ep0out_buf;
	struct usb_ctrlrequest		setup_data;
	unsigned long			phys_addr;
	void __iomem			*base_addr;
	unsigned			irq;
	struct pch_udc_cfg_data		cfg_data;
	struct pch_vbus_gpio_data	vbus_gpio;
};
378
379#define PCH_UDC_PCI_BAR 1
380#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
381#define PCI_VENDOR_ID_ROHM 0x10DB
382#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
383#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
384
385static const char ep0_string[] = "ep0in";
386static DEFINE_SPINLOCK(udc_stall_spinlock);
387struct pch_udc_dev *pch_udc;
388static bool speed_fs;
389module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
390MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
/**
 * struct pch_udc_request - driver-private wrapper around usb_request
 * @req:		embedded generic usb_request
 * @td_data_phys:	physical address of the first data descriptor
 * @td_data:		first descriptor of the DMA chain
 * @td_data_last:	last descriptor of the DMA chain
 * @queue:		linkage on the endpoint's request queue
 * @dma_going:		DMA transfer in flight
 * @dma_mapped:		buffer was DMA-mapped by this driver
 * @dma_done:		DMA transfer finished
 * @chain_len:		number of descriptors in the chain
 * @buf:		driver-allocated bounce buffer (when used)
 * @dma:		DMA address of @buf, or DMA_ADDR_INVALID when the
 *			caller's buffer was mapped directly
 */
struct pch_udc_request {
	struct usb_request		req;
	dma_addr_t			td_data_phys;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_data_dma_desc	*td_data_last;
	struct list_head		queue;
	unsigned			dma_going:1,
					dma_mapped:1,
					dma_done:1;
	unsigned			chain_len;
	void				*buf;
	dma_addr_t			dma;
};
419
/* Read a 32-bit device register at byte offset @reg from the mapped base. */
static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}

/* Write @val to the 32-bit device register at byte offset @reg. */
static inline void pch_udc_writel(struct pch_udc_dev *dev,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}

/* Read-modify-write: set @bitmask bits in device register @reg. */
static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}

/* Read-modify-write: clear @bitmask bits in device register @reg. */
static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}

/* Read an endpoint register; @reg is relative to the EP's register window. */
static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}

/* Write an endpoint register; @reg is relative to the EP's register window. */
static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}

/* Read-modify-write: set @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}

/* Read-modify-write: clear @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
469
470
471
472
473
/**
 * pch_udc_csr_busy() - Wait until the CSR interface is idle
 * @dev:	Reference to pch_udc_dev structure
 *
 * Spins (bounded at 200 iterations) while UDC_CSR_BUSY is set; logs an
 * error on timeout but does not propagate it to the caller.
 */
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int count = 200;

	/* Wait till idle */
	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
		&& --count)
		cpu_relax();
	if (!count)
		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
}

/**
 * pch_udc_write_csr() - Write an endpoint's CSR (NE) register
 * @dev:	Reference to pch_udc_dev structure
 * @val:	value to be written
 * @ep:	CSR index (see UDC_EPIN_IDX / UDC_EPOUT_IDX)
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}

/**
 * pch_udc_read_csr() - Read an endpoint's CSR (NE) register
 * @dev:	Reference to pch_udc_dev structure
 * @ep:	CSR index
 *
 * Return: content of the CSR register.
 *
 * NOTE(review): a throwaway read is issued before the value-returning
 * read — presumably required by the hardware to latch a fresh value;
 * confirm against the EG20T datasheet before changing.
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
518
519
520
521
522
/**
 * pch_udc_rmt_wakeup() - Initiate remote wakeup signalling
 * @dev:	Reference to pch_udc_dev structure
 *
 * Pulses the RES (resume) bit for ~1 ms.
 */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}

/**
 * pch_udc_get_frame() - Read the current frame number from device status
 * @dev:	Reference to pch_udc_dev structure
 *
 * Return: frame number (timestamp field of DEVSTS).
 */
static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
{
	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
}

/* Clear the self-powered bit in the device configuration register. */
static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}

/* Set the self-powered bit in the device configuration register. */
static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}

/* Signal a soft disconnect: pull the device off the bus. */
static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
}

/**
 * pch_udc_clear_disconnect() - Reconnect the device to the bus
 * @dev:	Reference to pch_udc_dev structure
 *
 * Asserts RES while clearing soft-disconnect, then releases RES after
 * 1 ms so the host sees a clean (re)attach.
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Assert resume while dropping soft-disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}

/**
 * pch_udc_reconnect() - Re-initialize the controller and reconnect
 * @dev:	Reference to pch_udc_dev structure
 *
 * Full controller re-init followed by the same RES/SD dance as
 * pch_udc_clear_disconnect(); reset and enumeration interrupts are
 * unmasked so the subsequent bus reset is seen.
 */
static void pch_udc_init(struct pch_udc_dev *dev);	/* forward declaration */
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* enable device interrupts */
	/* set Disconnect interrupt and enum interrupt */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* Clear the disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
604
605
606
607
608
609
610
611
/**
 * pch_udc_vbus_session() - Handle a VBUS session state change
 * @dev:	Reference to pch_udc_dev structure
 * @is_active:	nonzero when VBUS is present
 *
 * On activation, reconnects to the bus; on deactivation, notifies the
 * bound gadget driver and soft-disconnects.
 *
 * NOTE(review): the unlock/relock around the disconnect callback implies
 * the caller holds dev->lock — confirm at all call sites.
 */
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
					  int is_active)
{
	if (is_active) {
		pch_udc_reconnect(dev);
		dev->vbus_session = 1;
	} else {
		if (dev->driver && dev->driver->disconnect) {
			/* drop the lock; the callback may re-enter the UDC */
			spin_unlock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_lock(&dev->lock);
		}
		pch_udc_set_disconnect(dev);
		dev->vbus_session = 0;
	}
}
628
629
630
631
632
/**
 * pch_udc_ep_set_stall() - Stall an endpoint
 * @ep:	Reference to the endpoint
 *
 * For IN endpoints the TX FIFO is flushed before stalling so no stale
 * data is sent once the stall is cleared.
 */
static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
{
	if (ep->in) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	} else {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	}
}

/**
 * pch_udc_ep_clear_stall() - Remove the stall condition from an endpoint
 * @ep:	Reference to the endpoint
 */
static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
{
	/* Clear the stall */
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	/* Clear NAK by writing CNAK */
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
}

/**
 * pch_udc_ep_set_trfr_type() - Program the endpoint transfer type
 * @ep:	Reference to the endpoint
 * @type:	one of UDC_EPCTL_ET_* (control/iso/bulk/interrupt)
 */
static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
					    u8 type)
{
	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
}

/**
 * pch_udc_ep_set_bufsz() - Program the endpoint buffer size
 * @ep:	Reference to the endpoint
 * @buf_size:	buffer size to set
 * @ep_in:	nonzero for an IN endpoint (different register/field)
 *
 * IN uses the low half of BUFIN_FRAMENUM; OUT uses the high half of
 * BUFOUT_MAXPKT.
 */
static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
						 u32 buf_size, u32 ep_in)
{
	u32 data;
	if (ep_in) {
		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
		data = (data & 0xffff0000) | (buf_size & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
	} else {
		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
		data = (buf_size << 16) | (data & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
	}
}

/**
 * pch_udc_ep_set_maxpkt() - Program the endpoint maximum packet size
 * @ep:	Reference to the endpoint
 * @pkt_size:	maximum packet size (low half of BUFOUT_MAXPKT)
 */
static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
{
	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
	data = (data & 0xffff0000) | (pkt_size & 0xffff);
	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
}

/* Set the physical address of the SETUP descriptor for this endpoint. */
static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
}

/* Set the physical address of the data descriptor for this endpoint. */
static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
}

/* Trigger a poll-demand on the endpoint (start IN transfer). */
static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
}

/* Mark the endpoint's receive buffer as ready. */
static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}

/* Clear the endpoint's receive-ready flag. */
static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
745
746
747
748
749
750
751
752
753
/**
 * pch_udc_set_dma() - Enable receive or transmit DMA
 * @dev:	Reference to pch_udc_dev structure
 * @dir:	DMA_DIR_RX or DMA_DIR_TX; any other value is ignored
 */
static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
{
	if (dir == DMA_DIR_RX)
		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
	else if (dir == DMA_DIR_TX)
		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
}

/**
 * pch_udc_clear_dma() - Disable receive or transmit DMA
 * @dev:	Reference to pch_udc_dev structure
 * @dir:	DMA_DIR_RX or DMA_DIR_TX; any other value is ignored
 */
static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
{
	if (dir == DMA_DIR_RX)
		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
	else if (dir == DMA_DIR_TX)
		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
}

/**
 * pch_udc_set_csr_done() - Acknowledge that CSR programming is complete
 * @dev:	Reference to pch_udc_dev structure
 */
static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
}

/* Mask (disable) the device interrupts selected by @mask. */
static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
					    u32 mask)
{
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
}

/* Unmask (enable) the device interrupts selected by @mask. */
static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
					   u32 mask)
{
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
}

/* Mask (disable) the endpoint interrupts selected by @mask. */
static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
}

/* Unmask (enable) the endpoint interrupts selected by @mask. */
static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
					      u32 mask)
{
	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
}
831
832
833
834
835
836
/* Read the pending device interrupt status bits. */
static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
}

/* Acknowledge device interrupts by writing @val back to the status reg. */
static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
						     u32 val)
{
	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
}

/* Read the pending endpoint interrupt status bits. */
static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
}

/* Acknowledge endpoint interrupts by writing @val back to the status reg. */
static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
					     u32 val)
{
	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
}

/* Read the device status register (speed, frame, cfg/intf/alt fields). */
static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
}

/* Read the endpoint control register. */
static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
}

/* Zero the endpoint control register. */
static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
}

/* Read the endpoint status register. */
static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
}

/* Acknowledge endpoint status bits by writing @stat back. */
static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
					 u32 stat)
{
	return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
}

/**
 * pch_udc_ep_set_nak() - Make the endpoint NAK all tokens (set SNAK)
 * @ep:	Reference to the endpoint
 */
static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
}
934
935
936
937
938
939
/**
 * pch_udc_ep_clear_nak() - Stop the endpoint NAKing tokens
 * @ep:	Reference to the endpoint
 *
 * No-op if the NAK bit is already clear.  For OUT endpoints, first
 * waits (bounded) for the receive FIFO to drain, since the hardware
 * will not accept CNAK while data is pending.  Then repeatedly writes
 * CNAK until the NAK status bit drops or the retry budget is exhausted.
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
965
966
967
968
969
970
971
972
/**
 * pch_udc_ep_fifo_flush() - Flush an endpoint FIFO
 * @ep:	Reference to the endpoint
 * @dir:	nonzero (IN) flushes the TX FIFO via the F bit;
 *		zero (OUT) is intentionally a no-op here — the RX FIFO
 *		is drained elsewhere (see the MRXFIFO_EMP wait in
 *		pch_udc_ep_clear_nak())
 */
static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
{
	if (dir) {	/* IN endpoint */
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		return;
	}
}
980
981
982
983
984
985
/**
 * pch_udc_ep_enable() - Enable and configure an endpoint
 * @ep:	Reference to the endpoint
 * @cfg:	current configuration/interface/alt state to program
 * @desc:	USB endpoint descriptor supplying type and max packet size
 *
 * Programs transfer type, buffer size, max packet size, sets NAK,
 * flushes the FIFO (IN only), then writes the composite NE value into
 * the endpoint's CSR slot (separate IN/OUT indices).
 */
static void pch_udc_ep_enable(struct pch_udc_ep *ep,
			       struct pch_udc_cfg_data *cfg,
			       const struct usb_endpoint_descriptor *desc)
{
	u32 val = 0;
	u32 buff_size = 0;

	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
	if (ep->in)
		buff_size = UDC_EPIN_BUFF_SIZE;
	else
		buff_size = UDC_EPOUT_BUFF_SIZE;
	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
	pch_udc_ep_set_nak(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	/* Configure the endpoint: num | dir | type | cfg | intf | alt | mps */
	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
		UDC_CSR_NE_TYPE_SHIFT) |
	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;

	if (ep->in)
		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
	else
		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
}
1016
1017
1018
1019
1020
/**
 * pch_udc_ep_disable() - Disable an endpoint
 * @ep:	Reference to the endpoint
 *
 * Flushes (IN only), forces NAK, acknowledges any pending IN status,
 * and clears the descriptor pointer so no stale DMA chain is used.
 */
static void pch_udc_ep_disable(struct pch_udc_ep *ep)
{
	if (ep->in) {
		/* flush the fifo */
		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
	} else {
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
	}
	/* reset desc pointer */
	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
}
1036
1037
1038
1039
1040
1041static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1042{
1043 unsigned int count = 10000;
1044
1045
1046 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1047 udelay(5);
1048 if (!count)
1049 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1050}
1051
1052
1053
1054
1055
/**
 * pch_udc_init() - Initialize the controller registers
 * @dev:	Reference to pch_udc_dev structure
 *
 * Performs a soft + PHY reset, masks and acknowledges all device and
 * endpoint interrupts, programs the device speed (module parameter
 * speed_fs selects full speed) and the burst/threshold device control
 * settings.
 */
static void pch_udc_init(struct pch_udc_dev *dev)
{
	if (NULL == dev) {
		pr_err("%s: Invalid address\n", __func__);
		return;
	}
	/* Soft Reset and Reset PHY */
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
	mdelay(1);
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
	mdelay(1);
	/* mask and clear all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);

	/* mask and clear all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);

	/* enable dynamic CSR programming, self powered, and device speed */
	if (speed_fs)
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
	else /* defaults to high speed */
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
			UDC_DEVCTL_THE);
}
1090
1091
1092
1093
1094
1095static void pch_udc_exit(struct pch_udc_dev *dev)
1096{
1097
1098 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1099
1100 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1101
1102 pch_udc_set_disconnect(dev);
1103}
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1114{
1115 struct pch_udc_dev *dev;
1116
1117 if (!gadget)
1118 return -EINVAL;
1119 dev = container_of(gadget, struct pch_udc_dev, gadget);
1120 return pch_udc_get_frame(dev);
1121}
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1132{
1133 struct pch_udc_dev *dev;
1134 unsigned long flags;
1135
1136 if (!gadget)
1137 return -EINVAL;
1138 dev = container_of(gadget, struct pch_udc_dev, gadget);
1139 spin_lock_irqsave(&dev->lock, flags);
1140 pch_udc_rmt_wakeup(dev);
1141 spin_unlock_irqrestore(&dev->lock, flags);
1142 return 0;
1143}
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1156{
1157 struct pch_udc_dev *dev;
1158
1159 if (!gadget)
1160 return -EINVAL;
1161 dev = container_of(gadget, struct pch_udc_dev, gadget);
1162 if (value)
1163 pch_udc_set_selfpowered(dev);
1164 else
1165 pch_udc_clear_selfpowered(dev);
1166 return 0;
1167}
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
/**
 * pch_udc_pcd_pullup() - usb_gadget_ops.pullup implementation
 * @gadget:	gadget to (dis)connect
 * @is_on:	nonzero to attach to the bus, zero to soft-disconnect
 *
 * Return: 0 on success, -EINVAL for a NULL gadget.
 *
 * NOTE(review): the bare spin_unlock/spin_lock pair around the
 * disconnect callback implies dev->lock is held by the caller —
 * confirm against the gadget core's calling convention.
 */
static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	if (is_on) {
		pch_udc_reconnect(dev);
	} else {
		if (dev->driver && dev->driver->disconnect) {
			/* drop the lock; the callback may re-enter the UDC */
			spin_unlock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_lock(&dev->lock);
		}
		pch_udc_set_disconnect(dev);
	}

	return 0;
}
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
/**
 * pch_udc_pcd_vbus_session() - usb_gadget_ops.vbus_session implementation
 * @gadget:	gadget whose VBUS state changed
 * @is_active:	nonzero when VBUS is present
 *
 * Return: 0 on success, -EINVAL for a NULL gadget.
 */
static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct pch_udc_dev *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	pch_udc_vbus_session(dev, is_active);
	return 0;
}

/**
 * pch_udc_pcd_vbus_draw() - usb_gadget_ops.vbus_draw implementation
 * @gadget:	gadget drawing the current
 * @mA:	current in milliamperes
 *
 * Not supported by this controller.
 *
 * Return: always -EOPNOTSUPP.
 */
static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	return -EOPNOTSUPP;
}
1237
/* Defined later in this file; needed for the ops table below. */
static int pch_udc_start(struct usb_gadget_driver *driver,
	int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
static int pch_udc_stop(struct usb_gadget_driver *driver);
/* Gadget-core operations implemented by this controller driver. */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
	.start = pch_udc_start,
	.stop = pch_udc_stop,
};
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1262{
1263 int vbus = 0;
1264
1265 if (dev->vbus_gpio.port)
1266 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1267 else
1268 vbus = -1;
1269
1270 return vbus;
1271}
1272
1273
1274
1275
1276
1277
1278
/**
 * pch_vbus_gpio_work_fall() - Debounced handler for a VBUS falling edge
 * @irq_work:	embedded work_struct (irq_work_fall)
 *
 * Samples VBUS every PCH_VBUS_INTERVAL ms for up to PCH_VBUS_PERIOD ms;
 * once two consecutive samples read 0 the drop is considered real:
 * the gadget driver is told about the disconnect, and the controller is
 * re-initialized (interrupt mode) or reconnected (polling mode).
 */
static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
{
	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
		struct pch_vbus_gpio_data, irq_work_fall);
	struct pch_udc_dev *dev =
		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
	int vbus_saved = -1;
	int vbus;
	int count;

	if (!dev->vbus_gpio.port)
		return;

	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
		count++) {
		vbus = pch_vbus_gpio_get_value(dev);

		/* fire only when two consecutive samples are 0 */
		if ((vbus_saved == vbus) && (vbus == 0)) {
			dev_dbg(&dev->pdev->dev, "VBUS fell");
			if (dev->driver
				&& dev->driver->disconnect) {
				dev->driver->disconnect(
					&dev->gadget);
			}
			if (dev->vbus_gpio.intr)
				pch_udc_init(dev);
			else
				pch_udc_reconnect(dev);
			return;
		}
		vbus_saved = vbus;
		mdelay(PCH_VBUS_INTERVAL);
	}
}
1313
1314
1315
1316
1317
1318
1319
1320static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1321{
1322 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1323 struct pch_vbus_gpio_data, irq_work_rise);
1324 struct pch_udc_dev *dev =
1325 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1326 int vbus;
1327
1328 if (!dev->vbus_gpio.port)
1329 return;
1330
1331 mdelay(PCH_VBUS_INTERVAL);
1332 vbus = pch_vbus_gpio_get_value(dev);
1333
1334 if (vbus == 1) {
1335 dev_dbg(&dev->pdev->dev, "VBUS rose");
1336 pch_udc_reconnect(dev);
1337 return;
1338 }
1339}
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1351{
1352 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1353
1354 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1355 return IRQ_NONE;
1356
1357 if (pch_vbus_gpio_get_value(dev))
1358 schedule_work(&dev->vbus_gpio.irq_work_rise);
1359 else
1360 schedule_work(&dev->vbus_gpio.irq_work_fall);
1361
1362 return IRQ_HANDLED;
1363}
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
/**
 * pch_vbus_gpio_init() - Set up the VBUS sense GPIO (and its IRQ)
 * @dev:		Reference to pch_udc_dev structure
 * @vbus_gpio_port:	GPIO number to use, or negative to disable sensing
 *
 * Claims the GPIO as an input and registers an edge-triggered IRQ when
 * one is available; without an IRQ the driver falls back to polling via
 * the fall work item (which is always initialized).
 *
 * Return: 0 on success (even if the optional IRQ could not be
 * requested), -EINVAL for an invalid or unclaimable GPIO.
 */
static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
{
	int err;
	int irq_num = 0;

	dev->vbus_gpio.port = 0;
	dev->vbus_gpio.intr = 0;

	if (vbus_gpio_port <= -1)
		return -EINVAL;

	/* gpio_is_valid() returns a boolean, not an errno */
	err = gpio_is_valid(vbus_gpio_port);
	if (!err) {
		pr_err("%s: gpio port %d is invalid\n",
			__func__, vbus_gpio_port);
		return -EINVAL;
	}

	err = gpio_request(vbus_gpio_port, "pch_vbus");
	if (err) {
		pr_err("%s: can't request gpio port %d, err: %d\n",
			__func__, vbus_gpio_port, err);
		return -EINVAL;
	}

	dev->vbus_gpio.port = vbus_gpio_port;
	gpio_direction_input(vbus_gpio_port);
	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);

	irq_num = gpio_to_irq(vbus_gpio_port);
	if (irq_num > 0) {
		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
				  "vbus_detect", dev);
		if (!err) {
			dev->vbus_gpio.intr = irq_num;
			/* rise work is only used in interrupt mode */
			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
				  pch_vbus_gpio_work_rise);
		} else {
			/* non-fatal: fall back to polling mode */
			pr_err("%s: can't request irq %d, err: %d\n",
				__func__, irq_num, err);
		}
	}

	return 0;
}
1420
1421
1422
1423
1424
1425static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1426{
1427 if (dev->vbus_gpio.intr)
1428 free_irq(dev->vbus_gpio.intr, dev);
1429
1430 if (dev->vbus_gpio.port)
1431 gpio_free(dev->vbus_gpio.port);
1432}
1433
1434
1435
1436
1437
1438
1439
1440
/**
 * complete_req() - Dequeue a request and call its completion callback
 * @ep:	endpoint the request was queued on
 * @req:	the request to complete
 * @status:	completion status to report (kept only if the request is
 *		still -EINPROGRESS; otherwise the request's own status wins)
 *
 * Unmaps DMA as needed: req->dma == DMA_ADDR_INVALID means the caller's
 * usb_request buffer was mapped directly (unmap req->req.dma); otherwise
 * a driver bounce buffer was used (unmap req->dma, copy back for OUT,
 * free the bounce buffer).  The endpoint is marked halted around the
 * callback, and dev->lock is dropped while calling it, since the
 * callback may re-enter the driver.
 */
static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
								 int status)
{
	struct pch_udc_dev	*dev;
	unsigned halted = ep->halted;

	list_del_init(&req->queue);

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->dma_mapped) {
		if (req->dma == DMA_ADDR_INVALID) {
			/* the usb_request buffer itself was mapped */
			if (ep->in)
				dma_unmap_single(&dev->pdev->dev, req->req.dma,
						 req->req.length,
						 DMA_TO_DEVICE);
			else
				dma_unmap_single(&dev->pdev->dev, req->req.dma,
						 req->req.length,
						 DMA_FROM_DEVICE);
			req->req.dma = DMA_ADDR_INVALID;
		} else {
			/* a driver bounce buffer was mapped */
			if (ep->in)
				dma_unmap_single(&dev->pdev->dev, req->dma,
						 req->req.length,
						 DMA_TO_DEVICE);
			else {
				dma_unmap_single(&dev->pdev->dev, req->dma,
						 req->req.length,
						 DMA_FROM_DEVICE);
				/* copy received data back to the caller */
				memcpy(req->req.buf, req->buf, req->req.length);
			}
			kfree(req->buf);
			req->dma = DMA_ADDR_INVALID;
		}
		req->dma_mapped = 0;
	}
	ep->halted = 1;
	spin_unlock(&dev->lock);
	if (!ep->in)
		pch_udc_ep_clear_rrdy(ep);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
1491
1492
1493
1494
1495
1496static void empty_req_queue(struct pch_udc_ep *ep)
1497{
1498 struct pch_udc_request *req;
1499
1500 ep->halted = 1;
1501 while (!list_empty(&ep->queue)) {
1502 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1503 complete_req(ep, req, -ESHUTDOWN);
1504 }
1505}
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1517 struct pch_udc_request *req)
1518{
1519 struct pch_udc_data_dma_desc *td = req->td_data;
1520 unsigned i = req->chain_len;
1521
1522 dma_addr_t addr2;
1523 dma_addr_t addr = (dma_addr_t)td->next;
1524 td->next = 0x00;
1525 for (; i > 1; --i) {
1526
1527 td = phys_to_virt(addr);
1528 addr2 = (dma_addr_t)td->next;
1529 pci_pool_free(dev->data_requests, td, addr);
1530 td->next = 0x00;
1531 addr = addr2;
1532 }
1533 req->chain_len = 1;
1534}
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
/**
 * pch_udc_create_dma_chain() - Build a descriptor chain for a request
 * @ep:		endpoint the request belongs to
 * @req:	request to build the chain for
 * @buf_len:	bytes covered per descriptor (the EP's max packet size
 *		at the call site in prepare_dma())
 * @gfp_flags:	allocation flags for extra descriptors
 *
 * Splits req->req.length across descriptors of at most @buf_len bytes.
 * The first descriptor is the request's own td_data; additional ones
 * come from the data_requests pool.  The last descriptor gets
 * PCH_UDC_DMA_LAST and links back to the first (td_data_phys).
 * Any pre-existing multi-descriptor chain is freed first.
 *
 * Return: 0 on success, -ENOMEM if a descriptor allocation fails (any
 * partial chain is freed and chain_len reset to 1).
 */
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
				    struct pch_udc_request *req,
				    unsigned long buf_len,
				    gfp_t gfp_flags)
{
	struct pch_udc_data_dma_desc *td = req->td_data, *last;
	unsigned long bytes = req->req.length, i = 0;
	dma_addr_t dma_addr;
	unsigned len = 1;

	if (req->chain_len > 1)
		pch_udc_free_dma_chain(ep->dev, req);

	/* pick the mapped address: caller's buffer or driver bounce buffer */
	if (req->dma == DMA_ADDR_INVALID)
		td->dataptr = req->req.dma;
	else
		td->dataptr = req->dma;

	td->status = PCH_UDC_BS_HST_BSY;
	for (; ; bytes -= buf_len, ++len) {
		/* byte count of this descriptor lives in the status word */
		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
		if (bytes <= buf_len)
			break;
		last = td;
		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
		if (!td)
			goto nomem;
		i += buf_len;
		td->dataptr = req->td_data->dataptr + i;
		last->next = dma_addr;
	}

	req->td_data_last = td;
	td->status |= PCH_UDC_DMA_LAST;
	td->next = req->td_data_phys;	/* close the ring back to the head */
	req->chain_len = len;
	return 0;

nomem:
	if (len > 1) {
		req->chain_len = len;
		pch_udc_free_dma_chain(ep->dev, req);
	}
	req->chain_len = 1;
	return -ENOMEM;
}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1608 gfp_t gfp)
1609{
1610 int retval;
1611
1612
1613 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1614 if (retval) {
1615 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1616 return retval;
1617 }
1618 if (ep->in)
1619 req->td_data->status = (req->td_data->status &
1620 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1621 return 0;
1622}
1623
1624
1625
1626
1627
1628
1629
1630static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1631{
1632 struct pch_udc_dev *dev = ep->dev;
1633
1634
1635 complete_req(ep, req, 0);
1636
1637
1638
1639
1640 if (dev->set_cfg_not_acked) {
1641 pch_udc_set_csr_done(dev);
1642 dev->set_cfg_not_acked = 0;
1643 }
1644
1645 if (!dev->stall && dev->waiting_zlp_ack) {
1646 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1647 dev->waiting_zlp_ack = 0;
1648 }
1649}
1650
1651
1652
1653
1654
1655
/**
 * pch_udc_start_rxrequest() - arm an OUT (receive) DMA transfer
 * @ep:		OUT endpoint to receive on
 * @req:	request whose descriptor chain will be armed
 *
 * Marks every descriptor of the chain host-ready, points the controller at
 * the head of the chain and restarts RX DMA. The order of the final steps
 * (interrupt enable, DMA enable, NAK clear, RRDY set) mirrors the hardware
 * programming sequence used elsewhere in this driver — do not reorder.
 */
static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
				    struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td_data;

	/* stop RX DMA while the chain is being re-armed */
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	td_data = req->td_data;
	/* hand every descriptor of the chain over to the controller */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				   PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	/* program the descriptor pointer and (re)start reception */
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	req->dma_going = 1;
	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_clear_nak(ep);
	pch_udc_ep_set_rrdy(ep);
}
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1692 const struct usb_endpoint_descriptor *desc)
1693{
1694 struct pch_udc_ep *ep;
1695 struct pch_udc_dev *dev;
1696 unsigned long iflags;
1697
1698 if (!usbep || (usbep->name == ep0_string) || !desc ||
1699 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1700 return -EINVAL;
1701
1702 ep = container_of(usbep, struct pch_udc_ep, ep);
1703 dev = ep->dev;
1704 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1705 return -ESHUTDOWN;
1706 spin_lock_irqsave(&dev->lock, iflags);
1707 ep->ep.desc = desc;
1708 ep->halted = 0;
1709 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1710 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1711 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1712 spin_unlock_irqrestore(&dev->lock, iflags);
1713 return 0;
1714}
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1726{
1727 struct pch_udc_ep *ep;
1728 struct pch_udc_dev *dev;
1729 unsigned long iflags;
1730
1731 if (!usbep)
1732 return -EINVAL;
1733
1734 ep = container_of(usbep, struct pch_udc_ep, ep);
1735 dev = ep->dev;
1736 if ((usbep->name == ep0_string) || !ep->ep.desc)
1737 return -EINVAL;
1738
1739 spin_lock_irqsave(&ep->dev->lock, iflags);
1740 empty_req_queue(ep);
1741 ep->halted = 1;
1742 pch_udc_ep_disable(ep);
1743 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1744 ep->ep.desc = NULL;
1745 INIT_LIST_HEAD(&ep->queue);
1746 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1747 return 0;
1748}
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1761 gfp_t gfp)
1762{
1763 struct pch_udc_request *req;
1764 struct pch_udc_ep *ep;
1765 struct pch_udc_data_dma_desc *dma_desc;
1766 struct pch_udc_dev *dev;
1767
1768 if (!usbep)
1769 return NULL;
1770 ep = container_of(usbep, struct pch_udc_ep, ep);
1771 dev = ep->dev;
1772 req = kzalloc(sizeof *req, gfp);
1773 if (!req)
1774 return NULL;
1775 req->req.dma = DMA_ADDR_INVALID;
1776 req->dma = DMA_ADDR_INVALID;
1777 INIT_LIST_HEAD(&req->queue);
1778 if (!ep->dev->dma_addr)
1779 return &req->req;
1780
1781 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1782 &req->td_data_phys);
1783 if (NULL == dma_desc) {
1784 kfree(req);
1785 return NULL;
1786 }
1787
1788 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1789 dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
1790 req->td_data = dma_desc;
1791 req->td_data_last = dma_desc;
1792 req->chain_len = 1;
1793 return &req->req;
1794}
1795
1796
1797
1798
1799
1800
1801
1802static void pch_udc_free_request(struct usb_ep *usbep,
1803 struct usb_request *usbreq)
1804{
1805 struct pch_udc_ep *ep;
1806 struct pch_udc_request *req;
1807 struct pch_udc_dev *dev;
1808
1809 if (!usbep || !usbreq)
1810 return;
1811 ep = container_of(usbep, struct pch_udc_ep, ep);
1812 req = container_of(usbreq, struct pch_udc_request, req);
1813 dev = ep->dev;
1814 if (!list_empty(&req->queue))
1815 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1816 __func__, usbep->name, req);
1817 if (req->td_data != NULL) {
1818 if (req->chain_len > 1)
1819 pch_udc_free_dma_chain(ep->dev, req);
1820 pci_pool_free(ep->dev->data_requests, req->td_data,
1821 req->td_data_phys);
1822 }
1823 kfree(req);
1824}
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
/**
 * pch_udc_pcd_queue() - usb_ep_ops.queue callback; queue a transfer request
 * @usbep:	endpoint to queue on
 * @usbreq:	request to queue
 * @gfp:	allocation flags (chain/bounce allocations use GFP_ATOMIC
 *		regardless, since the lock is held)
 *
 * Maps the request buffer for DMA (bounce-buffering buffers that are not
 * 4-byte aligned), builds the descriptor chain, and — when the endpoint is
 * idle — starts the transfer immediately: ZLPs complete inline, OUT
 * transfers are armed via pch_udc_start_rxrequest(), IN transfers only
 * clear NAK and enable interrupts; the ISR starts the actual TX.
 *
 * Return: 0 on success, -EINVAL on bad arguments or an already-queued
 * request, -ESHUTDOWN when no driver is bound, -ENOMEM on allocation
 * failure.
 */
static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
			     gfp_t gfp)
{
	int retval = 0;
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	struct pch_udc_request *req;
	unsigned long iflags;

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->ep.desc && ep->num)
		return -EINVAL;
	req = container_of(usbreq, struct pch_udc_request, req);
	if (!list_empty(&req->queue))
		return -EINVAL;
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	/* map the buffer for usb requests if unmapped */
	if (usbreq->length &&
	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
		if (!((unsigned long)(usbreq->buf) & 0x03)) {
			/* 4-byte aligned: map the caller's buffer directly */
			if (ep->in)
				usbreq->dma = dma_map_single(&dev->pdev->dev,
							     usbreq->buf,
							     usbreq->length,
							     DMA_TO_DEVICE);
			else
				usbreq->dma = dma_map_single(&dev->pdev->dev,
							     usbreq->buf,
							     usbreq->length,
							     DMA_FROM_DEVICE);
		} else {
			/* unaligned: bounce through a driver-owned buffer
			 * (copied back in complete_req for OUT transfers) */
			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
			if (!req->buf) {
				retval = -ENOMEM;
				goto probe_end;
			}
			if (ep->in) {
				memcpy(req->buf, usbreq->buf, usbreq->length);
				req->dma = dma_map_single(&dev->pdev->dev,
							  req->buf,
							  usbreq->length,
							  DMA_TO_DEVICE);
			} else
				req->dma = dma_map_single(&dev->pdev->dev,
							  req->buf,
							  usbreq->length,
							  DMA_FROM_DEVICE);
		}
		req->dma_mapped = 1;
	}
	if (usbreq->length > 0) {
		retval = prepare_dma(ep, req, GFP_ATOMIC);
		if (retval)
			goto probe_end;
	}
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;
	if (list_empty(&ep->queue) && !ep->halted) {
		/* no pending transfer: start this one right away */
		if (!usbreq->length) {
			process_zlp(ep, req);
			retval = 0;
			goto probe_end;
		}
		if (!ep->in) {
			pch_udc_start_rxrequest(ep, req);
		} else {
			/*
			 * For IN transfers, only release NAK and enable the
			 * endpoint interrupt here; the ISR's TDC/IN handling
			 * kicks off the actual TX DMA.
			 */
			pch_udc_wait_ep_stall(ep);
			pch_udc_ep_clear_nak(ep);
			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
		}
	}
	/* req is always non-NULL here (container_of above); kept as-is */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);

probe_end:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1940 struct usb_request *usbreq)
1941{
1942 struct pch_udc_ep *ep;
1943 struct pch_udc_request *req;
1944 struct pch_udc_dev *dev;
1945 unsigned long flags;
1946 int ret = -EINVAL;
1947
1948 ep = container_of(usbep, struct pch_udc_ep, ep);
1949 dev = ep->dev;
1950 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1951 return ret;
1952 req = container_of(usbreq, struct pch_udc_request, req);
1953 spin_lock_irqsave(&ep->dev->lock, flags);
1954
1955 list_for_each_entry(req, &ep->queue, queue) {
1956 if (&req->req == usbreq) {
1957 pch_udc_ep_set_nak(ep);
1958 if (!list_empty(&req->queue))
1959 complete_req(ep, req, -ECONNRESET);
1960 ret = 0;
1961 break;
1962 }
1963 }
1964 spin_unlock_irqrestore(&ep->dev->lock, flags);
1965 return ret;
1966}
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1979{
1980 struct pch_udc_ep *ep;
1981 struct pch_udc_dev *dev;
1982 unsigned long iflags;
1983 int ret;
1984
1985 if (!usbep)
1986 return -EINVAL;
1987 ep = container_of(usbep, struct pch_udc_ep, ep);
1988 dev = ep->dev;
1989 if (!ep->ep.desc && !ep->num)
1990 return -EINVAL;
1991 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1992 return -ESHUTDOWN;
1993 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1994 if (list_empty(&ep->queue)) {
1995 if (halt) {
1996 if (ep->num == PCH_UDC_EP0)
1997 ep->dev->stall = 1;
1998 pch_udc_ep_set_stall(ep);
1999 pch_udc_enable_ep_interrupts(ep->dev,
2000 PCH_UDC_EPINT(ep->in,
2001 ep->num));
2002 } else {
2003 pch_udc_ep_clear_stall(ep);
2004 }
2005 ret = 0;
2006 } else {
2007 ret = -EAGAIN;
2008 }
2009 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2010 return ret;
2011}
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2024{
2025 struct pch_udc_ep *ep;
2026 struct pch_udc_dev *dev;
2027 unsigned long iflags;
2028 int ret;
2029
2030 if (!usbep)
2031 return -EINVAL;
2032 ep = container_of(usbep, struct pch_udc_ep, ep);
2033 dev = ep->dev;
2034 if (!ep->ep.desc && !ep->num)
2035 return -EINVAL;
2036 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2037 return -ESHUTDOWN;
2038 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2039 if (!list_empty(&ep->queue)) {
2040 ret = -EAGAIN;
2041 } else {
2042 if (ep->num == PCH_UDC_EP0)
2043 ep->dev->stall = 1;
2044 pch_udc_ep_set_stall(ep);
2045 pch_udc_enable_ep_interrupts(ep->dev,
2046 PCH_UDC_EPINT(ep->in, ep->num));
2047 ep->dev->prot_stall = 1;
2048 ret = 0;
2049 }
2050 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2051 return ret;
2052}
2053
2054
2055
2056
2057
2058static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2059{
2060 struct pch_udc_ep *ep;
2061
2062 if (!usbep)
2063 return;
2064
2065 ep = container_of(usbep, struct pch_udc_ep, ep);
2066 if (ep->ep.desc || !ep->num)
2067 pch_udc_ep_fifo_flush(ep, ep->in);
2068}
2069
/*
 * Endpoint operations table handed to the gadget core through each usb_ep.
 * fifo_status is not implemented by this driver, hence NULL.
 */
static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable = pch_udc_pcd_ep_enable,
	.disable = pch_udc_pcd_ep_disable,
	.alloc_request = pch_udc_alloc_request,
	.free_request = pch_udc_free_request,
	.queue = pch_udc_pcd_queue,
	.dequeue = pch_udc_pcd_dequeue,
	.set_halt = pch_udc_pcd_set_halt,
	.set_wedge = pch_udc_pcd_set_wedge,
	.fifo_status = NULL,
	.fifo_flush = pch_udc_pcd_fifo_flush,
};
2082
2083
2084
2085
2086
2087static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2088{
2089 static u32 pky_marker;
2090
2091 if (!td_stp)
2092 return;
2093 td_stp->reserved = ++pky_marker;
2094 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2095 td_stp->status = PCH_UDC_BS_HST_RDY;
2096}
2097
2098
2099
2100
2101
2102
/**
 * pch_udc_start_next_txrequest() - start the next queued IN (TX) transfer
 * @ep:		IN endpoint to transmit on
 *
 * Does nothing if a poll-demand is already pending, the queue is empty, a
 * transfer is already in flight, or the request has no descriptor chain.
 * Otherwise hands the whole chain to the controller and kicks off TX DMA.
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	/* poll-demand still set: the controller is busy with a transfer */
	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	/* hand every descriptor of the chain over to the controller */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				   PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	/* program the chain head, then start TX: ddptr -> DMA -> poll demand */
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}
2137
2138
2139
2140
2141
/**
 * pch_udc_complete_transfer() - finish the current IN (TX) transfer
 * @ep:		IN endpoint whose head-of-queue request may be done
 *
 * Checks the last descriptor of the chain: if DMA is done and the status
 * is success, completes the request and either arms the endpoint for the
 * next queued request or masks its interrupts when the queue is empty.
 * On a bad RXTX status the request is left queued and only logged.
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	     PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
		        (req->td_data_last->status & PCH_UDC_RXTX_STS),
		        (int)(ep->epsts));
		return;
	}
	/* whole chain transmitted; reclaim the descriptors for the host */
	req->req.actual = req->req.length;
	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	complete_req(ep, req, 0);
	req->dma_going = 0;
	if (!list_empty(&ep->queue)) {
		/* more work queued: re-arm the endpoint */
		pch_udc_wait_ep_stall(ep);
		pch_udc_ep_clear_nak(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	} else {
		pch_udc_disable_ep_interrupts(ep->dev,
					      PCH_UDC_EPINT(ep->in, ep->num));
	}
}
2177
2178
2179
2180
2181
/**
 * pch_udc_complete_receiver() - finish the current OUT (RX) transfer
 * @ep:		OUT endpoint whose head-of-queue request may be done
 *
 * Walks the descriptor chain to find the DMA-done descriptor flagged
 * PCH_UDC_DMA_LAST, extracts the received byte count from it, completes
 * the request and, if more requests are queued, immediately arms the next
 * one. On a bad RXTX status or an incomplete chain the function bails out
 * with only an error log.
 */
static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;
	unsigned int count;
	struct pch_udc_data_dma_desc *td;
	dma_addr_t addr;

	if (list_empty(&ep->queue))
		return;
	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_set_ddptr(ep, 0);
	/* short-cut: start from the tail when it already reads DMA-done */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
	    PCH_UDC_BS_DMA_DONE)
		td = req->td_data_last;
	else
		td = req->td_data;

	while (1) {
		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
				"epstatus=0x%08x\n",
				(req->td_data->status & PCH_UDC_RXTX_STS),
				(int)(ep->epsts));
			return;
		}
		/* the last completed descriptor holds the byte count */
		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
			if (td->status & PCH_UDC_DMA_LAST) {
				count = td->status & PCH_UDC_RXTX_BYTES;
				break;
			}
		if (td == req->td_data_last) {
			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
			return;
		}
		addr = (dma_addr_t)td->next;
		td = phys_to_virt(addr);
	}
	/* a zero count on a max-packet request means a full packet arrived */
	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
		count = UDC_DMA_MAXPACKET;
	req->td_data->status |= PCH_UDC_DMA_LAST;
	td->status |= PCH_UDC_BS_HST_BSY;

	req->dma_going = 0;
	req->req.actual = count;
	complete_req(ep, req, 0);
	/* If there is a new/failed requests try that now */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
		pch_udc_start_rxrequest(ep, req);
	}
}
2237
2238
2239
2240
2241
2242
2243
/**
 * pch_udc_svc_data_in() - service an IN endpoint interrupt
 * @dev:	device instance
 * @ep_num:	endpoint number (0-based, direction-independent)
 *
 * Consumes the latched status (ep->epsts), handling: RSS (re-stall), RCS
 * (clear or re-assert stall depending on prot_stall), TDC (transfer done,
 * complete the request) and IN (idle endpoint, start the next queued TX).
 * BNA and HE conditions abort processing.
 */
static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
	/* epsts was latched (and hardware-cleared) by read_all_epstatus */
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
		return;
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* host requested stall: assert it and keep interrupts on */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			/* protocol stall still active: re-assert */
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	if (epsts & UDC_EPSTS_TDC)
		pch_udc_complete_transfer(ep);
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2282
2283
2284
2285
2286
2287
/**
 * pch_udc_svc_data_out() - service an OUT endpoint interrupt
 * @dev:	device instance
 * @ep_num:	endpoint number (0-based, direction-independent)
 *
 * Consumes the latched status: BNA re-arms an idle head-of-queue request,
 * HE aborts, RSS/RCS manage stall state, and an OUT-data indication either
 * re-stalls (protocol stall) or completes the receive. When the queue ends
 * up empty, RX DMA is re-enabled so setup/next packets can land.
 */
static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_request *req = NULL;

	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
	/* epsts was latched (and hardware-cleared) by read_all_epstatus */
	epsts = ep->epsts;
	ep->epsts = 0;

	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
		/* BNA with no DMA-done descriptor: re-arm if not in flight */
		req = list_entry(ep->queue.next, struct pch_udc_request,
				 queue);
		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
		     PCH_UDC_BS_DMA_DONE) {
			if (!req->dma_going)
				pch_udc_start_rxrequest(ep, req);
			return;
		}
	}
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* host requested stall: assert it and keep interrupts on */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			/* protocol stall still active: re-assert */
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_DATA) {
		if (ep->dev->prot_stall == 1) {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_complete_receiver(ep);
		}
	}
	if (list_empty(&ep->queue))
		pch_udc_set_dma(dev, DMA_DIR_RX);
}
2338
2339
2340
2341
2342
/**
 * pch_udc_svc_control_in() - service an ep0-IN interrupt
 * @dev:	device instance
 *
 * On transfer-done (TDC, and not stalled) completes the control-IN request
 * and re-arms ep0-OUT so the host's status stage / next setup packet can be
 * received. An IN indication on an otherwise idle ep0 starts the next
 * queued TX. BNA and HE abort processing.
 */
static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_ep *ep_out;

	ep = &dev->ep[UDC_EP0IN_IDX];
	ep_out = &dev->ep[UDC_EP0OUT_IDX];
	/* epsts was latched (and hardware-cleared) by read_all_epstatus */
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_XFERDONE)))
		return;
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
		pch_udc_complete_transfer(ep);
		/* re-arm ep0-OUT for the status stage / next setup packet */
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		ep_out->td_data->status = (ep_out->td_data->status &
					   ~PCH_UDC_BUFF_STS) |
					  PCH_UDC_BS_HST_RDY;
		pch_udc_ep_clear_nak(ep_out);
		pch_udc_set_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_rrdy(ep_out);
	}
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
	    !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2377
2378
2379
2380
2381
2382
/**
 * pch_udc_svc_control_out() - service an ep0-OUT interrupt
 * @dev:	device instance
 *
 * Two cases, decoded from the OUT status field:
 *  - SETUP: copy the setup packet out of the DMA setup buffer, choose
 *    ep0 direction, forward the request to the gadget driver (lock dropped
 *    around the callback) and translate its return value into NAK-clear,
 *    stall, or waiting-for-ZLP-ack.
 *  - DATA (and not stalled): dispatch to the normal OUT data path.
 * In both cases ep0-OUT is re-armed (RRDY) at the end.
 *
 * Called with dev->lock held.
 */
static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
{
	u32 stat;
	int setup_supported;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EP0OUT_IDX];
	/* epsts was latched (and hardware-cleared) by read_all_epstatus */
	stat = ep->epsts;
	ep->epsts = 0;

	/* If setup data */
	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_SETUP) {
		dev->stall = 0;
		dev->ep[UDC_EP0IN_IDX].halted = 0;
		dev->ep[UDC_EP0OUT_IDX].halted = 0;
		/* take a private copy of the setup packet and reset buffer */
		dev->setup_data = ep->td_stp->request;
		pch_udc_init_setup_buff(ep->td_stp);
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
				      dev->ep[UDC_EP0IN_IDX].in);
		/* ep0 direction follows the setup packet's direction bit */
		if ((dev->setup_data.bRequestType & USB_DIR_IN))
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
		else /* OUT */
			dev->gadget.ep0 = &ep->ep;
		spin_unlock(&dev->lock);
		/* If Mass storage Reset */
		if ((dev->setup_data.bRequestType == 0x21) &&
		    (dev->setup_data.bRequest == 0xFF))
			dev->prot_stall = 0;
		/* call gadget with setup data received */
		setup_supported = dev->driver->setup(&dev->gadget,
						     &dev->setup_data);
		spin_lock(&dev->lock);

		if (dev->setup_data.bRequestType & USB_DIR_IN) {
			ep->td_data->status = (ep->td_data->status &
					       ~PCH_UDC_BUFF_STS) |
					      PCH_UDC_BS_HST_RDY;
			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
		}
		/* ep0 in returns data on IN phase */
		if (setup_supported >= 0 && setup_supported <
		    UDC_EP0IN_MAX_PKT_SIZE) {
			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
			/* Gadget would have queued a request when
			 * we called the setup */
			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
				pch_udc_set_dma(dev, DMA_DIR_RX);
				pch_udc_ep_clear_nak(ep);
			}
		} else if (setup_supported < 0) {
			/* if unsupported request, then stall */
			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
			dev->stall = 0;
			pch_udc_set_dma(dev, DMA_DIR_RX);
		} else {
			dev->waiting_zlp_ack = 1;
		}
	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
		    UDC_EPSTS_OUT_DATA) && !dev->stall) {
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_ddptr(ep, 0);
		if (!list_empty(&ep->queue)) {
			/* hand the latched status to the data-out path */
			ep->epsts = stat;
			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
		}
		pch_udc_set_dma(dev, DMA_DIR_RX);
	}
	pch_udc_ep_set_rrdy(ep);
}
2456
2457
2458
2459
2460
2461
2462
2463
2464static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2465{
2466 struct pch_udc_ep *ep;
2467 struct pch_udc_request *req;
2468
2469 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2470 if (!list_empty(&ep->queue)) {
2471 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2472 pch_udc_enable_ep_interrupts(ep->dev,
2473 PCH_UDC_EPINT(ep->in, ep->num));
2474 pch_udc_ep_clear_nak(ep);
2475 }
2476}
2477
2478
2479
2480
2481
2482
2483static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2484{
2485 int i;
2486 struct pch_udc_ep *ep;
2487
2488 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2489
2490 if (ep_intr & (0x1 << i)) {
2491 ep = &dev->ep[UDC_EPIN_IDX(i)];
2492 ep->epsts = pch_udc_read_ep_status(ep);
2493 pch_udc_clear_ep_status(ep, ep->epsts);
2494 }
2495
2496 if (ep_intr & (0x10000 << i)) {
2497 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2498 ep->epsts = pch_udc_read_ep_status(ep);
2499 pch_udc_clear_ep_status(ep, ep->epsts);
2500 }
2501 }
2502}
2503
2504
2505
2506
2507
2508
/**
 * pch_udc_activate_control_ep() - (re)program both control endpoints
 * @dev:	device instance
 *
 * Resets ep0-IN and ep0-OUT: clears control registers, flushes FIFOs,
 * programs buffer sizes and max packet sizes, re-initializes the setup
 * buffer and descriptor pointers for ep0-OUT, and releases ep0-OUT from
 * NAK so setup packets can be received.
 *
 * NOTE(review): ep0-OUT's td_stp/td_data are dereferenced here, so they
 * are presumably allocated earlier (init_dma_pools) — confirm; ep0-IN's
 * descriptor pointers are explicitly nulled since IN transfers on ep0 use
 * per-request descriptors instead.
 */
static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	u32 val;

	/* Setup the IN endpoint */
	ep = &dev->ep[UDC_EP0IN_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
	/* Initialize the IN EP Descriptor */
	ep->td_data = NULL;
	ep->td_stp = NULL;
	ep->td_data_phys = 0;
	ep->td_stp_phys = 0;

	/* Setup the OUT endpoint */
	ep = &dev->ep[UDC_EP0OUT_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);

	/* Initialize the SETUP buffer */
	pch_udc_init_setup_buff(ep->td_stp);
	/* Write the pointer address of dma descriptor */
	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
	/* Write the pointer address of Setup descriptor */
	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);

	/* Initialize the dma descriptor */
	ep->td_data->status = PCH_UDC_DMA_LAST;
	ep->td_data->dataptr = dev->dma_addr;
	ep->td_data->next = ep->td_data_phys;

	pch_udc_ep_clear_nak(ep);
}
2549
2550
2551
2552
2553
2554
/**
 * pch_udc_svc_ur_interrupt() - handle a USB reset interrupt
 * @dev:	device instance
 *
 * Stops all DMA, masks and clears endpoint interrupts, resets every
 * endpoint's control/status/descriptor registers, drains all request
 * queues and finally notifies the gadget driver via its disconnect
 * callback. Called with dev->lock held; the lock is dropped around the
 * callback.
 */
static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	int i;

	pch_udc_clear_dma(dev, DMA_DIR_TX);
	pch_udc_clear_dma(dev, DMA_DIR_RX);
	/* Mask all endpoint interrupts */
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	/* Clear all pending endpoint interrupts */
	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		ep = &dev->ep[i];
		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
		pch_udc_clear_ep_control(ep);
		pch_udc_ep_set_ddptr(ep, 0);
		pch_udc_write_csr(ep->dev, 0x00, i);
	}
	dev->stall = 0;
	dev->prot_stall = 0;
	dev->waiting_zlp_ack = 0;
	dev->set_cfg_not_acked = 0;

	/* NAK, flush and drain every endpoint in use */
	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
		ep = &dev->ep[i];
		pch_udc_ep_set_nak(ep);
		pch_udc_ep_fifo_flush(ep, ep->in);
		/* Complete request queue */
		empty_req_queue(ep);
	}
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}
}
2593
2594
2595
2596
2597
2598
/**
 * pch_udc_svc_enum_interrupt() - handle a speed-enumeration-done interrupt
 * @dev:	device instance
 *
 * Reads the negotiated speed from the device status register, records it
 * in the gadget, re-activates the control endpoints and re-enables DMA and
 * the core device interrupts. BUG()s on an unrecognized speed field —
 * such a value would indicate a hardware/driver mismatch.
 */
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;

	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
		    UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case  UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case  UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		BUG();
	}
	dev->gadget.speed = speed;
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));

	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}
2632
2633
2634
2635
2636
2637
2638static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2639{
2640 u32 reg, dev_stat = 0;
2641 int i, ret;
2642
2643 dev_stat = pch_udc_read_device_status(dev);
2644 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2645 UDC_DEVSTS_INTF_SHIFT;
2646 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2647 UDC_DEVSTS_ALT_SHIFT;
2648 dev->set_cfg_not_acked = 1;
2649
2650 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2651 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2652 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2653 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2654 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2655
2656
2657 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2658 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2659 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2660 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2661 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2662 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2663 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2664
2665 pch_udc_ep_clear_stall(&(dev->ep[i]));
2666 dev->ep[i].halted = 0;
2667 }
2668 dev->stall = 0;
2669 spin_unlock(&dev->lock);
2670 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2671 spin_lock(&dev->lock);
2672}
2673
2674
2675
2676
2677
2678
2679static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2680{
2681 int i, ret;
2682 u32 reg, dev_stat = 0;
2683
2684 dev_stat = pch_udc_read_device_status(dev);
2685 dev->set_cfg_not_acked = 1;
2686 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2687 UDC_DEVSTS_CFG_SHIFT;
2688
2689 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2690 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2691 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2692
2693
2694 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2695 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2696 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2697 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2698 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2699
2700 pch_udc_ep_clear_stall(&(dev->ep[i]));
2701 dev->ep[i].halted = 0;
2702 }
2703 dev->stall = 0;
2704
2705
2706 spin_unlock(&dev->lock);
2707 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2708 spin_lock(&dev->lock);
2709}
2710
2711
2712
2713
2714
2715
2716
/**
 * pch_udc_dev_isr() - dispatch device-level interrupt causes
 * @dev:	device instance
 * @dev_intr:	device interrupt status bits (already acknowledged)
 *
 * Handles, in order: USB reset (UR), enumeration done (ENUM),
 * SET_INTERFACE (SI), SET_CONFIGURATION (SC) and suspend (US); SOF, idle
 * state (ES) and remote wakeup (RWKP) are only logged. Called with
 * dev->lock held; the lock is dropped around gadget driver callbacks.
 */
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;

	/* USB Reset Interrupt */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration Done Interrupt */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}
	/* Set Interface Interrupt */
	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);
	/* Set Config Interrupt */
	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);
	/* USB Suspend interrupt */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}

		/* suspend with VBUS gone means a real disconnect; otherwise
		 * (VBUS present, no GPIO interrupt) poll via the workqueue */
		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			if (dev->driver && dev->driver->disconnect) {
				spin_unlock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_lock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			schedule_work(&dev->vbus_gpio.irq_work_fall);

		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}
	/* Clear the SOF interrupt, if enabled */
	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");
	/* If Idle state interrupt */
	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");
	/* If Remote Wakeup */
	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}
2772
2773
2774
2775
2776
2777
/**
 * pch_udc_isr() - top-level interrupt handler for the UDC
 * @irq:	interrupt line (unused)
 * @pdev:	pointer to the pch_udc_dev passed at request_irq time
 *
 * Reads and acknowledges both the device and endpoint interrupt
 * registers, detects a hung controller (all three registers reading the
 * same value) and soft-resets it, then dispatches device-level causes and
 * per-endpoint IN/OUT service routines under dev->lock.
 *
 * Return: IRQ_HANDLED if any interrupt was serviced (or the hang was
 * reset), IRQ_NONE otherwise.
 */
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;

	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);

	/* For a hung-up controller these three reads return identical
	 * garbage; recover with a soft reset */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			/* The controller is reset */
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear ep interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process Control In interrupts, if present */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process Control Out interrupts, if present */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data in end point interrupts */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 <<  i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data out end point interrupts */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						 PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
							 UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
2833
2834
2835
2836
2837
/**
 * pch_udc_setup_ep0() - enable ep0 endpoint interrupts and core device IRQs
 * @dev:	device instance
 */
static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
{
	/* enable ep0 interrupts */
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
						UDC_EPINT_OUT_EP0);
	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
				       UDC_DEVINT_SI | UDC_DEVINT_SC);
}
2848
2849
2850
2851
2852
/**
 * gadget_release() - device-core release callback for the gadget device
 * @pdev:	the struct device embedded in the gadget
 *
 * Frees the whole pch_udc_dev stored as driver data.
 */
static void gadget_release(struct device *pdev)
{
	struct pch_udc_dev *udc = dev_get_drvdata(pdev);

	kfree(udc);
}
2859
2860
2861
2862
2863
2864static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2865{
2866 const char *const ep_string[] = {
2867 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2868 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2869 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2870 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2871 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2872 "ep15in", "ep15out",
2873 };
2874 int i;
2875
2876 dev->gadget.speed = USB_SPEED_UNKNOWN;
2877 INIT_LIST_HEAD(&dev->gadget.ep_list);
2878
2879
2880 memset(dev->ep, 0, sizeof dev->ep);
2881 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2882 struct pch_udc_ep *ep = &dev->ep[i];
2883 ep->dev = dev;
2884 ep->halted = 1;
2885 ep->num = i / 2;
2886 ep->in = ~i & 1;
2887 ep->ep.name = ep_string[i];
2888 ep->ep.ops = &pch_udc_ep_ops;
2889 if (ep->in)
2890 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2891 else
2892 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2893 UDC_EP_REG_SHIFT;
2894
2895 ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
2896 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2897 INIT_LIST_HEAD(&ep->queue);
2898 }
2899 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
2900 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
2901
2902
2903 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2904 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2905
2906 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2907 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2908}
2909
2910
2911
2912
2913
2914
2915
2916
/*
 * pch_udc_pcd_init() - One-time peripheral controller setup: program the
 * hardware, build the endpoint/gadget software state, then set up the
 * optional VBUS-sense GPIO.
 * @dev:	reference to the device controller instance
 *
 * Return: always 0 (the GPIO setup is best-effort).
 */
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);
	pch_vbus_gpio_init(dev, vbus_gpio_port);
	return 0;
}
2924
2925
2926
2927
2928
2929static int init_dma_pools(struct pch_udc_dev *dev)
2930{
2931 struct pch_udc_stp_dma_desc *td_stp;
2932 struct pch_udc_data_dma_desc *td_data;
2933
2934
2935 dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2936 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2937 if (!dev->data_requests) {
2938 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2939 __func__);
2940 return -ENOMEM;
2941 }
2942
2943
2944 dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2945 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2946 if (!dev->stp_requests) {
2947 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2948 __func__);
2949 return -ENOMEM;
2950 }
2951
2952 td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2953 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2954 if (!td_stp) {
2955 dev_err(&dev->pdev->dev,
2956 "%s: can't allocate setup dma descriptor\n", __func__);
2957 return -ENOMEM;
2958 }
2959 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2960
2961
2962 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2963 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2964 if (!td_data) {
2965 dev_err(&dev->pdev->dev,
2966 "%s: can't allocate data dma descriptor\n", __func__);
2967 return -ENOMEM;
2968 }
2969 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2970 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2971 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2972 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2973 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2974
2975 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2976 if (!dev->ep0out_buf)
2977 return -ENOMEM;
2978 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2979 UDC_EP0OUT_BUFF_SIZE * 4,
2980 DMA_FROM_DEVICE);
2981 return 0;
2982}
2983
2984static int pch_udc_start(struct usb_gadget_driver *driver,
2985 int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
2986{
2987 struct pch_udc_dev *dev = pch_udc;
2988 int retval;
2989
2990 if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
2991 !driver->setup || !driver->unbind || !driver->disconnect) {
2992 dev_err(&dev->pdev->dev,
2993 "%s: invalid driver parameter\n", __func__);
2994 return -EINVAL;
2995 }
2996
2997 if (!dev)
2998 return -ENODEV;
2999
3000 if (dev->driver) {
3001 dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
3002 return -EBUSY;
3003 }
3004 driver->driver.bus = NULL;
3005 dev->driver = driver;
3006 dev->gadget.dev.driver = &driver->driver;
3007
3008
3009 retval = bind(&dev->gadget, driver);
3010
3011 if (retval) {
3012 dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
3013 __func__, driver->driver.name, retval);
3014 dev->driver = NULL;
3015 dev->gadget.dev.driver = NULL;
3016 return retval;
3017 }
3018
3019 pch_udc_setup_ep0(dev);
3020
3021
3022 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
3023 pch_udc_clear_disconnect(dev);
3024
3025 dev->connected = 1;
3026 return 0;
3027}
3028
3029static int pch_udc_stop(struct usb_gadget_driver *driver)
3030{
3031 struct pch_udc_dev *dev = pch_udc;
3032
3033 if (!dev)
3034 return -ENODEV;
3035
3036 if (!driver || (driver != dev->driver)) {
3037 dev_err(&dev->pdev->dev,
3038 "%s: invalid driver parameter\n", __func__);
3039 return -EINVAL;
3040 }
3041
3042 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3043
3044
3045 driver->disconnect(&dev->gadget);
3046 driver->unbind(&dev->gadget);
3047 dev->gadget.dev.driver = NULL;
3048 dev->driver = NULL;
3049 dev->connected = 0;
3050
3051
3052 pch_udc_set_disconnect(dev);
3053 return 0;
3054}
3055
3056static void pch_udc_shutdown(struct pci_dev *pdev)
3057{
3058 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3059
3060 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3061 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3062
3063
3064 pch_udc_set_disconnect(dev);
3065}
3066
/*
 * pch_udc_remove() - Tear down everything pch_udc_probe() set up.
 * @pdev:	the PCI device being removed
 *
 * Also used as the probe error path (probe jumps here on failure), so
 * every release is guarded by an "was it set up?" check.  Order matters:
 * the gadget UDC goes first, DMA resources before the IRQ/MMIO, and the
 * device structure itself is freed last.
 */
static void pch_udc_remove(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* gadget driver must not be registered */
	if (dev->driver)
		dev_err(&pdev->dev,
			"%s: gadget driver still bound!!!\n", __func__);
	/* dma pool cleanup */
	if (dev->data_requests)
		pci_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0in */
		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
			pci_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_stp,
				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
		}
		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
			pci_pool_free(dev->stp_requests,
				dev->ep[UDC_EP0OUT_IDX].td_data,
				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
		}
		pci_pool_destroy(dev->stp_requests);
	}

	/* unmap the EP0 OUT bounce buffer before freeing it */
	if (dev->dma_addr)
		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
	kfree(dev->ep0out_buf);

	pch_vbus_gpio_free(dev);

	pch_udc_exit(dev);

	if (dev->irq_registered)
		free_irq(pdev->irq, dev);
	if (dev->base_addr)
		iounmap(dev->base_addr);
	if (dev->mem_region)
		release_mem_region(dev->phys_addr,
				   pci_resource_len(pdev, PCH_UDC_PCI_BAR));
	if (dev->active)
		pci_disable_device(pdev);
	if (dev->registered)
		device_unregister(&dev->gadget.dev);
	kfree(dev);
	pci_set_drvdata(pdev, NULL);
}
3119
3120#ifdef CONFIG_PM
3121static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3122{
3123 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3124
3125 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3126 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3127
3128 pci_disable_device(pdev);
3129 pci_enable_wake(pdev, PCI_D3hot, 0);
3130
3131 if (pci_save_state(pdev)) {
3132 dev_err(&pdev->dev,
3133 "%s: could not save PCI config state\n", __func__);
3134 return -ENOMEM;
3135 }
3136 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3137 return 0;
3138}
3139
3140static int pch_udc_resume(struct pci_dev *pdev)
3141{
3142 int ret;
3143
3144 pci_set_power_state(pdev, PCI_D0);
3145 pci_restore_state(pdev);
3146 ret = pci_enable_device(pdev);
3147 if (ret) {
3148 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
3149 return ret;
3150 }
3151 pci_enable_wake(pdev, PCI_D3hot, 0);
3152 return 0;
3153}
3154#else
3155#define pch_udc_suspend NULL
3156#define pch_udc_resume NULL
3157#endif
3158
3159static int pch_udc_probe(struct pci_dev *pdev,
3160 const struct pci_device_id *id)
3161{
3162 unsigned long resource;
3163 unsigned long len;
3164 int retval;
3165 struct pch_udc_dev *dev;
3166
3167
3168 if (pch_udc) {
3169 pr_err("%s: already probed\n", __func__);
3170 return -EBUSY;
3171 }
3172
3173 dev = kzalloc(sizeof *dev, GFP_KERNEL);
3174 if (!dev) {
3175 pr_err("%s: no memory for device structure\n", __func__);
3176 return -ENOMEM;
3177 }
3178
3179 if (pci_enable_device(pdev) < 0) {
3180 kfree(dev);
3181 pr_err("%s: pci_enable_device failed\n", __func__);
3182 return -ENODEV;
3183 }
3184 dev->active = 1;
3185 pci_set_drvdata(pdev, dev);
3186
3187
3188 resource = pci_resource_start(pdev, 1);
3189 len = pci_resource_len(pdev, 1);
3190
3191 if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
3192 dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
3193 retval = -EBUSY;
3194 goto finished;
3195 }
3196 dev->phys_addr = resource;
3197 dev->mem_region = 1;
3198
3199 dev->base_addr = ioremap_nocache(resource, len);
3200 if (!dev->base_addr) {
3201 pr_err("%s: device memory cannot be mapped\n", __func__);
3202 retval = -ENOMEM;
3203 goto finished;
3204 }
3205 if (!pdev->irq) {
3206 dev_err(&pdev->dev, "%s: irq not set\n", __func__);
3207 retval = -ENODEV;
3208 goto finished;
3209 }
3210 pch_udc = dev;
3211
3212 if (pch_udc_pcd_init(dev)) {
3213 retval = -ENODEV;
3214 goto finished;
3215 }
3216 if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
3217 dev)) {
3218 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3219 pdev->irq);
3220 retval = -ENODEV;
3221 goto finished;
3222 }
3223 dev->irq = pdev->irq;
3224 dev->irq_registered = 1;
3225
3226 pci_set_master(pdev);
3227 pci_try_set_mwi(pdev);
3228
3229
3230 spin_lock_init(&dev->lock);
3231 dev->pdev = pdev;
3232 dev->gadget.ops = &pch_udc_ops;
3233
3234 retval = init_dma_pools(dev);
3235 if (retval)
3236 goto finished;
3237
3238 dev_set_name(&dev->gadget.dev, "gadget");
3239 dev->gadget.dev.parent = &pdev->dev;
3240 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3241 dev->gadget.dev.release = gadget_release;
3242 dev->gadget.name = KBUILD_MODNAME;
3243 dev->gadget.max_speed = USB_SPEED_HIGH;
3244
3245 retval = device_register(&dev->gadget.dev);
3246 if (retval)
3247 goto finished;
3248 dev->registered = 1;
3249
3250
3251 pch_udc_set_disconnect(dev);
3252 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3253 if (retval)
3254 goto finished;
3255 return 0;
3256
3257finished:
3258 pch_udc_remove(pdev);
3259 return retval;
3260}
3261
3262static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
3263 {
3264 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3265 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3266 .class_mask = 0xffffffff,
3267 },
3268 {
3269 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3270 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3271 .class_mask = 0xffffffff,
3272 },
3273 {
3274 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3275 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3276 .class_mask = 0xffffffff,
3277 },
3278 { 0 },
3279};
3280
3281MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3282
/* PCI driver glue: probe/remove plus legacy PM and shutdown hooks. */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.suspend =	pch_udc_suspend,	/* NULL when !CONFIG_PM */
	.resume =	pch_udc_resume,		/* NULL when !CONFIG_PM */
	.shutdown =	pch_udc_shutdown,
};

/* generates module init/exit that register/unregister the pci driver */
module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");
3298