/*
 * AM35x MUSB "glue layer"
 *
 * Author: Ajay Kumar Gupta <ajay.gupta@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/platform_data/usb-omap.h>

#include "musb_core.h"

/*
 * AM35x specific definitions
 */
/* AM35x USB OTG subsystem (wrapper) register offsets */
#define USB_REVISION_REG	0x00
#define USB_CTRL_REG		0x04
#define USB_STAT_REG		0x08
#define USB_EMULATION_REG	0x0c
/* 0x10 Reserved */
#define USB_AUTOREQ_REG		0x14
#define USB_SRP_FIX_TIME_REG	0x18
#define USB_TEARDOWN_REG	0x1c
#define EP_INTR_SRC_REG		0x20
#define EP_INTR_SRC_SET_REG	0x24
#define EP_INTR_SRC_CLEAR_REG	0x28
#define EP_INTR_MASK_REG	0x2c
#define EP_INTR_MASK_SET_REG	0x30
#define EP_INTR_MASK_CLEAR_REG	0x34
#define EP_INTR_SRC_MASKED_REG	0x38
#define CORE_INTR_SRC_REG	0x40
#define CORE_INTR_SRC_SET_REG	0x44
#define CORE_INTR_SRC_CLEAR_REG	0x48
#define CORE_INTR_MASK_REG	0x4c
#define CORE_INTR_MASK_SET_REG	0x50
#define CORE_INTR_MASK_CLEAR_REG	0x54
#define CORE_INTR_SRC_MASKED_REG	0x58
/* 0x5c Reserved */
#define USB_END_OF_INTR_REG	0x60

/* Control register bits */
#define AM35X_SOFT_RESET_MASK	1

/* USB interrupt register bits */
#define AM35X_INTR_USB_SHIFT	16
#define AM35X_INTR_USB_MASK	(0x1ff << AM35X_INTR_USB_SHIFT)
#define AM35X_INTR_DRVVBUS	0x100
#define AM35X_INTR_RX_SHIFT	16
#define AM35X_INTR_TX_SHIFT	0
#define AM35X_TX_EP_MASK	0xffff	/* EP0 + 15 Tx EPs */
#define AM35X_RX_EP_MASK	0xfffe	/* 15 Rx EPs */
#define AM35X_TX_INTR_MASK	(AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
#define AM35X_RX_INTR_MASK	(AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)

#define USB_MENTOR_CORE_OFFSET	0x400

struct am35x_glue {
	struct device *dev;
	struct platform_device *musb;
	struct platform_device *phy;
	struct clk *phy_clk;
	struct clk *clk;
};

/*
 * am35x_musb_enable - enable interrupts
 */
static void am35x_musb_enable(struct musb *musb)
{
	void __iomem *reg_base = musb->ctrl_base;
	u32 epmask;

	/* Workaround: setup IRQs through both register sets. */
	epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
		 ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);

	musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
	musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);

	/* Force the DRVVBUS IRQ so we can start polling for ID change. */
	musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
		    AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
}

/*
 * am35x_musb_disable - disable HDRC and flush interrupts
 */
static void am35x_musb_disable(struct musb *musb)
{
	void __iomem *reg_base = musb->ctrl_base;

	musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
	musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
		    AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
}

#define portstate(stmt)		stmt

static void am35x_musb_set_vbus(struct musb *musb, int is_on)
{
	WARN_ON(is_on && is_peripheral_active(musb));
}

#define POLL_SECONDS	2

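/*
 * Timer for the OTG polling workaround: AM35x has no usable ID-change
 * interrupt, so DEVCTL is polled from otg_timer() and DRVVBUS events are
 * used as a proxy in the interrupt handler below.
 */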
static struct timer_list otg_workaround;

static void otg_timer(unsigned long _musb)
{
	struct musb *musb = (void *)_musb;
	void __iomem *mregs = musb->mregs;
	u8 devctl;
	unsigned long flags;

	/*
	 * We poll because AM35x's won't expose several OTG-critical
	 * status change events (from the transceiver) otherwise.
	 */
	devctl = musb_readb(mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
		usb_otg_state_string(musb->xceiv->otg->state));

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_WAIT_BCON:
		devctl &= ~MUSB_DEVCTL_SESSION;
		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if (devctl & MUSB_DEVCTL_BDEVICE) {
			musb->xceiv->otg->state = OTG_STATE_B_IDLE;
			MUSB_DEV_MODE(musb);
		} else {
			musb->xceiv->otg->state = OTG_STATE_A_IDLE;
			MUSB_HST_MODE(musb);
		}
		break;
	case OTG_STATE_A_WAIT_VFALL:
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
		musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
			    MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
		break;
	case OTG_STATE_B_IDLE:
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		if (devctl & MUSB_DEVCTL_BDEVICE)
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
		else
			musb->xceiv->otg->state = OTG_STATE_A_IDLE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}

static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
{
	static unsigned long last_timer;

	if (timeout == 0)
		timeout = jiffies + msecs_to_jiffies(3);

	/* Never idle if active, or when VBUS timeout is not set as host */
	if (musb->is_active || (musb->a_wait_bcon == 0 &&
			musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
		dev_dbg(musb->controller, "%s active, deleting timer\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		del_timer(&otg_workaround);
		last_timer = jiffies;
		return;
	}

	if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
		dev_dbg(musb->controller,
			"Longer idle timer already pending, ignoring...\n");
		return;
	}
	last_timer = timeout;

	dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
		usb_otg_state_string(musb->xceiv->otg->state),
		jiffies_to_msecs(timeout - jiffies));
	mod_timer(&otg_workaround, timeout);
}

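/*
 * am35x_musb_interrupt - handle wrapper and core interrupts
 *
 * Reads the AM35x wrapper's masked EP and core interrupt sources, maps
 * them onto musb->int_tx/int_rx/int_usb, treats DRVVBUS transitions as a
 * stand-in for the missing ID-change IRQ, and finally acks the level
 * interrupt and writes the EOI register.
 */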
static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
{
	struct musb *musb = hci;
	void __iomem *reg_base = musb->ctrl_base;
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct omap_musb_board_data *data = plat->board_data;
	struct usb_otg *otg = musb->xceiv->otg;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	u32 epintr, usbintr;

	spin_lock_irqsave(&musb->lock, flags);

	/* Get endpoint interrupts */
	epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);

	if (epintr) {
		musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);

		musb->int_rx =
			(epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
		musb->int_tx =
			(epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
	}

	/* Get usb core interrupts */
	usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
	if (!usbintr && !epintr)
		goto eoi;

	if (usbintr) {
		musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);

		musb->int_usb =
			(usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
	}
	/*
	 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
	 * AM35x's missing ID change IRQ.  We need an ID change IRQ to
	 * switch appropriately between halves of the OTG state machine.
	 * Managing DEVCTL.SESSION per Mentor docs requires that we know its
	 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
	 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
	 */
	if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
		int drvvbus = musb_readl(reg_base, USB_STAT_REG);
		void __iomem *mregs = musb->mregs;
		u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
		int err;

		err = musb->int_usb & MUSB_INTR_VBUSERROR;
		if (err) {
			/*
			 * The Mentor core doesn't debounce VBUS as needed
			 * to cope with device connect current spikes. This
			 * means it's not uncommon for bus-powered devices
			 * to get VBUS errors during enumeration.
			 *
			 * This is a workaround, but newer RTL from Mentor
			 * seems to allow a better one: "re"-starting sessions
			 * without waiting for VBUS to stop registering in
			 * devctl.
			 */
			musb->int_usb &= ~MUSB_INTR_VBUSERROR;
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
			WARNING("VBUS error workaround (delay coming)\n");
		} else if (drvvbus) {
			MUSB_HST_MODE(musb);
			otg->default_a = 1;
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
			del_timer(&otg_workaround);
		} else {
			musb->is_active = 0;
			MUSB_DEV_MODE(musb);
			otg->default_a = 0;
			musb->xceiv->otg->state = OTG_STATE_B_IDLE;
			portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
		}

		/* NOTE: this must complete power-on within 100 ms. */
		dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
			drvvbus ? "on" : "off",
			usb_otg_state_string(musb->xceiv->otg->state),
			err ? " ERROR" : "",
			devctl);
		ret = IRQ_HANDLED;
	}

	/* Drop spurious RX and TX if device is disconnected */
	if (musb->int_usb & MUSB_INTR_DISCONNECT) {
		musb->int_tx = 0;
		musb->int_rx = 0;
	}

	if (musb->int_tx || musb->int_rx || musb->int_usb)
		ret |= musb_interrupt(musb);

eoi:
	/* EOI needs to be written for the IRQ to be re-asserted. */
	if (ret == IRQ_HANDLED || epintr || usbintr) {
		/* clear level interrupt */
		if (data->clear_irq)
			data->clear_irq();
		/* write EOI */
		musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
	}

	/* Poll for ID change */
	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
		mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);

	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}

static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct omap_musb_board_data *data = plat->board_data;
	int retval = 0;

	if (data->set_mode)
		data->set_mode(musb_mode);
	else
		retval = -EIO;

	return retval;
}

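/*
 * am35x_musb_init - platform-specific controller setup
 *
 * Offsets the MUSB core registers past the AM35x wrapper, checks the IP
 * revision, grabs the generic USB2 PHY, and uses the board hooks to reset
 * the controller and power up the PHY before installing the ISR.
 */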
static int am35x_musb_init(struct musb *musb)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct omap_musb_board_data *data = plat->board_data;
	void __iomem *reg_base = musb->ctrl_base;
	u32 rev;

	musb->mregs += USB_MENTOR_CORE_OFFSET;

	/* Returns zero if e.g. not clocked */
	rev = musb_readl(reg_base, USB_REVISION_REG);
	if (!rev)
		return -ENODEV;

	musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(musb->xceiv))
		return -EPROBE_DEFER;

	setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);

	/* Reset the musb */
	if (data->reset)
		data->reset();

	/* Reset the controller */
	musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);

	/* Start the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(1);

	msleep(5);

	musb->isr = am35x_musb_interrupt;

	/* clear level interrupt */
	if (data->clear_irq)
		data->clear_irq();

	return 0;
}

static int am35x_musb_exit(struct musb *musb)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct omap_musb_board_data *data = plat->board_data;

	del_timer_sync(&otg_workaround);

	/* Shutdown the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(0);

	usb_put_phy(musb->xceiv);

	return 0;
}

/* AM35x supports only 32bit read operation */
static void am35x_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	void __iomem *fifo = hw_ep->fifo;
	u32 val;
	int i;

	/* Read for 32bit-aligned destination address */
	if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
		readsl(fifo, dst, len >> 2);
		dst += len & ~0x03;
		len &= 0x03;
	}
	/*
	 * Now read the remaining 1 to 3 byte or complete length if
	 * unaligned address.
	 */
	if (len > 4) {
		for (i = 0; i < (len >> 2); i++) {
			*(u32 *) dst = musb_readl(fifo, 0);
			dst += 4;
		}
		len &= 0x03;
	}
	if (len > 0) {
		val = musb_readl(fifo, 0);
		memcpy(dst, &val, len);
	}
}

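/*
 * Glue callbacks wired into the MUSB core; the Inventra DMA hooks are
 * only provided when the driver is built with CONFIG_USB_INVENTRA_DMA.
 */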
static const struct musb_platform_ops am35x_ops = {
	.quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP,
	.init = am35x_musb_init,
	.exit = am35x_musb_exit,

	.read_fifo = am35x_read_fifo,
#ifdef CONFIG_USB_INVENTRA_DMA
	.dma_init = musbhs_dma_controller_create,
	.dma_exit = musbhs_dma_controller_destroy,
#endif
	.enable = am35x_musb_enable,
	.disable = am35x_musb_disable,

	.set_mode = am35x_musb_set_mode,
	.try_idle = am35x_musb_try_idle,

	.set_vbus = am35x_musb_set_vbus,
};

static const struct platform_device_info am35x_dev_info = {
	.name = "musb-hdrc",
	.id = PLATFORM_DEVID_AUTO,
	.dma_mask = DMA_BIT_MASK(32),
};

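/*
 * am35x_probe - create the glue context and the "musb-hdrc" child device
 *
 * Acquires and enables the "fck" (PHY) and "ick" clocks, registers a
 * generic NOP PHY, and hands the platform data (carrying am35x_ops) to
 * the MUSB core device registered underneath this one.
 */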
static int am35x_probe(struct platform_device *pdev)
{
	struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct platform_device *musb;
	struct am35x_glue *glue;
	struct platform_device_info pinfo;
	struct clk *phy_clk;
	struct clk *clk;

	int ret = -ENOMEM;

	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&pdev->dev, "failed to allocate glue context\n");
		goto err0;
	}

	phy_clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(phy_clk)) {
		dev_err(&pdev->dev, "failed to get PHY clock\n");
		ret = PTR_ERR(phy_clk);
		goto err3;
	}

	clk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(clk);
		goto err4;
	}

	ret = clk_enable(phy_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PHY clock\n");
		goto err5;
	}

	ret = clk_enable(clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto err6;
	}

	glue->dev = &pdev->dev;
	glue->phy_clk = phy_clk;
	glue->clk = clk;

	pdata->platform_ops = &am35x_ops;

	glue->phy = usb_phy_generic_register();
	if (IS_ERR(glue->phy)) {
		ret = PTR_ERR(glue->phy);
		goto err7;
	}
	platform_set_drvdata(pdev, glue);

	pinfo = am35x_dev_info;
	pinfo.parent = &pdev->dev;
	pinfo.res = pdev->resource;
	pinfo.num_res = pdev->num_resources;
	pinfo.data = pdata;
	pinfo.size_data = sizeof(*pdata);

	glue->musb = musb = platform_device_register_full(&pinfo);
	if (IS_ERR(musb)) {
		ret = PTR_ERR(musb);
		dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
		goto err8;
	}

	return 0;

err8:
	usb_phy_generic_unregister(glue->phy);

err7:
	clk_disable(clk);

err6:
	clk_disable(phy_clk);

err5:
	clk_put(clk);

err4:
	clk_put(phy_clk);

err3:
	kfree(glue);

err0:
	return ret;
}

static int am35x_remove(struct platform_device *pdev)
{
	struct am35x_glue *glue = platform_get_drvdata(pdev);

	platform_device_unregister(glue->musb);
	usb_phy_generic_unregister(glue->phy);
	clk_disable(glue->clk);
	clk_disable(glue->phy_clk);
	clk_put(glue->clk);
	clk_put(glue->phy_clk);
	kfree(glue);

	return 0;
}

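/*
 * System sleep support: suspend powers down the on-chip PHY and gates
 * both clocks; resume powers the PHY back up and re-enables the clocks.
 */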
#ifdef CONFIG_PM_SLEEP
static int am35x_suspend(struct device *dev)
{
	struct am35x_glue *glue = dev_get_drvdata(dev);
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct omap_musb_board_data *data = plat->board_data;

	/* Shutdown the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(0);

	clk_disable(glue->phy_clk);
	clk_disable(glue->clk);

	return 0;
}

static int am35x_resume(struct device *dev)
{
	struct am35x_glue *glue = dev_get_drvdata(dev);
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
	struct omap_musb_board_data *data = plat->board_data;
	int ret;

	/* Start the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(1);

	ret = clk_enable(glue->phy_clk);
	if (ret) {
		dev_err(dev, "failed to enable PHY clock\n");
		return ret;
	}

	ret = clk_enable(glue->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(am35x_pm_ops, am35x_suspend, am35x_resume);

static struct platform_driver am35x_driver = {
	.probe = am35x_probe,
	.remove = am35x_remove,
	.driver = {
		.name = "musb-am35x",
		.pm = &am35x_pm_ops,
	},
};

MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
MODULE_LICENSE("GPL v2");
module_platform_driver(am35x_driver);