/*
 * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>

#include "musb_core.h"
#include "tusb6010.h"

#define to_chdat(c)	((struct tusb_omap_dma_ch *)(c)->private_data)

#define MAX_DMAREQ	5

struct tusb_dma_data {
	s8			dmareq;	/* assigned dmareq line, -1 if none */
	struct dma_chan		*chan;
};

struct tusb_omap_dma_ch {
	struct musb		*musb;
	void __iomem		*tbase;
	unsigned long		phys_offset;
	int			epnum;
	u8			tx;
	struct musb_hw_ep	*hw_ep;

	struct tusb_dma_data	*dma_data;

	struct tusb_omap_dma	*tusb_dma;

	dma_addr_t		dma_addr;

	u32			len;
	u16			packet_sz;
	u16			transfer_packet_sz;
	u32			transfer_len;
	u32			completed_len;
};

struct tusb_omap_dma {
	struct dma_controller	controller;
	void __iomem		*tbase;

	struct tusb_dma_data	dma_pool[MAX_DMAREQ];
	unsigned		multichannel:1;
};
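
/*
 * Each 5-bit slot in TUSB_DMA_EP_MAP maps one dmareq line: bits [3:0]
 * carry the endpoint number and bit 4 is set for tx. A slot value of
 * zero means the dmareq line is free.
 */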

/*
 * Allocate dmareq0 to the current channel unless it's already taken
 */
static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if (reg != 0) {
		dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
			chdat->epnum, reg & 0xf);
		return -EAGAIN;
	}

	if (chdat->tx)
		reg = (1 << 4) | chdat->epnum;
	else
		reg = chdat->epnum;

	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	return 0;
}

static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if ((reg & 0xf) != chdat->epnum) {
		printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
			chdat->epnum, reg & 0xf);
		return;
	}
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
}

/*
 * DMA completion callback: compute the actual transfer length, finish any
 * remaining sub-32-byte tail with PIO, and notify the MUSB core. See also
 * musb_dma_completion() and musb_g_tx()/musb_g_rx() in musb_gadget.c.
 */
static void tusb_omap_dma_cb(void *data)
{
	struct dma_channel	*channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
	struct musb		*musb = chdat->musb;
	struct device		*dev = musb->controller;
	struct musb_hw_ep	*hw_ep = chdat->hw_ep;
	void __iomem		*ep_conf = hw_ep->conf;
	void __iomem		*mbase = musb->mregs;
	unsigned long		remaining, flags, pio;

	spin_lock_irqsave(&musb->lock, flags);

	dev_dbg(musb->controller, "ep%i %s dma callback\n",
		chdat->epnum, chdat->tx ? "tx" : "rx");

	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
	if (unlikely(remaining > chdat->transfer_len)) {
		dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", remaining);
		remaining = 0;
	}

	channel->actual_len = chdat->transfer_len - remaining;
	pio = chdat->len - channel->actual_len;

	dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer remaining 1 - 31 bytes */
	if (pio > 0 && pio < 32) {
		u8	*buf;

		dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			dma_unmap_single(dev, chdat->dma_addr,
					 chdat->transfer_len,
					 DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			dma_unmap_single(dev, chdat->dma_addr,
					 chdat->transfer_len,
					 DMA_FROM_DEVICE);
			musb_read_fifo(hw_ep, pio, buf);
		}
		channel->actual_len += pio;
	}

	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/*
	 * We must terminate short tx transfers manually by setting TXPKTRDY.
	 * REVISIT: This same problem may occur with other MUSB dma as well.
	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16	csr;

		if (chdat->tx) {
			dev_dbg(musb->controller, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
				| MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}
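
/*
 * Set up a DMA transfer for one endpoint: reject transfers the TUSB DMA
 * cannot handle, claim a dmareq line if running in shared mode, map the
 * buffer, configure and submit a dmaengine descriptor, and finally point
 * the MUSB and TUSB endpoint registers at the transfer.
 */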
static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch		*chdat = to_chdat(channel);
	struct tusb_omap_dma		*tusb_dma = chdat->tusb_dma;
	struct musb			*musb = chdat->musb;
	struct device			*dev = musb->controller;
	struct musb_hw_ep		*hw_ep = chdat->hw_ep;
	void __iomem			*mbase = musb->mregs;
	void __iomem			*ep_conf = hw_ep->conf;
	dma_addr_t			fifo_addr = hw_ep->fifo_sync;
	u32				dma_remaining;
	u16				csr;
	u32				psize;
	struct tusb_dma_data		*dma_data;
	struct dma_async_tx_descriptor	*dma_desc;
	struct dma_slave_config		dma_cfg;
	enum dma_transfer_direction	dma_dir;
	u32				port_window;
	int				ret;

	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
	 * register which will cause missed DMA interrupt. We could try to
	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
	 * register is corrupt, and we won't know if the DMA worked.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Because of HW issue #10, it seems like mixing sync DMA and async
	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
	 * using the channel for DMA.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
			chdat->tx ? "tx" : "rx", dma_remaining);
		return false;
	}

	/*
	 * DMA only full 32-byte chunks; the completion callback finishes
	 * any remaining tail bytes with PIO.
	 */
	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	dma_data = chdat->dma_data;
	if (!tusb_dma->multichannel) {
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (dma_data->dmareq < 0) {
			/* REVISIT: This should get blocked earlier, happens
			 * with MSC ErrorRecoveryTest
			 */
			WARN_ON(1);
			return false;
		}
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Since we're recycling dma areas, we need to clean or invalidate */
	if (chdat->tx) {
		dma_dir = DMA_MEM_TO_DEV;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_TO_DEVICE);
	} else {
		dma_dir = DMA_DEV_TO_MEM;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_FROM_DEVICE);
	}

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
	if ((dma_addr & 0x3) == 0) {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		port_window = 8;
	} else {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		port_window = 16;

		/* Unaligned buffers must go through the async FIFO address */
		fifo_addr = hw_ep->fifo_async;
	}

	dev_dbg(musb->controller,
		"ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
		chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);

	dma_cfg.src_addr = fifo_addr;
	dma_cfg.dst_addr = fifo_addr;
	dma_cfg.src_port_window_size = port_window;
	dma_cfg.src_maxburst = port_window;
	dma_cfg.dst_port_window_size = port_window;
	dma_cfg.dst_maxburst = port_window;

	ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
	if (ret) {
		dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
		return false;
	}

	dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
					chdat->transfer_len, dma_dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(musb->controller, "DMA prep_slave_single failed\n");
		return false;
	}

	dma_desc->callback = tusb_omap_dma_cb;
	dma_desc->callback_param = channel;
	dmaengine_submit(dma_desc);

	dev_dbg(musb->controller,
		"ep%i %s using %i-bit %s dma from %pad to %pad\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		dma_cfg.src_addr_width * 8,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		(dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
		(dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);

	/*
	 * Prepare MUSB for DMA transfer
	 */
	musb_ep_select(mbase, chdat->epnum);
	if (chdat->tx) {
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/* Start DMA transfer */
	dma_async_issue_pending(dma_data->chan);

	if (chdat->tx) {
		/* Send transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~0x7ff;
		psize |= chdat->transfer_packet_sz;
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Receive transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~(0x7ff << 16);
		psize |= (chdat->transfer_packet_sz << 16);
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}
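
/* Terminate any in-flight dmaengine transfer and mark the channel free. */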
static int tusb_omap_dma_abort(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);

	if (chdat->dma_data)
		dmaengine_terminate_all(chdat->dma_data->chan);

	channel->status = MUSB_DMA_STATUS_FREE;

	return 0;
}

/*
 * Claim a free dmareq line for this endpoint by programming the first
 * empty 5-bit slot in TUSB_DMA_EP_MAP (bit 4 of each slot selects tx).
 */
static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	int i, dmareq_nr = -1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		int cur = (reg & (0xf << (i * 5))) >> (i * 5);

		if (cur == 0) {
			dmareq_nr = i;
			break;
		}
	}

	if (dmareq_nr == -1)
		return -EAGAIN;

	reg |= (chdat->epnum << (dmareq_nr * 5));
	if (chdat->tx)
		reg |= ((1 << 4) << (dmareq_nr * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = &chdat->tusb_dma->dma_pool[dmareq_nr];

	return 0;
}
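
/*
 * Clear this channel's 5-bit slot in TUSB_DMA_EP_MAP and drop the
 * dma_data reference.
 */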
static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg;

	if (!chdat || !chdat->dma_data || chdat->dma_data->dmareq < 0)
		return;

	reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	reg &= ~(0x1f << (chdat->dma_data->dmareq * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = NULL;
}

/* One pre-allocated musb dma_channel per dmareq line */
static struct dma_channel *dma_channel_pool[MAX_DMAREQ];

static struct dma_channel *
tusb_omap_dma_allocate(struct dma_controller *c,
		struct musb_hw_ep *hw_ep,
		u8 tx)
{
	int ret, i;
	struct tusb_omap_dma	*tusb_dma;
	struct musb		*musb;
	struct dma_channel	*channel = NULL;
	struct tusb_omap_dma_ch	*chdat = NULL;
	struct tusb_dma_data	*dma_data = NULL;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	musb = tusb_dma->controller.musb;

	/* REVISIT: Why does dmareq5 not work? */
	if (hw_ep->epnum == 0) {
		dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
		return NULL;
	}

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];

		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
			ch->status = MUSB_DMA_STATUS_FREE;
			channel = ch;
			chdat = ch->private_data;
			break;
		}
	}

	if (!channel)
		return NULL;

	chdat->musb = tusb_dma->controller.musb;
	chdat->tbase = tusb_dma->tbase;
	chdat->hw_ep = hw_ep;
	chdat->epnum = hw_ep->epnum;
	chdat->completed_len = 0;
	chdat->tusb_dma = tusb_dma;
	if (tx)
		chdat->tx = 1;
	else
		chdat->tx = 0;

	channel->max_len = 0x7fffffff;
	channel->desired_mode = 0;
	channel->actual_len = 0;

	if (!chdat->dma_data) {
		if (tusb_dma->multichannel) {
			ret = tusb_omap_dma_allocate_dmareq(chdat);
			if (ret != 0)
				goto free_dmareq;
		} else {
			chdat->dma_data = &tusb_dma->dma_pool[0];
		}
	}

	dma_data = chdat->dma_data;

	dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
		chdat->epnum,
		chdat->tx ? "tx" : "rx",
		tusb_dma->multichannel ? "dedicated" : "shared",
		dma_data->dmareq);

	return channel;

free_dmareq:
	tusb_omap_dma_free_dmareq(chdat);

	dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	return NULL;
}
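
/*
 * Hand the channel back: stop any dmaengine activity and release the
 * dmareq line so it can be remapped by the next allocation.
 */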
static void tusb_omap_dma_release(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct musb *musb = chdat->musb;

	dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);

	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	dmaengine_terminate_sync(chdat->dma_data->chan);
	tusb_omap_dma_free_dmareq(chdat);
}

void tusb_dma_controller_destroy(struct dma_controller *c)
{
	struct tusb_omap_dma	*tusb_dma;
	int			i;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];

		if (ch) {
			kfree(ch->private_data);
			kfree(ch);
		}

		/* Free up the DMA channels */
		if (tusb_dma && tusb_dma->dma_pool[i].chan)
			dma_release_channel(tusb_dma->dma_pool[i].chan);
	}

	kfree(tusb_dma);
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);
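
/*
 * Request the dmaengine channels backing the TUSB dmareq lines: only
 * dmareq0 in shared mode, all MAX_DMAREQ lines when multichannel DMA
 * is available.
 */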
static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
{
	struct musb *musb = tusb_dma->controller.musb;
	int i;
	int ret = 0;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		/*
		 * Request DMA channels:
		 * - one channel in case of non multichannel mode
		 * - MAX_DMAREQ number of channels in multichannel mode
		 */
		if (i == 0 || tusb_dma->multichannel) {
			char ch_name[8];

			sprintf(ch_name, "dmareq%d", i);
			dma_data->chan = dma_request_chan(musb->controller,
							  ch_name);
			if (IS_ERR(dma_data->chan)) {
				dev_err(musb->controller,
					"Failed to request %s\n", ch_name);
				ret = PTR_ERR(dma_data->chan);
				/* don't release the error pointer on unwind */
				dma_data->chan = NULL;
				goto dma_error;
			}

			dma_data->dmareq = i;
		} else {
			dma_data->dmareq = -1;
		}
	}

	return 0;

dma_error:
	for (; i >= 0; i--) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		if (dma_data->chan)
			dma_release_channel(dma_data->chan);
	}

	return ret;
}
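
/*
 * Create the TUSB DMA controller: mask the DMA interrupts, clear the
 * endpoint map, program the request configuration, and allocate the
 * per-channel bookkeeping plus the dmaengine channel pool.
 */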
struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base)
{
	void __iomem		*tbase = musb->ctrl_base;
	struct tusb_omap_dma	*tusb_dma;
	int			i;

	/* REVISIT: Get dmareq lines used from board-*.c */

	musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);

	musb_writel(tbase, TUSB_DMA_REQ_CONF,
		TUSB_DMA_REQ_CONF_BURST_SIZE(2)
		| TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
		| TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));

	tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
	if (!tusb_dma)
		goto out;

	tusb_dma->controller.musb = musb;
	tusb_dma->tbase = musb->ctrl_base;

	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
	tusb_dma->controller.channel_release = tusb_omap_dma_release;
	tusb_dma->controller.channel_program = tusb_omap_dma_program;
	tusb_dma->controller.channel_abort = tusb_omap_dma_abort;

	if (musb->tusb_revision >= TUSB_REV_30)
		tusb_dma->multichannel = 1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel	*ch;
		struct tusb_omap_dma_ch	*chdat;

		ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
		if (!ch)
			goto cleanup;

		dma_channel_pool[i] = ch;

		chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
		if (!chdat)
			goto cleanup;

		ch->status = MUSB_DMA_STATUS_UNKNOWN;
		ch->private_data = chdat;
	}

	if (tusb_omap_allocate_dma_pool(tusb_dma))
		goto cleanup;

	return &tusb_dma->controller;

cleanup:
	musb_dma_controller_destroy(&tusb_dma->controller);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_create);
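
/*
 * Wiring sketch (assumption, not part of this file): platform glue such
 * as tusb6010.c is expected to plumb these in through its
 * struct musb_platform_ops, roughly:
 *
 *	.dma_init	= tusb_dma_controller_create,
 *	.dma_exit	= tusb_dma_controller_destroy,
 *
 * so the MUSB core can create/destroy the controller at probe/remove.
 */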