#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define GDMA_REG_SRC_ADDR(x)	(0x00 + (x) * 0x10)
#define GDMA_REG_DST_ADDR(x)	(0x04 + (x) * 0x10)

#define GDMA_REG_CTRL0(x)	(0x08 + (x) * 0x10)
#define GDMA_REG_CTRL0_TX_MASK	0xffff
#define GDMA_REG_CTRL0_TX_SHIFT	16
#define GDMA_REG_CTRL0_CURR_MASK	0xff
#define GDMA_REG_CTRL0_CURR_SHIFT	8
#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
#define GDMA_REG_CTRL0_BURST_MASK	0x7
#define GDMA_REG_CTRL0_BURST_SHIFT	3
#define GDMA_REG_CTRL0_DONE_INT	BIT(2)
#define GDMA_REG_CTRL0_ENABLE	BIT(1)
#define GDMA_REG_CTRL0_SW_MODE	BIT(0)

#define GDMA_REG_CTRL1(x)	(0x0c + (x) * 0x10)
#define GDMA_REG_CTRL1_SEG_MASK	0xf
#define GDMA_REG_CTRL1_SEG_SHIFT	22
#define GDMA_REG_CTRL1_REQ_MASK	0x3f
#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
#define GDMA_REG_CTRL1_NEXT_SHIFT	3
#define GDMA_REG_CTRL1_COHERENT	BIT(2)
#define GDMA_REG_CTRL1_FAIL	BIT(1)
#define GDMA_REG_CTRL1_MASK	BIT(0)

#define GDMA_REG_UNMASK_INT	0x200
#define GDMA_REG_DONE_INT	0x204

#define GDMA_REG_GCT		0x220
#define GDMA_REG_GCT_CHAN_MASK	0x3
#define GDMA_REG_GCT_CHAN_SHIFT	3
#define GDMA_REG_GCT_VER_MASK	0x3
#define GDMA_REG_GCT_VER_SHIFT	1
#define GDMA_REG_GCT_ARBIT_RR	BIT(0)

#define GDMA_REG_REQSTS		0x2a0
#define GDMA_REG_ACKSTS		0x2a4
#define GDMA_REG_FINSTS		0x2a8

/* RT305X-specific register layout */
#define GDMA_RT305X_CTRL0_REQ_MASK	0xf
#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT	12
#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT	8

#define GDMA_RT305X_CTRL1_FAIL	BIT(4)
#define GDMA_RT305X_CTRL1_NEXT_MASK	0x7
#define GDMA_RT305X_CTRL1_NEXT_SHIFT	1

#define GDMA_RT305X_STATUS_INT	0x80
#define GDMA_RT305X_STATUS_SIGNAL	0x84
#define GDMA_RT305X_GCT		0x88

/* per-channel performance counter registers */
#define GDMA_REG_PERF_START(x)	(0x230 + (x) * 0x8)
#define GDMA_REG_PERF_END(x)	(0x234 + (x) * 0x8)

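/*
 * Burst length encoding for the CTRL0 BURST field; each step doubles the
 * burst size, starting at 4 bytes (one 32-bit bus beat).
 */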
enum gdma_dma_transfer_size {
	GDMA_TRANSFER_SIZE_4BYTE = 0,
	GDMA_TRANSFER_SIZE_8BYTE = 1,
	GDMA_TRANSFER_SIZE_16BYTE = 2,
	GDMA_TRANSFER_SIZE_32BYTE = 3,
	GDMA_TRANSFER_SIZE_64BYTE = 4,
};

struct gdma_dma_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 len;
};

struct gdma_dma_desc {
	struct virt_dma_desc vdesc;

	enum dma_transfer_direction direction;
	bool cyclic;

	u32 residue;
	unsigned int num_sgs;
	struct gdma_dma_sg sg[];
};

struct gdma_dmaengine_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	unsigned int slave_id;

	dma_addr_t fifo_addr;
	enum gdma_dma_transfer_size burst_size;

	struct gdma_dma_desc *desc;
	unsigned int next_sg;
};

struct gdma_dma_dev {
	struct dma_device ddev;
	struct device_dma_parameters dma_parms;
	struct gdma_data *data;
	void __iomem *base;
	struct tasklet_struct task;
	volatile unsigned long chan_issued;
	atomic_t cnt;

	struct gdma_dmaengine_chan chan[];
};

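/*
 * Per-SoC description: number of channels, which register latches the
 * "done" interrupt status, and hooks for controller init and for
 * programming a single segment onto a channel.
 */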
struct gdma_data {
	int chancnt;
	u32 done_int_reg;
	void (*init)(struct gdma_dma_dev *dma_dev);
	int (*start_transfer)(struct gdma_dmaengine_chan *chan);
};

static struct gdma_dma_dev *gdma_dma_chan_get_dev(
	struct gdma_dmaengine_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
			    ddev);
}

static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
}

static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct gdma_dma_desc, vdesc);
}

static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
				     unsigned int reg)
{
	return readl(dma_dev->base + reg);
}

static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
				  unsigned int reg, uint32_t val)
{
	writel(val, dma_dev->base + reg);
}

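/*
 * Map a dmaengine maxburst value (in bus words) onto the coarser hardware
 * burst encoding, rounding down to the largest supported burst size.
 */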
static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
{
	if (maxburst < 2)
		return GDMA_TRANSFER_SIZE_4BYTE;
	else if (maxburst < 4)
		return GDMA_TRANSFER_SIZE_8BYTE;
	else if (maxburst < 8)
		return GDMA_TRANSFER_SIZE_16BYTE;
	else if (maxburst < 16)
		return GDMA_TRANSFER_SIZE_32BYTE;
	else
		return GDMA_TRANSFER_SIZE_64BYTE;
}

static int gdma_dma_config(struct dma_chan *c,
			   struct dma_slave_config *config)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);

	if (config->device_fc) {
		dev_err(dma_dev->ddev.dev, "flow controller is not supported\n");
		return -EINVAL;
	}

	switch (config->direction) {
	case DMA_MEM_TO_DEV:
		if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->dst_addr;
		chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
		break;
	case DMA_DEV_TO_MEM:
		if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->src_addr;
		chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
		break;
	default:
		dev_err(dma_dev->ddev.dev, "invalid direction %d\n",
			config->direction);
		return -EINVAL;
	}

	return 0;
}

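/*
 * Drop all queued descriptors and wait (up to five seconds) for the
 * hardware to clear the channel ENABLE bit before forcing CTRL0 to zero.
 */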
static int gdma_dma_terminate_all(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags, timeout;
	LIST_HEAD(head);
	int i = 0;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	chan->desc = NULL;
	clear_bit(chan->id, &dma_dev->chan_issued);
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	/* wait for the running transfer to stop */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
			GDMA_REG_CTRL0_ENABLE) {
		if (time_after_eq(jiffies, timeout)) {
			dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
				chan->id);
			/* force the channel off */
			gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
			break;
		}
		cpu_relax();
		i++;
	}

	if (i)
		dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
			chan->id, i);

	return 0;
}

static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
}

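/*
 * Program and start one segment on an RT305X-style channel: pick source,
 * destination and request lines according to the transfer direction, then
 * write CTRL1 followed by CTRL0 with the ENABLE and DONE_INT bits set.
 */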
static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	u32 ctrl0, ctrl1;

	/* verify the channel is idle */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt305x_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
			(8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
			(chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		/* memory to memory transfers run in software (SW) mode */
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE |
			(8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else {
		dev_err(dma_dev->ddev.dev, "invalid direction %d\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure the address and CTRL1 writes land before enabling */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
		gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
		gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
		gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
		gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
}

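/*
 * RT3883 variant of the segment setup: the request lines and the COHERENT
 * flag live in CTRL1 instead of CTRL0, but the overall sequence
 * (addresses, CTRL1, barrier, CTRL0 with ENABLE) is the same.
 */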
static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	u32 ctrl0, ctrl1;

	/* verify the channel is idle */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt3883_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
		ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else {
		dev_err(dma_dev->ddev.dev, "invalid direction %d\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure the address and CTRL1 writes land before enabling */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
				      struct gdma_dmaengine_chan *chan)
{
	return dma_dev->data->start_transfer(chan);
}

static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->desc = NULL;
		return 0;
	}
	chan->desc = to_gdma_dma_desc(vdesc);
	chan->next_sg = 0;

	return 1;
}

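/*
 * Per-channel completion handling: cyclic descriptors are re-armed
 * forever, while scatter-gather descriptors advance segment by segment
 * until the whole list has been transferred and the cookie completes.
 */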
static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
			      struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_desc *desc;
	unsigned long flags;
	int chan_issued;

	chan_issued = 0;
	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc) {
		if (desc->cyclic) {
			vchan_cyclic_callback(&desc->vdesc);
			if (chan->next_sg == desc->num_sgs)
				chan->next_sg = 0;
			chan_issued = 1;
		} else {
			desc->residue -= desc->sg[chan->next_sg - 1].len;
			if (chan->next_sg == desc->num_sgs) {
				list_del(&desc->vdesc.node);
				vchan_cookie_complete(&desc->vdesc);
				chan_issued = gdma_next_desc(chan);
			} else {
				chan_issued = 1;
			}
		}
	} else {
		dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
			chan->id);
	}
	if (chan_issued)
		set_bit(chan->id, &dma_dev->chan_issued);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static irqreturn_t gdma_dma_irq(int irq, void *devid)
{
	struct gdma_dma_dev *dma_dev = devid;
	u32 done, done_reg;
	unsigned int i;

	done_reg = dma_dev->data->done_int_reg;
	done = gdma_dma_read(dma_dev, done_reg);
	if (unlikely(!done))
		return IRQ_NONE;

	/* acknowledge the channels that finished */
	gdma_dma_write(dma_dev, done_reg, done);

	i = 0;
	while (done) {
		if (done & 0x1) {
			gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
			atomic_dec(&dma_dev->cnt);
		}
		done >>= 1;
		i++;
	}

	/* start any transfers that were queued while the hardware was busy */
	if (dma_dev->chan_issued)
		tasklet_schedule(&dma_dev->task);

	return IRQ_HANDLED;
}

static void gdma_dma_issue_pending(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
		if (gdma_next_desc(chan)) {
			set_bit(chan->id, &dma_dev->chan_issued);
			tasklet_schedule(&dma_dev->task);
		} else {
			dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
				chan->id);
		}
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
		struct dma_chan *c, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
	if (!desc) {
		dev_err(c->device->dev, "failed to allocate sg descriptor\n");
		return NULL;
	}
	desc->residue = 0;

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = sg_dma_address(sg);
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = sg_dma_address(sg);
		} else {
			dev_err(c->device->dev, "invalid direction %d\n",
				direction);
			goto free_desc;
		}

		if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
			dev_err(c->device->dev, "sg len too large %u\n",
				sg_dma_len(sg));
			goto free_desc;
		}
		desc->sg[i].len = sg_dma_len(sg);
		desc->residue += sg_dma_len(sg);
	}

	desc->num_sgs = sg_len;
	desc->direction = direction;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

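/*
 * Memory-to-memory copies are split into segments of at most
 * GDMA_REG_CTRL0_TX_MASK bytes, since the hardware length field is only
 * 16 bits wide; the burst size is derived from the total length in words.
 */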
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;
	size_t xfer_count;

	if (!len)
		return NULL;

	chan->burst_size = gdma_dma_maxburst(len >> 2);

	xfer_count = GDMA_REG_CTRL0_TX_MASK;
	num_periods = DIV_ROUND_UP(len, xfer_count);

	desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
	if (!desc) {
		dev_err(c->device->dev, "failed to allocate memcpy descriptor\n");
		return NULL;
	}
	desc->residue = len;

	for (i = 0; i < num_periods; i++) {
		desc->sg[i].src_addr = src;
		desc->sg[i].dst_addr = dest;
		if (len > xfer_count)
			desc->sg[i].len = xfer_count;
		else
			desc->sg[i].len = len;
		src += desc->sg[i].len;
		dest += desc->sg[i].len;
		len -= desc->sg[i].len;
	}

	desc->num_sgs = num_periods;
	desc->direction = DMA_MEM_TO_MEM;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

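/*
 * Cyclic (audio-style) transfers: the buffer must be an exact multiple of
 * the period length, and each period has to fit in the 16-bit hardware
 * length field. The descriptor is re-armed from the interrupt handler.
 */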
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;

	if (buf_len % period_len)
		return NULL;

	if (period_len > GDMA_REG_CTRL0_TX_MASK) {
		dev_err(c->device->dev, "cyclic len too large %zu\n",
			period_len);
		return NULL;
	}

	num_periods = buf_len / period_len;
	desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
	if (!desc) {
		dev_err(c->device->dev, "failed to allocate cyclic descriptor\n");
		return NULL;
	}
	desc->residue = buf_len;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = buf_addr;
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = buf_addr;
		} else {
			dev_err(c->device->dev, "invalid direction %d\n",
				direction);
			goto free_desc;
		}
		desc->sg[i].len = period_len;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->direction = direction;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

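/*
 * Residue reporting is segment granular: for the descriptor that is
 * currently in flight the value comes from the driver's own bookkeeping,
 * for queued descriptors the full length is reported.
 */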
static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
					  dma_cookie_t cookie,
					  struct dma_tx_state *state)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	struct gdma_dma_desc *desc;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc && (cookie == desc->vdesc.tx.cookie)) {
		/*
		 * The cookie belongs to the running descriptor: cyclic
		 * transfers estimate the residue from the period counter,
		 * others report the value updated by the completion IRQ.
		 */
		if (desc->cyclic)
			state->residue = desc->residue -
				((chan->next_sg - 1) * desc->sg[0].len);
		else
			state->residue = desc->residue;
	} else {
		vdesc = vchan_find_desc(&chan->vchan, cookie);
		if (vdesc)
			state->residue = to_gdma_dma_desc(vdesc)->residue;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(c->device->dev, "tx residue %u bytes\n", state->residue);

	return status;
}

static void gdma_dma_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
}

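/*
 * Deferred work: scan the issued-channel bitmap round-robin, starting
 * where the previous run left off, and start transfers on idle channels.
 * No more than two channels are kept running at once (tracked in cnt).
 */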
static void gdma_dma_tasklet(struct tasklet_struct *t)
{
	struct gdma_dma_dev *dma_dev = from_tasklet(dma_dev, t, task);
	struct gdma_dmaengine_chan *chan;
	static unsigned int last_chan;
	unsigned int i, chan_mask;

	/* round-robin over the channels, resuming after the last one served */
	i = last_chan;
	chan_mask = dma_dev->data->chancnt - 1;
	do {
		/*
		 * Only allow two channels to run at the same time; remember
		 * where we stopped so the next pass continues from here.
		 */
		if (atomic_read(&dma_dev->cnt) >= 2) {
			last_chan = i;
			break;
		}

		if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
			chan = &dma_dev->chan[i];
			if (chan->desc) {
				atomic_inc(&dma_dev->cnt);
				gdma_start_transfer(dma_dev, chan);
			} else {
				dev_dbg(dma_dev->ddev.dev,
					"chan %d no desc to issue\n",
					chan->id);
			}
			if (!dma_dev->chan_issued)
				break;
		}

		i = (i + 1) & chan_mask;
	} while (i != last_chan);
}

742
743static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
744{
745 u32 gct;
746
747
748 gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
749
750 gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
751 dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
752 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
753 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
754 GDMA_REG_GCT_CHAN_MASK));
755}
756
757static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
758{
759 u32 gct;
760
761
762 gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
763
764 gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
765 dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
766 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
767 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
768 GDMA_REG_GCT_CHAN_MASK));
769}
770
771static struct gdma_data rt305x_gdma_data = {
772 .chancnt = 8,
773 .done_int_reg = GDMA_RT305X_STATUS_INT,
774 .init = rt305x_gdma_init,
775 .start_transfer = rt305x_gdma_start_transfer,
776};
777
778static struct gdma_data rt3883_gdma_data = {
779 .chancnt = 16,
780 .done_int_reg = GDMA_REG_DONE_INT,
781 .init = rt3883_gdma_init,
782 .start_transfer = rt3883_gdma_start_transfer,
783};
784
785static const struct of_device_id gdma_of_match_table[] = {
786 { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
787 { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
788 { },
789};
790MODULE_DEVICE_TABLE(of, gdma_of_match_table);
791
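/*
 * Probe: map the registers, hook up the shared interrupt and tasklet,
 * register one virt-dma channel per hardware channel and finally expose
 * the controller through the dmaengine and OF DMA frameworks.
 */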
static int gdma_dma_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct gdma_dmaengine_chan *chan;
	struct gdma_dma_dev *dma_dev;
	struct dma_device *dd;
	unsigned int i;
	int ret;
	int irq;
	void __iomem *base;
	struct gdma_data *data;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	match = of_match_device(gdma_of_match_table, &pdev->dev);
	if (!match)
		return -EINVAL;
	data = (struct gdma_data *)match->data;

	dma_dev = devm_kzalloc(&pdev->dev,
			       struct_size(dma_dev, chan, data->chancnt),
			       GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;
	dma_dev->data = data;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dma_dev->base = base;
	tasklet_setup(&dma_dev->task, gdma_dma_tasklet);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
			       0, dev_name(&pdev->dev), dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return ret;
	}

	ret = device_reset(&pdev->dev);
	if (ret)
		dev_err(&pdev->dev, "failed to reset: %d\n", ret);

	dd = &dma_dev->ddev;
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
	dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
	dd->device_config = gdma_dma_config;
	dd->device_terminate_all = gdma_dma_terminate_all;
	dd->device_tx_status = gdma_dma_tx_status;
	dd->device_issue_pending = gdma_dma_issue_pending;

	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	dd->dev = &pdev->dev;
	dd->dev->dma_parms = &dma_dev->dma_parms;
	dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < data->chancnt; i++) {
		chan = &dma_dev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = gdma_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	/* apply the SoC-specific controller setup */
	data->init(dma_dev);

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(&pdev->dev, "failed to register dma device\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register of dma controller\n");
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dma_dev);

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
	return ret;
}

static int gdma_dma_remove(struct platform_device *pdev)
{
	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);

	tasklet_kill(&dma_dev->task);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dma_dev->ddev);

	return 0;
}

static struct platform_driver gdma_dma_driver = {
	.probe = gdma_dma_probe,
	.remove = gdma_dma_remove,
	.driver = {
		.name = "gdma-rt2880",
		.of_match_table = gdma_of_match_table,
	},
};
module_platform_driver(gdma_dma_driver);

MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");