/*
 * Driver for the ST-Ericsson DMA40 DMA controller (ste_dma40),
 * found in the DB8500 family of SoCs.
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up to trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		BIT(31)
#define D40_ALLOC_PHY		BIT(30)
#define D40_ALLOC_LOG_FREE	0

#define D40_MEMCPY_MAX_CHANS	8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hardware is powered off.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * The v4a and v4b variants of the controller have different register
 * layouts, so each variant has its own list of additional registers to
 * save/restore across a power-off.
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of
 * pre_alloc_lli.
 * @dma_addr: DMA address, if mapped.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool	   reserved;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
	bool	   use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptor.
 * @done: Completed jobs.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 done;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_transfer_direction	runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers to backup
 * @backup_size: the size of the registers to backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to the interrupt lookup table
 * @il_size: the size of the interrupt lookup table
 * @init_reg: the pointer to the list of registers to initialize
 * @init_reg_size: the size of the initialization register list
 */
struct d40_gen_dmac {
	u32				*backup;
	u32				 backup_size;
	u32				 realtime_en;
	u32				 realtime_clear;
	u32				 high_prio_en;
	u32				 high_prio_clear;
	u32				 interrupt_en;
	u32				 interrupt_clear;
	struct d40_interrupt_lookup	*il;
	u32				 il_size;
	struct d40_reg_val		*init_reg;
	u32				 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 * transfers).
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters for the channel.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of variant-specific registers (v4a or v4b).
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_memcpy_chans;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct device_dma_parameters	  dma_parms;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	struct regulator		 *lcpa_regulator;

	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
	u32				  reg_val_backup[BACKUP_REGS_SZ];
	u32				  reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32				 *reg_val_backup_chan;
	u16				  gcc_pwr_off_mask;
	struct d40_gen_dmac		  gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
	d40_err(chan2dev(d40c), format, ## arg)

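/*
 * Allocate LLI space for a job. A single-link job uses the descriptor's
 * pre-allocated area; longer jobs get a dedicated pool, which for
 * physical channels is also mapped for DMA.
 */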
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

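/*
 * Reserve one LCLA link entry for this job on its physical channel.
 * Returns the entry index, or -EINVAL if none is free.
 */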
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * start on 1 since 0 can't be used since zero is used as end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;

}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

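/*
 * Get a descriptor: reuse an already-acked descriptor from the client
 * list if possible, otherwise allocate a fresh one from the slab cache.
 */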
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{

	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}

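/*
 * Load the links of a logical-channel job into LCPA (first link) and
 * LCLA (subsequent links), allocating LCLA entries as needed.
 */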
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we did not
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space.
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controller during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
			chan->dma_cfg.dir == DMA_DEV_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need
	 * to link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram.
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}

/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes.
 *
 * Calculate the total number of dma elements required to send the
 * entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;

	if (!IS_ALIGNED(size, max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

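/*
 * Issue a command to a physical channel and, for suspend requests,
 * poll until the channel reports itself stopped or suspended.
 */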
static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				"unable to suspend the chl %d (log: %d) status %x\n",
				d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

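/*
 * Release every descriptor the channel still holds: done, active,
 * queued, pending, client-owned and prepared-but-not-submitted jobs.
 */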
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				"unable to stop the event_line chl %d (log: %d)"
				"status %x\n", d40c->phy_chan->num,
				 d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel.  Retry to ensure
	 * it does.  Usually only one retry is sufficient.
	 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
				~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;

	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

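/*
 * Issue a command to a logical channel by toggling its event line(s);
 * the physical channel is only stopped once no event lines remain.
 */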
static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

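/* Number of bytes left to transfer in the currently loaded link */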
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * d40c->dma_cfg.dst_info.data_width;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

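/*
 * Called from the interrupt handler on terminal count: reload partially
 * loaded jobs, start the next queued job and kick the tasklet that
 * completes finished descriptors.
 */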
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * Reload a partially loaded cyclic job, but only once the
		 * part currently in HW has completed (nothing is linked and
		 * no residue is left).
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL) {
			d40c->busy = false;

			pm_runtime_mark_last_busy(d40c->base->dev);
			pm_runtime_put_autosuspend(d40c->base->dev);
		}

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	bool callback_active;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback_active && callback)
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

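/*
 * One interrupt handler services all channels: read every interrupt
 * status register, then dispatch each set bit to its channel.
 */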
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(BIT(idx), base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
	    (conf->dev_type < 0)) {
		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == DMA_DEV_TO_DEV) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    conf->src_info.data_width !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    conf->dst_info.data_width) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & BIT(log_event_line))) {
			phy->allocated_src |= BIT(log_event_line);
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & BIT(log_event_line))) {
			phy->allocated_dst |= BIT(log_event_line);
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~BIT(log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~BIT(log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

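/*
 * Find and claim a physical channel (and, for logical channels, an
 * event line on it) matching the channel's configured direction.
 */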
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type = d40c->dma_cfg.dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	int num_phy_chans;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;
	num_phy_chans = d40c->base->num_phy_chans;

	if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
		   d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
			/* Find physical half channel */
			if (d40c->dma_cfg.use_fixed_channel) {
				i = d40c->dma_cfg.phy_channel;
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			} else {
				for (i = 0; i < num_phy_chans; i++) {
					if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
						goto found_phy;
				}
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_log;
		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];

		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_phy;

		/* Generate interrupt at end of transfer or relink. */
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);

		/* Generate interrupt on error. */
		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);

	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
		is_src = false;
	else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
		is_src = true;
	else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction == DMA_DEV_TO_MEM)
		src_dev_addr = chan->runtime_addr;
	else if (direction == DMA_MEM_TO_DEV)
		dst_dev_addr = chan->runtime_addr;

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	/*
	 * add descriptor to the prepare queue in order to be able
	 * to free them later in terminate_all
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 rtreg;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = BIT(event);
	u32 prioreg;
	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;

	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;

	/*
	 * Due to a hardware bug, in some cases a logical channel triggered by
	 * a high priority destination event line can generate extra packet
	 * transactions.
	 *
	 * The workaround is to not set the high priority level for the
	 * destination event lines that trigger logical channels.
	 */
	if (!src && chan_is_logical(d40c))
		highprio = false;

	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);

	if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
}

#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
#define D40_DT_FLAGS_HIGH_PRIO(flags)  ((flags >> 4) & 0x1)

static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
				  struct of_dma *ofdma)
{
	struct stedma40_chan_cfg cfg;
	dma_cap_mask_t cap;
	u32 flags;

	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	cfg.dev_type = dma_spec->args[0];
	flags = dma_spec->args[2];

	switch (D40_DT_FLAGS_MODE(flags)) {
	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
	}

	switch (D40_DT_FLAGS_DIR(flags)) {
	case 0:
		cfg.dir = DMA_MEM_TO_DEV;
		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	case 1:
		cfg.dir = DMA_DEV_TO_MEM;
		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	}

	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
		cfg.phy_channel = dma_spec->args[1];
		cfg.use_fixed_channel = true;
	}

	if (D40_DT_FLAGS_HIGH_PRIO(flags))
		cfg.high_priority = true;

	return dma_request_channel(cap, stedma40_filter, &cfg);
}

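/* DMA ENGINE functions */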
2413static int d40_alloc_chan_resources(struct dma_chan *chan)
2414{
2415 int err;
2416 unsigned long flags;
2417 struct d40_chan *d40c =
2418 container_of(chan, struct d40_chan, chan);
2419 bool is_free_phy;
2420 spin_lock_irqsave(&d40c->lock, flags);
2421
2422 dma_cookie_init(chan);
2423
2424
2425 if (!d40c->configured) {
2426 err = d40_config_memcpy(d40c);
2427 if (err) {
2428 chan_err(d40c, "Failed to configure memcpy channel\n");
2429 goto fail;
2430 }
2431 }
2432
2433 err = d40_allocate_channel(d40c, &is_free_phy);
2434 if (err) {
2435 chan_err(d40c, "Failed to allocate channel\n");
2436 d40c->configured = false;
2437 goto fail;
2438 }
2439
2440 pm_runtime_get_sync(d40c->base->dev);
2441
2442 d40_set_prio_realtime(d40c);
2443
2444 if (chan_is_logical(d40c)) {
2445 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2446 d40c->lcpa = d40c->base->lcpa_base +
2447 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2448 else
2449 d40c->lcpa = d40c->base->lcpa_base +
2450 d40c->dma_cfg.dev_type *
2451 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2452
		/* Unmask the Global Interrupt Mask. */
2454 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2455 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2456 }
2457
2458 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2459 chan_is_logical(d40c) ? "logical" : "physical",
2460 d40c->phy_chan->num,
2461 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2462
	/*
	 * Only write channel configuration to the DMA if the physical
	 * channel is not busy doing a job for another channel.
	 */
2469 if (is_free_phy)
2470 d40_config_write(d40c);
2471fail:
2472 pm_runtime_mark_last_busy(d40c->base->dev);
2473 pm_runtime_put_autosuspend(d40c->base->dev);
2474 spin_unlock_irqrestore(&d40c->lock, flags);
2475 return err;
2476}
2477
2478static void d40_free_chan_resources(struct dma_chan *chan)
2479{
2480 struct d40_chan *d40c =
2481 container_of(chan, struct d40_chan, chan);
2482 int err;
2483 unsigned long flags;
2484
2485 if (d40c->phy_chan == NULL) {
2486 chan_err(d40c, "Cannot free unallocated channel\n");
2487 return;
2488 }
2489
2490 spin_lock_irqsave(&d40c->lock, flags);
2491
2492 err = d40_free_dma(d40c);
2493
2494 if (err)
2495 chan_err(d40c, "Failed to free channel\n");
2496 spin_unlock_irqrestore(&d40c->lock, flags);
2497}
2498
2499static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2500 dma_addr_t dst,
2501 dma_addr_t src,
2502 size_t size,
2503 unsigned long dma_flags)
2504{
2505 struct scatterlist dst_sg;
2506 struct scatterlist src_sg;
2507
2508 sg_init_table(&dst_sg, 1);
2509 sg_init_table(&src_sg, 1);
2510
2511 sg_dma_address(&dst_sg) = dst;
2512 sg_dma_address(&src_sg) = src;
2513
2514 sg_dma_len(&dst_sg) = size;
2515 sg_dma_len(&src_sg) = size;
2516
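	/*
	 * DMA_NONE is not a slave direction, so d40_prep_sg() will treat
	 * this as a plain memory-to-memory transfer.
	 */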
2517 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2518}
2519
2520static struct dma_async_tx_descriptor *
2521d40_prep_memcpy_sg(struct dma_chan *chan,
2522 struct scatterlist *dst_sg, unsigned int dst_nents,
2523 struct scatterlist *src_sg, unsigned int src_nents,
2524 unsigned long dma_flags)
2525{
2526 if (dst_nents != src_nents)
2527 return NULL;
2528
2529 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2530}
2531
2532static struct dma_async_tx_descriptor *
2533d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2534 unsigned int sg_len, enum dma_transfer_direction direction,
2535 unsigned long dma_flags, void *context)
2536{
2537 if (!is_slave_direction(direction))
2538 return NULL;
2539
2540 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2541}
2542
2543static struct dma_async_tx_descriptor *
2544dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2545 size_t buf_len, size_t period_len,
2546 enum dma_transfer_direction direction, unsigned long flags)
2547{
2548 unsigned int periods = buf_len / period_len;
2549 struct dma_async_tx_descriptor *txd;
2550 struct scatterlist *sg;
2551 int i;
2552
2553 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2554 if (!sg)
2555 return NULL;
2556
2557 for (i = 0; i < periods; i++) {
2558 sg_dma_address(&sg[i]) = dma_addr;
2559 sg_dma_len(&sg[i]) = period_len;
2560 dma_addr += period_len;
2561 }
2562
	/*
	 * Mark the extra entry as a chain link pointing back to the first
	 * entry, making the scatterlist circular for the cyclic transfer.
	 */
	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;
2567
2568 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2569 DMA_PREP_INTERRUPT);
2570
2571 kfree(sg);
2572
2573 return txd;
2574}
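/*
 * Illustrative client usage of the cyclic path above (the variable names
 * are examples, not taken from this driver):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 */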
2575
2576static enum dma_status d40_tx_status(struct dma_chan *chan,
2577 dma_cookie_t cookie,
2578 struct dma_tx_state *txstate)
2579{
2580 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2581 enum dma_status ret;
2582
2583 if (d40c->phy_chan == NULL) {
2584 chan_err(d40c, "Cannot read status of unallocated channel\n");
		return DMA_ERROR;
2586 }
2587
2588 ret = dma_cookie_status(chan, cookie, txstate);
2589 if (ret != DMA_COMPLETE)
2590 dma_set_residue(txstate, stedma40_residue(chan));
2591
2592 if (d40_is_paused(d40c))
2593 ret = DMA_PAUSED;
2594
2595 return ret;
2596}
2597
2598static void d40_issue_pending(struct dma_chan *chan)
2599{
2600 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2601 unsigned long flags;
2602
2603 if (d40c->phy_chan == NULL) {
2604 chan_err(d40c, "Channel is not allocated!\n");
2605 return;
2606 }
2607
2608 spin_lock_irqsave(&d40c->lock, flags);
2609
2610 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2611
	/* Busy means that queued jobs are already being processed */
2613 if (!d40c->busy)
2614 (void) d40_queue_start(d40c);
2615
2616 spin_unlock_irqrestore(&d40c->lock, flags);
2617}
2618
2619static int d40_terminate_all(struct dma_chan *chan)
2620{
2621 unsigned long flags;
2622 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2623 int ret;
2624
2625 if (d40c->phy_chan == NULL) {
2626 chan_err(d40c, "Channel is not allocated!\n");
2627 return -EINVAL;
2628 }
2629
2630 spin_lock_irqsave(&d40c->lock, flags);
2631
2632 pm_runtime_get_sync(d40c->base->dev);
2633 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2634 if (ret)
2635 chan_err(d40c, "Failed to stop channel\n");
2636
2637 d40_term_all(d40c);
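	/* Balance the pm_runtime_get_sync() taken at the top of this function */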
2638 pm_runtime_mark_last_busy(d40c->base->dev);
2639 pm_runtime_put_autosuspend(d40c->base->dev);
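	/*
	 * If the channel was busy, also drop the runtime PM reference it
	 * took when it started processing jobs.
	 */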
2640 if (d40c->busy) {
2641 pm_runtime_mark_last_busy(d40c->base->dev);
2642 pm_runtime_put_autosuspend(d40c->base->dev);
2643 }
2644 d40c->busy = false;
2645
2646 spin_unlock_irqrestore(&d40c->lock, flags);
2647 return 0;
2648}
2649
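/* Round maxburst down to the largest burst (psize) the channel supports */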
2650static int
2651dma40_config_to_halfchannel(struct d40_chan *d40c,
2652 struct stedma40_half_channel_info *info,
2653 u32 maxburst)
2654{
2655 int psize;
2656
2657 if (chan_is_logical(d40c)) {
2658 if (maxburst >= 16)
2659 psize = STEDMA40_PSIZE_LOG_16;
2660 else if (maxburst >= 8)
2661 psize = STEDMA40_PSIZE_LOG_8;
2662 else if (maxburst >= 4)
2663 psize = STEDMA40_PSIZE_LOG_4;
2664 else
2665 psize = STEDMA40_PSIZE_LOG_1;
2666 } else {
2667 if (maxburst >= 16)
2668 psize = STEDMA40_PSIZE_PHY_16;
2669 else if (maxburst >= 8)
2670 psize = STEDMA40_PSIZE_PHY_8;
2671 else if (maxburst >= 4)
2672 psize = STEDMA40_PSIZE_PHY_4;
2673 else
2674 psize = STEDMA40_PSIZE_PHY_1;
2675 }
2676
2677 info->psize = psize;
2678 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2679
2680 return 0;
2681}
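/*
 * A minimal sketch of how a dmaengine client ends up in the runtime
 * configuration below; the device address and widths are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = 0x80120000,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */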
2682
/* Runtime reconfiguration extension */
2684static int d40_set_runtime_config(struct dma_chan *chan,
2685 struct dma_slave_config *config)
2686{
2687 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2688 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2689 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2690 dma_addr_t config_addr;
2691 u32 src_maxburst, dst_maxburst;
2692 int ret;
2693
2694 if (d40c->phy_chan == NULL) {
2695 chan_err(d40c, "Channel is not allocated!\n");
2696 return -EINVAL;
2697 }
2698
2699 src_addr_width = config->src_addr_width;
2700 src_maxburst = config->src_maxburst;
2701 dst_addr_width = config->dst_addr_width;
2702 dst_maxburst = config->dst_maxburst;
2703
2704 if (config->direction == DMA_DEV_TO_MEM) {
2705 config_addr = config->src_addr;
2706
2707 if (cfg->dir != DMA_DEV_TO_MEM)
2708 dev_dbg(d40c->base->dev,
2709 "channel was not configured for peripheral "
2710 "to memory transfer (%d) overriding\n",
2711 cfg->dir);
2712 cfg->dir = DMA_DEV_TO_MEM;
2713
		/* Configure the memory side */
2715 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2716 dst_addr_width = src_addr_width;
2717 if (dst_maxburst == 0)
2718 dst_maxburst = src_maxburst;
2719
2720 } else if (config->direction == DMA_MEM_TO_DEV) {
2721 config_addr = config->dst_addr;
2722
2723 if (cfg->dir != DMA_MEM_TO_DEV)
2724 dev_dbg(d40c->base->dev,
2725 "channel was not configured for memory "
2726 "to peripheral transfer (%d) overriding\n",
2727 cfg->dir);
2728 cfg->dir = DMA_MEM_TO_DEV;
2729
		/* Configure the memory side */
2731 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2732 src_addr_width = dst_addr_width;
2733 if (src_maxburst == 0)
2734 src_maxburst = dst_maxburst;
2735 } else {
2736 dev_err(d40c->base->dev,
2737 "unrecognized channel direction %d\n",
2738 config->direction);
2739 return -EINVAL;
2740 }
2741
	if (config_addr == 0) {
2743 dev_err(d40c->base->dev, "no address supplied\n");
2744 return -EINVAL;
2745 }
2746
2747 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2748 dev_err(d40c->base->dev,
2749 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2750 src_maxburst,
2751 src_addr_width,
2752 dst_maxburst,
2753 dst_addr_width);
2754 return -EINVAL;
2755 }
2756
2757 if (src_maxburst > 16) {
2758 src_maxburst = 16;
2759 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2760 } else if (dst_maxburst > 16) {
2761 dst_maxburst = 16;
2762 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2763 }
2764
	/* Only valid widths are: 1, 2, 4 and 8. */
2766 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2767 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2768 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2769 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2770 !is_power_of_2(src_addr_width) ||
2771 !is_power_of_2(dst_addr_width))
2772 return -EINVAL;
2773
2774 cfg->src_info.data_width = src_addr_width;
2775 cfg->dst_info.data_width = dst_addr_width;
2776
2777 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2778 src_maxburst);
2779 if (ret)
2780 return ret;
2781
2782 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2783 dst_maxburst);
2784 if (ret)
2785 return ret;
2786
	/* Fill in register values */
2788 if (chan_is_logical(d40c))
2789 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2790 else
2791 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2792
	/* These settings will take precedence later */
2794 d40c->runtime_addr = config_addr;
2795 d40c->runtime_direction = config->direction;
2796 dev_dbg(d40c->base->dev,
2797 "configured channel %s for %s, data width %d/%d, "
2798 "maxburst %d/%d elements, LE, no flow control\n",
2799 dma_chan_name(chan),
2800 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2801 src_addr_width, dst_addr_width,
2802 src_maxburst, dst_maxburst);
2803
2804 return 0;
2805}
2806
2807
/* Initialization functions */
2809static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2810 struct d40_chan *chans, int offset,
2811 int num_chans)
2812{
2813 int i = 0;
2814 struct d40_chan *d40c;
2815
2816 INIT_LIST_HEAD(&dma->channels);
2817
2818 for (i = offset; i < offset + num_chans; i++) {
2819 d40c = &chans[i];
2820 d40c->base = base;
2821 d40c->chan.device = dma;
2822
2823 spin_lock_init(&d40c->lock);
2824
2825 d40c->log_num = D40_PHY_CHAN;
2826
2827 INIT_LIST_HEAD(&d40c->done);
2828 INIT_LIST_HEAD(&d40c->active);
2829 INIT_LIST_HEAD(&d40c->queue);
2830 INIT_LIST_HEAD(&d40c->pending_queue);
2831 INIT_LIST_HEAD(&d40c->client);
2832 INIT_LIST_HEAD(&d40c->prepare_queue);
2833
2834 tasklet_init(&d40c->tasklet, dma_tasklet,
2835 (unsigned long) d40c);
2836
2837 list_add_tail(&d40c->chan.device_node,
2838 &dma->channels);
2839 }
2840}
2841
2842static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2843{
2844 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2845 dev->device_prep_slave_sg = d40_prep_slave_sg;
2846
2847 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2848 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2849
		/*
		 * This controller can only access addresses at even
		 * 32 bit boundaries, i.e. 2^2.
		 */
2854 dev->copy_align = 2;
2855 }
2856
2857 if (dma_has_cap(DMA_SG, dev->cap_mask))
2858 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2859
2860 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2861 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2862
2863 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2864 dev->device_free_chan_resources = d40_free_chan_resources;
2865 dev->device_issue_pending = d40_issue_pending;
2866 dev->device_tx_status = d40_tx_status;
2867 dev->device_config = d40_set_runtime_config;
2868 dev->device_pause = d40_pause;
2869 dev->device_resume = d40_resume;
2870 dev->device_terminate_all = d40_terminate_all;
2871 dev->dev = base->dev;
2872}
2873
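/*
 * Register three dma_device instances: the logical channels as a
 * slave/cyclic capable device, the dedicated memcpy channels, and the
 * reserved physical channels which are both slave and memcpy capable.
 */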
2874static int __init d40_dmaengine_init(struct d40_base *base,
2875 int num_reserved_chans)
2876{
	int err;
2878
2879 d40_chan_init(base, &base->dma_slave, base->log_chans,
2880 0, base->num_log_chans);
2881
2882 dma_cap_zero(base->dma_slave.cap_mask);
2883 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2884 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2885
2886 d40_ops_init(base, &base->dma_slave);
2887
2888 err = dma_async_device_register(&base->dma_slave);
2889
2890 if (err) {
2891 d40_err(base->dev, "Failed to register slave channels\n");
2892 goto failure1;
2893 }
2894
2895 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2896 base->num_log_chans, base->num_memcpy_chans);
2897
2898 dma_cap_zero(base->dma_memcpy.cap_mask);
2899 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2900 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2901
2902 d40_ops_init(base, &base->dma_memcpy);
2903
2904 err = dma_async_device_register(&base->dma_memcpy);
2905
2906 if (err) {
2907 d40_err(base->dev,
2908 "Failed to regsiter memcpy only channels\n");
2909 goto failure2;
2910 }
2911
2912 d40_chan_init(base, &base->dma_both, base->phy_chans,
2913 0, num_reserved_chans);
2914
2915 dma_cap_zero(base->dma_both.cap_mask);
2916 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2917 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2918 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2920
2921 d40_ops_init(base, &base->dma_both);
2922 err = dma_async_device_register(&base->dma_both);
2923
2924 if (err) {
2925 d40_err(base->dev,
2926 "Failed to register logical and physical capable channels\n");
2927 goto failure3;
2928 }
2929 return 0;
2930failure3:
2931 dma_async_device_unregister(&base->dma_memcpy);
2932failure2:
2933 dma_async_device_unregister(&base->dma_slave);
2934failure1:
2935 return err;
2936}
2937
/* Suspend resume functionality */
2939#ifdef CONFIG_PM_SLEEP
2940static int dma40_suspend(struct device *dev)
2941{
2942 struct platform_device *pdev = to_platform_device(dev);
2943 struct d40_base *base = platform_get_drvdata(pdev);
2944 int ret;
2945
2946 ret = pm_runtime_force_suspend(dev);
2947 if (ret)
2948 return ret;
2949
2950 if (base->lcpa_regulator)
2951 ret = regulator_disable(base->lcpa_regulator);
2952 return ret;
2953}
2954
2955static int dma40_resume(struct device *dev)
2956{
2957 struct platform_device *pdev = to_platform_device(dev);
2958 struct d40_base *base = platform_get_drvdata(pdev);
2959 int ret = 0;
2960
2961 if (base->lcpa_regulator) {
2962 ret = regulator_enable(base->lcpa_regulator);
2963 if (ret)
2964 return ret;
2965 }
2966
2967 return pm_runtime_force_resume(dev);
2968}
2969#endif
2970
2971#ifdef CONFIG_PM
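/* Copy a block of registers to, or restore it from, the backup buffer */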
2972static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2973 u32 *regaddr, int num, bool save)
2974{
2975 int i;
2976
2977 for (i = 0; i < num; i++) {
2978 void __iomem *addr = baseaddr + regaddr[i];
2979
2980 if (save)
2981 backup[i] = readl_relaxed(addr);
2982 else
2983 writel_relaxed(backup[i], addr);
2984 }
2985}
2986
2987static void d40_save_restore_registers(struct d40_base *base, bool save)
2988{
2989 int i;
2990
	/* Save/Restore channel specific registers */
2992 for (i = 0; i < base->num_phy_chans; i++) {
2993 void __iomem *addr;
2994 int idx;
2995
2996 if (base->phy_res[i].reserved)
2997 continue;
2998
2999 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
3000 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
3001
3002 dma40_backup(addr, &base->reg_val_backup_chan[idx],
3003 d40_backup_regs_chan,
3004 ARRAY_SIZE(d40_backup_regs_chan),
3005 save);
3006 }
3007
	/* Save/Restore global registers */
3009 dma40_backup(base->virtbase, base->reg_val_backup,
3010 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
3011 save);
3012
	/* Save/Restore registers only existing on dma40 v3 and later */
3014 if (base->gen_dmac.backup)
3015 dma40_backup(base->virtbase, base->reg_val_backup_v4,
3016 base->gen_dmac.backup,
3017 base->gen_dmac.backup_size,
3018 save);
3019}
3020
3021static int dma40_runtime_suspend(struct device *dev)
3022{
3023 struct platform_device *pdev = to_platform_device(dev);
3024 struct d40_base *base = platform_get_drvdata(pdev);
3025
3026 d40_save_restore_registers(base, true);
3027
	/* Don't disable/enable clocks for v1 due to HW bugs */
3029 if (base->rev != 1)
3030 writel_relaxed(base->gcc_pwr_off_mask,
3031 base->virtbase + D40_DREG_GCC);
3032
3033 return 0;
3034}
3035
3036static int dma40_runtime_resume(struct device *dev)
3037{
3038 struct platform_device *pdev = to_platform_device(dev);
3039 struct d40_base *base = platform_get_drvdata(pdev);
3040
3041 d40_save_restore_registers(base, false);
3042
3043 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3044 base->virtbase + D40_DREG_GCC);
3045 return 0;
3046}
3047#endif
3048
3049static const struct dev_pm_ops dma40_pm_ops = {
3050 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3051 SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3052 dma40_runtime_resume,
3053 NULL)
3054};
3055
/* Initialization functions. */
3057
3058static int __init d40_phy_res_init(struct d40_base *base)
3059{
3060 int i;
3061 int num_phy_chans_avail = 0;
3062 u32 val[2];
3063 int odd_even_bit = -2;
3064 int gcc = D40_DREG_GCC_ENA;
3065
3066 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3067 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3068
3069 for (i = 0; i < base->num_phy_chans; i++) {
3070 base->phy_res[i].num = i;
3071 odd_even_bit += 2 * ((i % 2) == 0);
3072 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
3074 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3075 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3076 base->phy_res[i].reserved = true;
3077 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3078 D40_DREG_GCC_SRC);
3079 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3080 D40_DREG_GCC_DST);
3081
3082
3083 } else {
3084 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3085 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3086 base->phy_res[i].reserved = false;
3087 num_phy_chans_avail++;
3088 }
3089 spin_lock_init(&base->phy_res[i].lock);
3090 }
3091
	/* Mark disabled channels as occupied */
3093 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3094 int chan = base->plat_data->disabled_channels[i];
3095
3096 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3097 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3098 base->phy_res[chan].reserved = true;
3099 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3100 D40_DREG_GCC_SRC);
3101 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3102 D40_DREG_GCC_DST);
3103 num_phy_chans_avail--;
3104 }
3105
	/* Mark soft_lli channels */
3107 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3108 int chan = base->plat_data->soft_lli_chans[i];
3109
3110 base->phy_res[chan].use_soft_lli = true;
3111 }
3112
3113 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3114 num_phy_chans_avail, base->num_phy_chans);
3115
	/* Verify settings extended vs standard */
3117 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3118
3119 for (i = 0; i < base->num_phy_chans; i++) {
3120
3121 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3122 (val[0] & 0x3) != 1)
3123 dev_info(base->dev,
3124 "[%s] INFO: channel %d is misconfigured (%d)\n",
3125 __func__, i, val[0] & 0x3);
3126
3127 val[0] = val[0] >> 2;
3128 }
3129
	/*
	 * To keep things simple, enable all clocks initially. The clocks are
	 * managed later, after channel allocation; clocks for event lines on
	 * which reserved channels exist are never managed anyway.
	 */
3136 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3137 base->gcc_pwr_off_mask = gcc;
3138
3139 return num_phy_chans_avail;
3140}
3141
3142static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3143{
3144 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3145 struct clk *clk = NULL;
3146 void __iomem *virtbase = NULL;
3147 struct resource *res = NULL;
3148 struct d40_base *base = NULL;
3149 int num_log_chans = 0;
3150 int num_phy_chans;
3151 int num_memcpy_chans;
3152 int clk_ret = -EINVAL;
3153 int i;
3154 u32 pid;
3155 u32 cid;
3156 u8 rev;
3157
3158 clk = clk_get(&pdev->dev, NULL);
3159 if (IS_ERR(clk)) {
3160 d40_err(&pdev->dev, "No matching clock found\n");
3161 goto failure;
3162 }
3163
3164 clk_ret = clk_prepare_enable(clk);
3165 if (clk_ret) {
3166 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3167 goto failure;
3168 }
3169
	/* Get IO for DMAC base area */
3171 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3172 if (!res)
3173 goto failure;
3174
3175 if (request_mem_region(res->start, resource_size(res),
3176 D40_NAME " I/O base") == NULL)
3177 goto failure;
3178
3179 virtbase = ioremap(res->start, resource_size(res));
3180 if (!virtbase)
3181 goto failure;
3182
	/* This is just a regular AMBA PrimeCell ID actually */
3184 for (pid = 0, i = 0; i < 4; i++)
3185 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3186 & 255) << (i * 8);
3187 for (cid = 0, i = 0; i < 4; i++)
3188 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3189 & 255) << (i * 8);
3190
3191 if (cid != AMBA_CID) {
3192 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3193 goto failure;
3194 }
3195 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3196 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3197 AMBA_MANF_BITS(pid),
3198 AMBA_VENDOR_ST);
3199 goto failure;
3200 }
3201
	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 * AP9540v1 has revision 4
	 * DB8540v1 has revision 4
	 */
3210 rev = AMBA_REV_BITS(pid);
3211 if (rev < 2) {
		d40_err(&pdev->dev,
			"hardware revision: %d is not supported\n", rev);
3213 goto failure;
3214 }
3215
	/* The number of physical channels on this HW */
3217 if (plat_data->num_of_phy_chans)
3218 num_phy_chans = plat_data->num_of_phy_chans;
3219 else
3220 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3221
	/* The number of channels used for memcpy */
3223 if (plat_data->num_of_memcpy_chans)
3224 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3225 else
3226 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3227
3228 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3229
3230 dev_info(&pdev->dev,
3231 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3232 rev, &res->start, num_phy_chans, num_log_chans);
3233
3234 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3235 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3236 sizeof(struct d40_chan), GFP_KERNEL);
3237
3238 if (base == NULL) {
3239 d40_err(&pdev->dev, "Out of memory\n");
3240 goto failure;
3241 }
3242
3243 base->rev = rev;
3244 base->clk = clk;
3245 base->num_memcpy_chans = num_memcpy_chans;
3246 base->num_phy_chans = num_phy_chans;
3247 base->num_log_chans = num_log_chans;
3248 base->phy_start = res->start;
3249 base->phy_size = resource_size(res);
3250 base->virtbase = virtbase;
3251 base->plat_data = plat_data;
3252 base->dev = &pdev->dev;
3253 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3254 base->log_chans = &base->phy_chans[num_phy_chans];
3255
3256 if (base->plat_data->num_of_phy_chans == 14) {
3257 base->gen_dmac.backup = d40_backup_regs_v4b;
3258 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3259 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3260 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3261 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3262 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3263 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3264 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3265 base->gen_dmac.il = il_v4b;
3266 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3267 base->gen_dmac.init_reg = dma_init_reg_v4b;
3268 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3269 } else {
3270 if (base->rev >= 3) {
3271 base->gen_dmac.backup = d40_backup_regs_v4a;
3272 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3273 }
3274 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3275 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3276 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3277 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3278 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3279 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3280 base->gen_dmac.il = il_v4a;
3281 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3282 base->gen_dmac.init_reg = dma_init_reg_v4a;
3283 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3284 }
3285
	base->phy_res = kcalloc(num_phy_chans, sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kcalloc(num_phy_chans,
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	base->lookup_log_chans = kcalloc(num_log_chans,
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_log_chans)
		goto failure;

	base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
						  sizeof(d40_backup_regs_chan),
						  GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto failure;

	base->lcla_pool.alloc_map = kcalloc(num_phy_chans
					    * D40_LCLA_LINK_PER_EVENT_GRP,
					    sizeof(struct d40_desc *),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;
3314
3315 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3316 0, SLAB_HWCACHE_ALIGN,
3317 NULL);
3318 if (base->desc_slab == NULL)
3319 goto failure;
3320
3321 return base;
3322
failure:
	if (!clk_ret)
		clk_disable_unprepare(clk);
	if (!IS_ERR(clk))
		clk_put(clk);
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));
3335
3336 if (base) {
3337 kfree(base->lcla_pool.alloc_map);
3338 kfree(base->reg_val_backup_chan);
3339 kfree(base->lookup_log_chans);
3340 kfree(base->lookup_phy_chans);
3341 kfree(base->phy_res);
3342 kfree(base);
3343 }
3344
3345 return NULL;
3346}
3347
3348static void __init d40_hw_init(struct d40_base *base)
3349{
3350
3351 int i;
3352 u32 prmseo[2] = {0, 0};
3353 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3354 u32 pcmis = 0;
3355 u32 pcicr = 0;
3356 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3357 u32 reg_size = base->gen_dmac.init_reg_size;
3358
3359 for (i = 0; i < reg_size; i++)
3360 writel(dma_init_reg[i].val,
3361 base->virtbase + dma_init_reg[i].reg);
3362
	/* Configure all our dma channels to default settings */
3364 for (i = 0; i < base->num_phy_chans; i++) {
3365
3366 activeo[i % 2] = activeo[i % 2] << 2;
3367
3368 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3369 == D40_ALLOC_PHY) {
3370 activeo[i % 2] |= 3;
3371 continue;
3372 }
3373
		/* Enable interrupt # */
3375 pcmis = (pcmis << 1) | 1;
3376
		/* Clear interrupt */
3378 pcicr = (pcicr << 1) | 1;
3379
		/* Set channel to physical mode */
3381 prmseo[i % 2] = prmseo[i % 2] << 2;
3382 prmseo[i % 2] |= 1;
3383
3384 }
3385
3386 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3387 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3388 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3389 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3390
	/* Write which interrupt to enable */
3392 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3393
	/* Write which interrupt to clear */
3395 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3396
	/* These are __initdata and cannot be accessed after init */
3398 base->gen_dmac.init_reg = NULL;
3399 base->gen_dmac.init_reg_size = 0;
3400}
3401
3402static int __init d40_lcla_allocate(struct d40_base *base)
3403{
3404 struct d40_lcla_pool *pool = &base->lcla_pool;
3405 unsigned long *page_list;
3406 int i, j;
3407 int ret = 0;
3408
	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
	 * and to fulfil this hardware requirement without wasting 256 kB
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
				  sizeof(unsigned long), GFP_KERNEL);
3416
3417 if (!page_list) {
3418 ret = -ENOMEM;
3419 goto failure;
3420 }
3421
	/* Calculate how many pages are required */
3423 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3424
3425 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3426 page_list[i] = __get_free_pages(GFP_KERNEL,
3427 base->lcla_pool.pages);
3428 if (!page_list[i]) {
3429
3430 d40_err(base->dev, "Failed to allocate %d pages.\n",
3431 base->lcla_pool.pages);
3432 ret = -ENOMEM;
3433
3434 for (j = 0; j < i; j++)
3435 free_pages(page_list[j], base->lcla_pool.pages);
3436 goto failure;
3437 }
3438
3439 if ((virt_to_phys((void *)page_list[i]) &
3440 (LCLA_ALIGNMENT - 1)) == 0)
3441 break;
3442 }
3443
3444 for (j = 0; j < i; j++)
3445 free_pages(page_list[j], base->lcla_pool.pages);
3446
3447 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3448 base->lcla_pool.base = (void *)page_list[i];
3449 } else {
		/*
		 * After many attempts with no success finding the correct
		 * alignment, try with allocating a big buffer instead.
		 */
3454 dev_warn(base->dev,
3455 "[%s] Failed to get %d pages @ 18 bit align.\n",
3456 __func__, base->lcla_pool.pages);
3457 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3458 base->num_phy_chans +
3459 LCLA_ALIGNMENT,
3460 GFP_KERNEL);
3461 if (!base->lcla_pool.base_unaligned) {
3462 ret = -ENOMEM;
3463 goto failure;
3464 }
3465
3466 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3467 LCLA_ALIGNMENT);
3468 }
3469
3470 pool->dma_addr = dma_map_single(base->dev, pool->base,
3471 SZ_1K * base->num_phy_chans,
3472 DMA_TO_DEVICE);
3473 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3474 pool->dma_addr = 0;
3475 ret = -ENOMEM;
3476 goto failure;
3477 }
3478
3479 writel(virt_to_phys(base->lcla_pool.base),
3480 base->virtbase + D40_DREG_LCLA);
3481failure:
3482 kfree(page_list);
3483 return ret;
3484}
3485
3486static int __init d40_of_probe(struct platform_device *pdev,
3487 struct device_node *np)
3488{
3489 struct stedma40_platform_data *pdata;
3490 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3491 const __be32 *list;
3492
3493 pdata = devm_kzalloc(&pdev->dev,
3494 sizeof(struct stedma40_platform_data),
3495 GFP_KERNEL);
3496 if (!pdata)
3497 return -ENOMEM;
3498
	/* If absent this value will be obtained from h/w. */
3500 of_property_read_u32(np, "dma-channels", &num_phy);
3501 if (num_phy > 0)
3502 pdata->num_of_phy_chans = num_phy;
3503
3504 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3505 num_memcpy /= sizeof(*list);
3506
3507 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3508 d40_err(&pdev->dev,
3509 "Invalid number of memcpy channels specified (%d)\n",
3510 num_memcpy);
3511 return -EINVAL;
3512 }
3513 pdata->num_of_memcpy_chans = num_memcpy;
3514
3515 of_property_read_u32_array(np, "memcpy-channels",
3516 dma40_memcpy_channels,
3517 num_memcpy);
3518
3519 list = of_get_property(np, "disabled-channels", &num_disabled);
3520 num_disabled /= sizeof(*list);
3521
3522 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3523 d40_err(&pdev->dev,
3524 "Invalid number of disabled channels specified (%d)\n",
3525 num_disabled);
3526 return -EINVAL;
3527 }
3528
3529 of_property_read_u32_array(np, "disabled-channels",
3530 pdata->disabled_channels,
3531 num_disabled);
3532 pdata->disabled_channels[num_disabled] = -1;
3533
3534 pdev->dev.platform_data = pdata;
3535
3536 return 0;
3537}
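/*
 * Illustrative device tree node consumed by d40_of_probe() above; the
 * property names come from the code, the values are made up:
 *
 *	dma-controller {
 *		compatible = "stericsson,dma40";
 *		dma-channels = <8>;
 *		memcpy-channels = <56 57 58 59 60>;
 *		disabled-channels = <12>;
 *	};
 */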
3538
3539static int __init d40_probe(struct platform_device *pdev)
3540{
3541 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3542 struct device_node *np = pdev->dev.of_node;
3543 int ret = -ENOENT;
3544 struct d40_base *base = NULL;
3545 struct resource *res = NULL;
3546 int num_reserved_chans;
3547 u32 val;
3548
3549 if (!plat_data) {
3550 if (np) {
			ret = d40_of_probe(pdev, np);
			if (ret)
				goto failure;
3555 } else {
3556 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3557 goto failure;
3558 }
3559 }
3560
3561 base = d40_hw_detect_init(pdev);
3562 if (!base)
3563 goto failure;
3564
3565 num_reserved_chans = d40_phy_res_init(base);
3566
3567 platform_set_drvdata(pdev, base);
3568
3569 spin_lock_init(&base->interrupt_lock);
3570 spin_lock_init(&base->execmd_lock);
3571
	/* Get IO for logical channel parameter address */
3573 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3574 if (!res) {
3575 ret = -ENOENT;
3576 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3577 goto failure;
3578 }
3579 base->lcpa_size = resource_size(res);
3580 base->phy_lcpa = res->start;
3581
3582 if (request_mem_region(res->start, resource_size(res),
3583 D40_NAME " I/O lcpa") == NULL) {
3584 ret = -EBUSY;
3585 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3586 goto failure;
3587 }
3588
	/* We make use of ESRAM memory for this. */
3590 val = readl(base->virtbase + D40_DREG_LCPA);
3591 if (res->start != val && val != 0) {
3592 dev_warn(&pdev->dev,
3593 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3594 __func__, val, &res->start);
3595 } else
3596 writel(res->start, base->virtbase + D40_DREG_LCPA);
3597
3598 base->lcpa_base = ioremap(res->start, resource_size(res));
3599 if (!base->lcpa_base) {
3600 ret = -ENOMEM;
3601 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3602 goto failure;
3603 }
3604
3605 if (base->plat_data->use_esram_lcla) {
3606 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3607 "lcla_esram");
3608 if (!res) {
3609 ret = -ENOENT;
3610 d40_err(&pdev->dev,
3611 "No \"lcla_esram\" memory resource\n");
3612 goto failure;
3613 }
3614 base->lcla_pool.base = ioremap(res->start,
3615 resource_size(res));
3616 if (!base->lcla_pool.base) {
3617 ret = -ENOMEM;
3618 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3619 goto failure;
3620 }
3621 writel(res->start, base->virtbase + D40_DREG_LCLA);
3622
3623 } else {
3624 ret = d40_lcla_allocate(base);
3625 if (ret) {
3626 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3627 goto failure;
3628 }
3629 }
3630
3631 spin_lock_init(&base->lcla_pool.lock);
3632
	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		ret = base->irq;
		goto failure;
	}

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to request IRQ\n");
		goto failure;
	}
3640
3641 if (base->plat_data->use_esram_lcla) {
3642
3643 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3644 if (IS_ERR(base->lcpa_regulator)) {
3645 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3646 ret = PTR_ERR(base->lcpa_regulator);
3647 base->lcpa_regulator = NULL;
3648 goto failure;
3649 }
3650
3651 ret = regulator_enable(base->lcpa_regulator);
3652 if (ret) {
3653 d40_err(&pdev->dev,
3654 "Failed to enable lcpa_regulator\n");
3655 regulator_put(base->lcpa_regulator);
3656 base->lcpa_regulator = NULL;
3657 goto failure;
3658 }
3659 }
3660
3661 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3662
3663 pm_runtime_irq_safe(base->dev);
3664 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3665 pm_runtime_use_autosuspend(base->dev);
3666 pm_runtime_mark_last_busy(base->dev);
3667 pm_runtime_set_active(base->dev);
3668 pm_runtime_enable(base->dev);
3669
3670 ret = d40_dmaengine_init(base, num_reserved_chans);
3671 if (ret)
3672 goto failure;
3673
3674 base->dev->dma_parms = &base->dma_parms;
3675 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3676 if (ret) {
3677 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3678 goto failure;
3679 }
3680
3681 d40_hw_init(base);
3682
3683 if (np) {
3684 ret = of_dma_controller_register(np, d40_xlate, NULL);
3685 if (ret)
3686 dev_err(&pdev->dev,
3687 "could not register of_dma_controller\n");
3688 }
3689
3690 dev_info(base->dev, "initialized\n");
3691 return 0;
3692
3693failure:
3694 if (base) {
3695 if (base->desc_slab)
3696 kmem_cache_destroy(base->desc_slab);
3697 if (base->virtbase)
3698 iounmap(base->virtbase);
3699
3700 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3701 iounmap(base->lcla_pool.base);
3702 base->lcla_pool.base = NULL;
3703 }
3704
3705 if (base->lcla_pool.dma_addr)
3706 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3707 SZ_1K * base->num_phy_chans,
3708 DMA_TO_DEVICE);
3709
3710 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3711 free_pages((unsigned long)base->lcla_pool.base,
3712 base->lcla_pool.pages);
3713
3714 kfree(base->lcla_pool.base_unaligned);
3715
3716 if (base->phy_lcpa)
3717 release_mem_region(base->phy_lcpa,
3718 base->lcpa_size);
3719 if (base->phy_start)
3720 release_mem_region(base->phy_start,
3721 base->phy_size);
3722 if (base->clk) {
3723 clk_disable_unprepare(base->clk);
3724 clk_put(base->clk);
3725 }
3726
3727 if (base->lcpa_regulator) {
3728 regulator_disable(base->lcpa_regulator);
3729 regulator_put(base->lcpa_regulator);
3730 }
3731
3732 kfree(base->lcla_pool.alloc_map);
3733 kfree(base->lookup_log_chans);
3734 kfree(base->lookup_phy_chans);
3735 kfree(base->phy_res);
3736 kfree(base);
3737 }
3738
3739 d40_err(&pdev->dev, "probe failed\n");
3740 return ret;
3741}
3742
3743static const struct of_device_id d40_match[] = {
3744 { .compatible = "stericsson,dma40", },
3745 {}
3746};
3747
3748static struct platform_driver d40_driver = {
3749 .driver = {
3750 .name = D40_NAME,
3751 .pm = &dma40_pm_ops,
3752 .of_match_table = d40_match,
3753 },
3754};
3755
3756static int __init stedma40_init(void)
3757{
3758 return platform_driver_probe(&d40_driver, d40_probe);
3759}
3760subsys_initcall(stedma40_init);
3761