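/*
 * Renesas R-Car sound (rsnd) Audio DMAC / Audio DMAC peri peri support
 */
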
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"
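
/* Audio DMAC peri peri (PDMA) per-channel register offsets */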
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

#define PDMACHCR_DE	(1 << 0)

struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	unsigned int		dma_len;
};

struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem	*ppbase;
	phys_addr_t	ppres;
	int		dmaen_num;
	int		dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

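/*
 *	Audio DMAC (dmaengine) support
 */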
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (rsnd_io_is_working(io))
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}
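
/*
 * Exactly one side of an Audio DMAC transfer is a module; the other side
 * is memory, so callers pass exactly one of mod_from/mod_to here.
 */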
static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan)
		dmaengine_terminate_async(dmaen->chan);

	return 0;
}

static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

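	/*
	 * dma_release_channel() can sleep, so the channel is released here
	 * in .cleanup rather than under the spinlock held at .stop time.
	 */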
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	if (dmaen->chan)
		return 0;

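	/*
	 * Requesting a dmaengine channel can sleep, so it is done here in
	 * .prepare instead of at .start time, which may run in atomic context.
	 */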
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}

static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
	int is_play = rsnd_io_is_play(io);
	int ret;

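	/*
	 * For monaural streams the DMA bus width must match the physical
	 * sample width; multi-channel streams always use 32-bit accesses.
	 */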
	if (rsnd_runtime_channel_original(io) == 1) {
		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
		int bits = snd_pcm_format_physical_width(runtime->format);

		switch (bits) {
		case 8:
			buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
			break;
		case 16:
			buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
			break;
		case 32:
			buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
			break;
		default:
			dev_err(dev, "invalid format width %d\n", bits);
			return -EINVAL;
		}
	}

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = buswidth;
	cfg.dst_addr_width = buswidth;

	dev_dbg(dev, "%s %pad -> %pad\n",
		rsnd_mod_name(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_len		= snd_pcm_lib_buffer_bytes(substream);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node, char *name,
					  struct rsnd_mod *mod, char *x)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		i = rsnd_node_fixed_index(np, name, i);

		if (i == rsnd_mod_id_raw(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, x);
		i++;
	}

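	/* of_node was passed in with a reference held; drop it here */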
	of_node_put(of_node);

	return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

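	/* probe for a dmaengine channel; it is released again below */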
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

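		/*
		 * no DMA channel is available; report -EAGAIN so the
		 * caller can fall back to PIO transfer mode
		 */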
		return -EAGAIN;
	}

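	/*
	 * Remember the DMAC device (used later for buffer handling), then
	 * release the channel; it is re-requested in rsnd_dmaen_prepare().
	 */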
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.prepare	= rsnd_dmaen_prepare,
	.cleanup	= rsnd_dmaen_cleanup,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
	.get_status	= rsnd_mod_get_status,
};
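
/*
 *	Audio DMAC peri peri
 *
 * The ID tables below feed PDMACHCR; gen2_id_table_ssiu is indexed by
 * (SSI id * 8 + BUSIF number).
 */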
static const u8 gen2_id_table_ssiu[] = {
	0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
	0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
	0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
	0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
	0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
	0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
	0x2d,
	0x2e,
	0x2f,
	0x30,
	0x31,
	0x32,
	0x33,
	0x34,
	0x35,
	0x36,
};
static const u8 gen2_id_table_cmd[] = {
	0x37,
	0x38,
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = 255;
	int size = 0;

	if ((mod == ssi) ||
	    (mod == ssiu)) {
		int busif = rsnd_mod_id_sub(ssiu);

		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
		id = (rsnd_mod_id(mod) * 8) + busif;
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
		id = rsnd_mod_id(mod);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
		id = rsnd_mod_id(mod);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));

		return 0x00;
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}

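/* each PDMA channel has a 0x10 byte register window, starting at offset 0x20 */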
#define rsnd_dmapp_addr(dmac, dma, reg)					\
	(dmac->ppbase + 0x20 + reg +					\
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}
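
/* clear PDMACHCR.DE and wait (up to ~1ms) for the channel to actually stop */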
static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void rsnd_dmapp_debug_info(struct seq_file *m,
				  struct rsnd_dai_stream *io,
				  struct rsnd_mod *mod)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_debugfs_reg_show(m, dmac->ppres, dmac->ppbase,
			      0x20 + 0x10 * dmapp->dmapp_id, 0x10);
}
#define DEBUG_INFO .debug_info = rsnd_dmapp_debug_info
#else
#define DEBUG_INFO
#endif

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name		= "audmac-pp",
	.start		= rsnd_dmapp_start,
	.stop		= rsnd_dmapp_stop,
	.quit		= rsnd_dmapp_stop,
	.get_status	= rsnd_mod_get_status,
	DEBUG_INFO
};
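
/*
 * DMA read/write register offsets
 *
 *	RDMA_xxx_I_N : Audio DMAC input
 *	RDMA_xxx_O_N : Audio DMAC output
 *	RDMA_xxx_I_P : Audio DMAC peri peri input
 *	RDMA_xxx_O_P : Audio DMAC peri peri output
 *
 * Each macro converts a module register base address into the FIFO
 * address that the DMAC reads from or writes to.
 */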
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)

#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
		     !!(rsnd_io_to_mod_ssiu(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
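	/*
	 * DMAC read/write addresses, indexed by
	 * [is_ssi][is_play][use_src + use_cmd]:
	 *	is_ssi:  0 = SRC, 1 = SSI, 2 = SSIU (BUSIF)
	 *	is_play: 0 = Capture, 1 = Playback
	 */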
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		{{{ RDMA_SSI_O_N(ssi, id),		0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 {{ 0,		RDMA_SSI_I_N(ssi, id) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) } }
		},
		{{{ RDMA_SSIU_O_N(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 {{ 0,		RDMA_SSIU_I_N(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) } } },
	};

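	/*
	 * SSI9 with BUSIF4-7 does not fit the address calculation above,
	 * so warn if such a combination is requested.
	 */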
	if ((id == 9) && (busif >= 4))
		dev_err(dev, "This driver doesn't support SSI%d-%d, so far\n",
			id, busif);

	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

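	/* when BUSIF is in use, switch from the SSI to the SSIU address map */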
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

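	/* Gen1 uses the default DMA address; no override is needed here */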
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1)
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi;
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

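	/*
	 * Prefer the SSIU module for the DMA endpoint when the DT provides
	 * an SSIU node; otherwise fall back to the SSI module to keep
	 * compatibility with older device trees.
	 */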
	if (rsnd_ssiu_of_node(priv)) {
		struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);

		ssi = ssiu;
		if (this == rsnd_io_to_mod_ssi(io))
			this = ssiu;
	} else {
		ssi = rsnd_io_to_mod_ssi(io);
	}

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

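	/*
	 * Build the stream path:
	 *
	 *	playback: mem -> (SRC -> CTU -> MIX -> DVC) -> SSI
	 *	capture:  SSI -> (SRC -> CTU -> MIX -> DVC) -> mem
	 *
	 * mod[0] and mod[idx] are the end points (NULL stands for memory);
	 * the Audio DMAC handles the memory side, the Audio DMAC peri peri
	 * the module-to-module transfers.
	 */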
	mod_start = (is_play) ? NULL : ssi;
	mod_end   = (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

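	/*
	 * "this" is either the SSI-side or the memory-side end of the path;
	 * pick the neighbouring module pair that this DMA connects.
	 */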
	if ((this == ssi) == (is_play)) {
		*mod_from = mod[idx - 1];
		*mod_to   = mod[idx];
	} else {
		*mod_from = mod[0];
		*mod_to   = mod[1];
	}

	dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, "  %s%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}

static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

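	/* no DMA controller was set up; let the caller fall back to PIO */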
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

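	/*
	 * A module-to-module transfer uses the Audio DMAC peri peri;
	 * a transfer to or from memory uses a dmaengine Audio DMAC channel.
	 */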
	if (mod_from && mod_to) {
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

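	/* Gen1 always uses the dmaengine Audio DMAC; overwrite the selection */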
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s %s -> %s\n",
		rsnd_mod_name(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to ? mod_to : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

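	/* Gen1 has nothing to set up here */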
	if (rsnd_is_gen1(priv))
		return 0;

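	/* Gen2 or later: map the Audio DMAC peri peri ("audmapp") registers */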
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0;	/* fall back to PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->ppres  = res->start;
	dmac->ppbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->ppbase))
		return PTR_ERR(dmac->ppbase);

	priv->dma = dmac;

	/* the dummy "mem" mod represents the memory end of a DMA path in debug output */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
}