/*
 * Freescale MPC5200 PSC in DMA mode ASoC platform driver
 *
 * Author: Grant Likely <grant.likely@secretlab.ca>
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <sound/soc.h>

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/gen_bd.h>
#include <asm/mpc52xx_psc.h>

#include "mpc5200_dma.h"

#define DRV_NAME "mpc5200_dma"

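/*
 * PSC status interrupt handler: counts playback underruns (TX FIFO empty)
 * and capture overruns, then clears the PSC error status.
 */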
static irqreturn_t psc_dma_status_irq(int irq, void *_psc_dma)
{
	struct psc_dma *psc_dma = _psc_dma;
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
	u16 isr;

	isr = in_be16(&regs->mpc52xx_psc_isr);

	/* Playback underrun error */
	if (psc_dma->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP))
		psc_dma->stats.underrun_count++;

	/* Capture overrun error */
	if (psc_dma->capture.active && (isr & MPC52xx_PSC_IMR_ORERR))
		psc_dma->stats.overrun_count++;

	out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);

	return IRQ_HANDLED;
}
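/*
 * psc_dma_bcom_enqueue_next_buffer - enqueue another audio period buffer
 * @s: pointer to the stream's private data structure
 *
 * Submits the next period buffer to the bestcomm queue.  Callers must make
 * sure there is room in the queue (see the bcom_queue_full() check in
 * psc_dma_trigger()), otherwise the ring buffer falls out of sync with the
 * hardware.
 */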
static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
{
	struct bcom_bd *bd;

	/* Prepare and enqueue the next buffer descriptor */
	bd = bcom_prepare_next_buffer(s->bcom_task);
	bd->status = s->period_bytes;
	bd->data[0] = s->runtime->dma_addr + (s->period_next * s->period_bytes);
	bcom_submit_next_buffer(s->bcom_task, NULL);

	/* Update for the next period */
	s->period_next = (s->period_next + 1) % s->runtime->periods;
}
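/*
 * Bestcomm DMA interrupt handler: retrieves every completed buffer
 * descriptor, advances the period counters, immediately re-queues the next
 * period, and tells ALSA that a period has elapsed.
 */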
static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
{
	struct psc_dma_stream *s = _psc_dma_stream;

	spin_lock(&s->psc_dma->lock);

	/* Process all completed buffer descriptors */
	while (bcom_buffer_done(s->bcom_task)) {
		bcom_retrieve_buffer(s->bcom_task, NULL, NULL);

		s->period_current = (s->period_current + 1) % s->runtime->periods;
		s->period_count++;

		psc_dma_bcom_enqueue_next_buffer(s);
	}
	spin_unlock(&s->psc_dma->lock);

	/* If the stream is active, also inform the PCM middle layer that
	 * a period has finished. */
	if (s->active)
		snd_pcm_period_elapsed(s->stream);

	return IRQ_HANDLED;
}

static int psc_dma_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);
	return 0;
}

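/*
 * psc_dma_trigger - start and stop the DMA transfer
 *
 * Called by ALSA to control the DMA transfer of data.  Only the START and
 * STOP commands are handled here; anything else returns -EINVAL.
 */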
static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma);
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
	u16 imr;
	unsigned long flags;
	int i;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		dev_dbg(psc_dma->dev, "START: stream=%i fbits=%u ps=%u #p=%u\n",
			substream->pstr->stream, runtime->frame_bits,
			(int)runtime->period_size, runtime->periods);
		s->period_bytes = frames_to_bytes(runtime,
						  runtime->period_size);
		s->period_next = 0;
		s->period_current = 0;
		s->active = 1;
		s->period_count = 0;
		s->runtime = runtime;

		/* Fill up the bestcomm bd queue and enable DMA.
		 * This will begin filling the PSC's fifo.
		 */
		spin_lock_irqsave(&psc_dma->lock, flags);

		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
			bcom_gen_bd_rx_reset(s->bcom_task);
		else
			bcom_gen_bd_tx_reset(s->bcom_task);

		for (i = 0; i < runtime->periods; i++)
			if (!bcom_queue_full(s->bcom_task))
				psc_dma_bcom_enqueue_next_buffer(s);

		bcom_enable(s->bcom_task);
		spin_unlock_irqrestore(&psc_dma->lock, flags);

		out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);

		break;

	case SNDRV_PCM_TRIGGER_STOP:
		dev_dbg(psc_dma->dev, "STOP: stream=%i periods_count=%i\n",
			substream->pstr->stream, s->period_count);
		s->active = 0;

		spin_lock_irqsave(&psc_dma->lock, flags);
		bcom_disable(s->bcom_task);
		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
			bcom_gen_bd_rx_reset(s->bcom_task);
		else
			bcom_gen_bd_tx_reset(s->bcom_task);
		spin_unlock_irqrestore(&psc_dma->lock, flags);

		break;

	default:
		dev_dbg(psc_dma->dev, "unhandled trigger: stream=%i cmd=%i\n",
			substream->pstr->stream, cmd);
		return -EINVAL;
	}

	/* Update the interrupt enable settings */
	imr = 0;
	if (psc_dma->playback.active)
		imr |= MPC52xx_PSC_IMR_TXEMP;
	if (psc_dma->capture.active)
		imr |= MPC52xx_PSC_IMR_ORERR;
	out_be16(&regs->isr_imr.imr, psc_dma->imr | imr);

	return 0;
}
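/*
 * Hardware/runtime capabilities advertised to ALSA.  The DMA pointer only
 * advances in whole periods (see psc_dma_pointer()), hence the BATCH and
 * BLOCK_TRANSFER flags.
 */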
static const struct snd_pcm_hardware psc_dma_hardware = {
	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_BATCH,
	.formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |
		   SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE,
	.period_bytes_max	= 1024 * 1024,
	.period_bytes_min	= 32,
	.periods_min		= 2,
	.periods_max		= 256,
	.buffer_bytes_max	= 2 * 1024 * 1024,
	.fifo_size		= 512,
};

static int psc_dma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
	struct psc_dma_stream *s;
	int rc;

	dev_dbg(psc_dma->dev, "psc_dma_open(substream=%p)\n", substream);

	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	snd_soc_set_runtime_hwparams(substream, &psc_dma_hardware);

	rc = snd_pcm_hw_constraint_integer(runtime,
					   SNDRV_PCM_HW_PARAM_PERIODS);
	if (rc < 0) {
		dev_err(substream->pcm->card->dev, "invalid buffer size\n");
		return rc;
	}

	s->stream = substream;
	return 0;
}

static int psc_dma_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
	struct psc_dma_stream *s;

	dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream);

	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	if (!psc_dma->playback.active &&
	    !psc_dma->capture.active) {

		/* Disable all interrupts and reset the PSC */
		out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
		out_8(&psc_dma->psc_regs->command, 4 << 4);	/* reset error status */
	}
	s->stream = NULL;
	return 0;
}

static snd_pcm_uframes_t
psc_dma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
	struct psc_dma_stream *s;
	dma_addr_t count;

	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
		s = &psc_dma->capture;
	else
		s = &psc_dma->playback;

	count = s->period_current * s->period_bytes;

	return bytes_to_frames(substream->runtime, count);
}

static int
psc_dma_hw_params(struct snd_pcm_substream *substream,
		  struct snd_pcm_hw_params *params)
{
	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}

static const struct snd_pcm_ops psc_dma_ops = {
	.open		= psc_dma_open,
	.close		= psc_dma_close,
	.hw_free	= psc_dma_hw_free,
	.ioctl		= snd_pcm_lib_ioctl,
	.pointer	= psc_dma_pointer,
	.trigger	= psc_dma_trigger,
	.hw_params	= psc_dma_hw_params,
};

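/*
 * Allocate one contiguous DMA-capable buffer per substream when the PCM
 * instance is created.  The buffers live for the lifetime of the PCM and
 * are released again in psc_dma_free().
 */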
static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
	struct snd_soc_dai *dai = rtd->cpu_dai;
	struct snd_pcm *pcm = rtd->pcm;
	size_t size = psc_dma_hardware.buffer_bytes_max;
	int rc;

	dev_dbg(component->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
		card, dai, pcm);

	rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
			size, &pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer);
		if (rc)
			goto playback_alloc_err;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
			size, &pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->dma_buffer);
		if (rc)
			goto capture_alloc_err;
	}

	return 0;

 capture_alloc_err:
	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
		snd_dma_free_pages(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer);

 playback_alloc_err:
	dev_err(card->dev, "Cannot allocate buffer(s)\n");

	return -ENOMEM;
}

static void psc_dma_free(struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *rtd = pcm->private_data;
	struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
	struct snd_pcm_substream *substream;
	int stream;

	dev_dbg(component->dev, "psc_dma_free(pcm=%p)\n", pcm);

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (substream) {
			snd_dma_free_pages(&substream->dma_buffer);
			substream->dma_buffer.area = NULL;
			substream->dma_buffer.addr = 0;
		}
	}
}

static const struct snd_soc_component_driver mpc5200_audio_dma_component = {
	.name		= DRV_NAME,
	.ops		= &psc_dma_ops,
	.pcm_new	= &psc_dma_new,
	.pcm_free	= &psc_dma_free,
};

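/*
 * mpc5200_audio_dma_create - exported probe-time helper for the MPC5200 PSC
 * audio drivers: maps the PSC registers, sets up the bestcomm rx/tx tasks,
 * resets the PSC and FIFO, requests the IRQs, and registers the ASoC
 * platform component.
 */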
int mpc5200_audio_dma_create(struct platform_device *op)
{
	phys_addr_t fifo;
	struct psc_dma *psc_dma;
	struct resource res;
	int size, irq, rc;
	const __be32 *prop;
	void __iomem *regs;
	int ret;

	/* Fetch the registers and IRQ of the PSC */
	irq = irq_of_parse_and_map(op->dev.of_node, 0);
	if (of_address_to_resource(op->dev.of_node, 0, &res)) {
		dev_err(&op->dev, "Missing reg property\n");
		return -ENODEV;
	}
	regs = ioremap(res.start, resource_size(&res));
	if (!regs) {
		dev_err(&op->dev, "Could not map registers\n");
		return -ENODEV;
	}

	/* Allocate and initialize the driver private data */
	psc_dma = kzalloc(sizeof(*psc_dma), GFP_KERNEL);
	if (!psc_dma) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	/* Get the PSC ID */
	prop = of_get_property(op->dev.of_node, "cell-index", &size);
	if (!prop || size < sizeof(*prop)) {
		ret = -ENODEV;
		goto out_free;
	}

	spin_lock_init(&psc_dma->lock);
	mutex_init(&psc_dma->mutex);
	psc_dma->id = be32_to_cpu(*prop);
	psc_dma->irq = irq;
	psc_dma->psc_regs = regs;
	psc_dma->fifo_regs = regs + sizeof(*psc_dma->psc_regs);
	psc_dma->dev = &op->dev;
	psc_dma->playback.psc_dma = psc_dma;
	psc_dma->capture.psc_dma = psc_dma;
	snprintf(psc_dma->name, sizeof(psc_dma->name), "PSC%u", psc_dma->id);

	/* Find the address of the fifo data registers and set up the
	 * bestcomm tasks */
	fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32);
	psc_dma->capture.bcom_task =
		bcom_psc_gen_bd_rx_init(psc_dma->id, 10, fifo, 512);
	psc_dma->playback.bcom_task =
		bcom_psc_gen_bd_tx_init(psc_dma->id, 10, fifo);
	if (!psc_dma->capture.bcom_task ||
	    !psc_dma->playback.bcom_task) {
		dev_err(&op->dev, "Could not allocate bestcomm tasks\n");
		ret = -ENODEV;
		goto out_free;
	}

	/* Disable all interrupts and reset the PSC */
	out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
	/* Reset the receiver */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_RX);
	/* Reset the transmitter */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_TX);
	/* Reset the error status */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_ERR_STAT);
	/* Select mode register 1 */
	out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_SEL_MODE_REG_1);

	/* Program the mode registers with their default values; the first
	 * write goes to the register selected above, the second to the
	 * following one.
	 */
	out_8(&psc_dma->psc_regs->mode, 0);
	out_8(&psc_dma->psc_regs->mode, 0);

	/* Set the TX and RX fifo alarm thresholds */
	out_be16(&psc_dma->fifo_regs->rfalarm, 0x100);
	out_8(&psc_dma->fifo_regs->rfcntl, 0x4);
	out_be16(&psc_dma->fifo_regs->tfalarm, 0x100);
	out_8(&psc_dma->fifo_regs->tfcntl, 0x7);

	/* Look up the bestcomm task IRQ numbers */
	psc_dma->playback.irq =
		bcom_get_task_irq(psc_dma->playback.bcom_task);
	psc_dma->capture.irq =
		bcom_get_task_irq(psc_dma->capture.bcom_task);

	rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED,
			 "psc-dma-status", psc_dma);
	rc |= request_irq(psc_dma->capture.irq, &psc_dma_bcom_irq, IRQF_SHARED,
			  "psc-dma-capture", &psc_dma->capture);
	rc |= request_irq(psc_dma->playback.irq, &psc_dma_bcom_irq, IRQF_SHARED,
			  "psc-dma-playback", &psc_dma->playback);
	if (rc) {
		ret = -ENODEV;
		goto out_irq;
	}

	/* Save what we've done so it can be found again later */
	dev_set_drvdata(&op->dev, psc_dma);

	/* Register the platform component with ASoC */
	return devm_snd_soc_register_component(&op->dev,
				&mpc5200_audio_dma_component, NULL, 0);
out_irq:
	free_irq(psc_dma->irq, psc_dma);
	free_irq(psc_dma->capture.irq, &psc_dma->capture);
	free_irq(psc_dma->playback.irq, &psc_dma->playback);
out_free:
	kfree(psc_dma);
out_unmap:
	iounmap(regs);
	return ret;
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_create);

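/*
 * mpc5200_audio_dma_destroy - undo what mpc5200_audio_dma_create() set up:
 * release the bestcomm tasks, free the IRQs, and unmap the PSC registers.
 */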
int mpc5200_audio_dma_destroy(struct platform_device *op)
{
	struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);

	dev_dbg(&op->dev, "mpc5200_audio_dma_destroy()\n");

	bcom_gen_bd_rx_release(psc_dma->capture.bcom_task);
	bcom_gen_bd_tx_release(psc_dma->playback.bcom_task);

	/* Release the irqs */
	free_irq(psc_dma->irq, psc_dma);
	free_irq(psc_dma->capture.irq, &psc_dma->capture);
	free_irq(psc_dma->playback.irq, &psc_dma->playback);

	iounmap(psc_dma->psc_regs);
	kfree(psc_dma);
	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_destroy);

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("Freescale MPC5200 PSC in DMA mode ASoC Driver");
MODULE_LICENSE("GPL");