linux/sound/soc/soc-generic-dmaengine-pcm.c
// SPDX-License-Identifier: GPL-2.0+
//
//  Copyright (C) 2013, Analog Devices Inc.
//	Author: Lars-Peter Clausen <lars@metafoo.de>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>

#include <sound/dmaengine_pcm.h>

static unsigned int prealloc_buffer_size_kbytes = 512;
module_param(prealloc_buffer_size_kbytes, uint, 0444);
MODULE_PARM_DESC(prealloc_buffer_size_kbytes, "Preallocate DMA buffer size (KB).");

/*
 * The platform's dmaengine driver does not support reporting the number of
 * bytes that are still left to transfer.
 */
#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)

static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
	struct snd_pcm_substream *substream)
{
	if (!pcm->chan[substream->stream])
		return NULL;

	return pcm->chan[substream->stream]->device->dev;
}

/**
 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
 * @substream: PCM substream
 * @params: hw_params
 * @slave_config: DMA slave config to prepare
 *
 * This function can be used as a generic prepare_slave_config callback for
 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
 * DAI DMA data. Internally the function will first call
 * snd_hwparams_to_dma_slave_config() to fill in the slave config based on the
 * hw_params, followed by snd_dmaengine_pcm_set_config_from_dai_data() to fill
 * in the remaining fields based on the DAI DMA data.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_dmaengine_dai_dma_data *dma_data;
	int ret;

	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
	if (ret)
		return ret;

	snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
		slave_config);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
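
/*
 * Usage sketch (not part of this file): a platform that needs device specific
 * tweaks on top of the generic behaviour can wrap this helper from its own
 * prepare_slave_config callback and plug it in through
 * struct snd_dmaengine_pcm_config. All my_platform_* names below are
 * hypothetical, and forcing the destination burst size is only an example of
 * a possible fixup.
 *
 *	static int my_platform_prepare_slave_config(struct snd_pcm_substream *substream,
 *		struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
 *	{
 *		int ret;
 *
 *		ret = snd_dmaengine_pcm_prepare_slave_config(substream, params,
 *							     slave_config);
 *		if (ret)
 *			return ret;
 *
 *		slave_config->dst_maxburst = 4;
 *		return 0;
 *	}
 *
 *	static const struct snd_dmaengine_pcm_config my_platform_pcm_config = {
 *		.prepare_slave_config = my_platform_prepare_slave_config,
 *	};
 */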

static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
	int (*prepare_slave_config)(struct snd_pcm_substream *substream,
			struct snd_pcm_hw_params *params,
			struct dma_slave_config *slave_config);
	struct dma_slave_config slave_config;

	memset(&slave_config, 0, sizeof(slave_config));

	if (!pcm->config)
		prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
	else
		prepare_slave_config = pcm->config->prepare_slave_config;

	if (prepare_slave_config) {
		int ret = prepare_slave_config(substream, params, &slave_config);
		if (ret)
			return ret;

		ret = dmaengine_slave_config(chan, &slave_config);
		if (ret)
			return ret;
	}

	return 0;
}

static int
dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
	struct dma_chan *chan = pcm->chan[substream->stream];
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct snd_pcm_hardware hw;

	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	if (pcm->config && pcm->config->pcm_hardware)
		return snd_soc_set_runtime_hwparams(substream,
				pcm->config->pcm_hardware);

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	memset(&hw, 0, sizeof(hw));
	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = dma_data->fifo_size;

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		hw.info |= SNDRV_PCM_INFO_BATCH;

	/*
	 * FIXME: the return value is deliberately not checked here so that
	 * the behaviour stays identical to the code before
	 * snd_dmaengine_pcm_refine_runtime_hwparams() was introduced.
	 */
	snd_dmaengine_pcm_refine_runtime_hwparams(substream,
						  dma_data,
						  &hw,
						  chan);

	return snd_soc_set_runtime_hwparams(substream, &hw);
}

static int dmaengine_pcm_open(struct snd_soc_component *component,
			      struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct dma_chan *chan = pcm->chan[substream->stream];
	int ret;

	ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
	if (ret)
		return ret;

	return snd_dmaengine_pcm_open(substream, chan);
}

static int dmaengine_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	return snd_dmaengine_pcm_close(substream);
}

static int dmaengine_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	return snd_dmaengine_pcm_trigger(substream, cmd);
}

static struct dma_chan *dmaengine_pcm_compat_request_channel(
	struct snd_soc_component *component,
	struct snd_soc_pcm_runtime *rtd,
	struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct snd_dmaengine_dai_dma_data *dma_data;
	dma_filter_fn fn = NULL;

	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return NULL;
	}

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
		return pcm->chan[0];

	if (pcm->config && pcm->config->compat_request_channel)
		return pcm->config->compat_request_channel(rtd, substream);

	if (pcm->config)
		fn = pcm->config->compat_filter_fn;

	return snd_dmaengine_pcm_request_channel(fn, dma_data->filter_data);
}

static bool dmaengine_pcm_can_report_residue(struct device *dev,
	struct dma_chan *chan)
{
	struct dma_slave_caps dma_caps;
	int ret;

	ret = dma_get_slave_caps(chan, &dma_caps);
	if (ret != 0) {
		dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n",
			 ret);
		return false;
	}

	if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
		return false;

	return true;
}

static int dmaengine_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	const struct snd_dmaengine_pcm_config *config = pcm->config;
	struct device *dev = component->dev;
	size_t prealloc_buffer_size;
	size_t max_buffer_size;
	unsigned int i;

	if (config && config->prealloc_buffer_size) {
		prealloc_buffer_size = config->prealloc_buffer_size;
		max_buffer_size = config->pcm_hardware->buffer_bytes_max;
	} else {
		prealloc_buffer_size = prealloc_buffer_size_kbytes * 1024;
		max_buffer_size = SIZE_MAX;
	}

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream = rtd->pcm->streams[i].substream;
		if (!substream)
			continue;

		if (!pcm->chan[i] && config && config->chan_names[i])
			pcm->chan[i] = dma_request_slave_channel(dev,
				config->chan_names[i]);

		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
			pcm->chan[i] = dmaengine_pcm_compat_request_channel(
				component, rtd, substream);
		}

		if (!pcm->chan[i]) {
			dev_err(component->dev,
				"Missing dma channel for stream: %d\n", i);
			return -EINVAL;
		}

		snd_pcm_set_managed_buffer(substream,
				SNDRV_DMA_TYPE_DEV_IRAM,
				dmaengine_dma_dev(pcm, substream),
				prealloc_buffer_size,
				max_buffer_size);

		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;

		if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
			strscpy_pad(rtd->pcm->streams[i].pcm->name,
				    rtd->pcm->streams[i].pcm->id,
				    sizeof(rtd->pcm->streams[i].pcm->name));
		}
	}

	return 0;
}

static snd_pcm_uframes_t dmaengine_pcm_pointer(
	struct snd_soc_component *component,
	struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		return snd_dmaengine_pcm_pointer_no_residue(substream);
	else
		return snd_dmaengine_pcm_pointer(substream);
}

/*
 * copy_user callback used when a config->process hook is present: data is
 * copied between the user buffer and the DMA area, and the platform's
 * process callback is invoked for each copied chunk in addition to the
 * plain copy.
 */
static int dmaengine_copy_user(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream,
			       int channel, unsigned long hwoff,
			       void __user *buf, unsigned long bytes)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	int (*process)(struct snd_pcm_substream *substream,
		       int channel, unsigned long hwoff,
		       void *buf, unsigned long bytes) = pcm->config->process;
	bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	void *dma_ptr = runtime->dma_area + hwoff +
			channel * (runtime->dma_bytes / runtime->channels);

	if (is_playback)
		if (copy_from_user(dma_ptr, buf, bytes))
			return -EFAULT;

	if (process) {
		int ret = process(substream, channel, hwoff, (__force void *)buf, bytes);
		if (ret < 0)
			return ret;
	}

	if (!is_playback)
		if (copy_to_user(buf, dma_ptr, bytes))
			return -EFAULT;

	return 0;
}

static const struct snd_soc_component_driver dmaengine_pcm_component = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.pcm_construct	= dmaengine_pcm_new,
};

static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.copy_user	= dmaengine_copy_user,
	.pcm_construct	= dmaengine_pcm_new,
};

static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};

static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
	struct device *dev, const struct snd_dmaengine_pcm_config *config)
{
	unsigned int i;
	const char *name;
	struct dma_chan *chan;

	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node &&
	    !(config && config->dma_dev && config->dma_dev->of_node)))
		return 0;

	if (config && config->dma_dev) {
		/*
		 * If this warning is seen, it probably means that your Linux
		 * device structure does not match your HW device structure.
		 * It would be best to refactor the Linux device structure to
		 * correctly match the HW structure.
		 */
		dev_warn(dev, "DMA channels sourced from device %s",
			 dev_name(config->dma_dev));
		dev = config->dma_dev;
	}

	for_each_pcm_streams(i) {
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			name = "rx-tx";
		else
			name = dmaengine_pcm_dma_channel_names[i];
		if (config && config->chan_names[i])
			name = config->chan_names[i];
		chan = dma_request_chan(dev, name);
		if (IS_ERR(chan)) {
			/*
			 * Only report probe deferral errors; channels
			 * might not be present for devices that
			 * support only TX or only RX.
			 */
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			pcm->chan[i] = NULL;
		} else {
			pcm->chan[i] = chan;
		}
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
		pcm->chan[1] = pcm->chan[0];

	return 0;
}

static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
{
	unsigned int i;

	for_each_pcm_streams(i) {
		if (!pcm->chan[i])
			continue;
		dma_release_channel(pcm->chan[i]);
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}
}

/**
 * snd_dmaengine_pcm_register() - Register a dmaengine based PCM device
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	const struct snd_soc_component_driver *driver;
	struct dmaengine_pcm *pcm;
	int ret;

	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
	if (!pcm)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	pcm->component.debugfs_prefix = "dma";
#endif
	pcm->config = config;
	pcm->flags = flags;

	ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
	if (ret)
		goto err_free_dma;

	if (config && config->process)
		driver = &dmaengine_pcm_component_process;
	else
		driver = &dmaengine_pcm_component;

	ret = snd_soc_component_initialize(&pcm->component, driver, dev);
	if (ret)
		goto err_free_dma;

	ret = snd_soc_add_component(&pcm->component, NULL, 0);
	if (ret)
		goto err_free_dma;

	return 0;

err_free_dma:
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
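
/*
 * Usage sketch (not part of this file): a platform driver typically registers
 * the generic dmaengine PCM from its probe callback, next to its CPU DAI
 * component. The my_* names below are hypothetical; passing a NULL config and
 * no flags is valid and selects the built-in defaults. The
 * devm_snd_dmaengine_pcm_register() variant can be used instead so that the
 * PCM is unregistered automatically on driver detach.
 *
 *	static int my_platform_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_snd_soc_register_component(&pdev->dev,
 *						      &my_dai_component,
 *						      &my_dai_driver, 1);
 *		if (ret)
 *			return ret;
 *
 *		return snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
 *	}
 */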

/**
 * snd_dmaengine_pcm_unregister() - Removes a dmaengine based PCM device
 * @dev: Parent device the PCM was registered with
 *
 * Removes a dmaengine based PCM device previously registered with
 * snd_dmaengine_pcm_register().
 */
void snd_dmaengine_pcm_unregister(struct device *dev)
{
	struct snd_soc_component *component;
	struct dmaengine_pcm *pcm;

	component = snd_soc_lookup_component(dev, SND_DMAENGINE_PCM_DRV_NAME);
	if (!component)
		return;

	pcm = soc_component_to_pcm(component);

	snd_soc_unregister_component_by_driver(dev, component->driver);
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
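
/*
 * Usage sketch (not part of this file): drivers that used the non-devm
 * snd_dmaengine_pcm_register() above pair it with this call in their remove
 * path. The my_platform_remove() name is hypothetical.
 *
 *	static int my_platform_remove(struct platform_device *pdev)
 *	{
 *		snd_dmaengine_pcm_unregister(&pdev->dev);
 *		return 0;
 *	}
 */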

MODULE_LICENSE("GPL");