linux/sound/soc/intel/skylake/skl-sst-cldma.c
/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
				SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

void skl_cldma_int_disable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}

static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
	unsigned char val;
	int timeout;

	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));

	udelay(3);
	timeout = 300;
	do {
		/* wait for the hardware to report that the stream Run bit matches 'enable' */
		val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
			CL_SD_CTL_RUN_MASK;
		if (enable && val)
			break;
		else if (!enable && !val)
			break;
		udelay(3);
	} while (--timeout);

	if (timeout == 0)
		dev_err(ctx->dev, "Failed to set Run bit=%d enable=%d\n", val, enable);
}

static void skl_cldma_stream_clear(struct sst_dsp *ctx)
{
	/* make sure Run bit is cleared before setting stream register */
	skl_cldma_stream_run(ctx, 0);

	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}

/* Code loader helper APIs */
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_data,
		u32 **bdlp, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	ctx->cl_dev.frags = 0;
	while (size > 0) {
		phys_addr_t addr = virt_to_phys(dmab_data->area +
				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));

		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));

		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);

		size -= ctx->cl_dev.bufsize;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);

		bdl += 4;
		ctx->cl_dev.frags++;
	}
}
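
/*
 * Illustrative sketch (an assumption for exposition, not driver code):
 * each BDL entry is four u32 words -- buffer address low, buffer address
 * high, fragment length, and the interrupt-on-completion (IOC) flag.
 * For a hypothetical 64 KB transfer with a 32 KB bufsize,
 * skl_cldma_setup_bdle() would emit two entries and set IOC only on the
 * last one:
 *
 *	entry 0: { lower_32_bits(addr0), upper_32_bits(addr0), 0x8000, 0 }
 *	entry 1: { lower_32_bits(addr1), upper_32_bits(addr1), 0x8000, 1 }
 *
 * where addr0/addr1 are the physical addresses of consecutive 32 KB
 * fragments of dmab_data.
 */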

/*
 * Set up the controller:
 * Program the BDL base address, the cyclic buffer length and the last
 * valid index, and enable the stream interrupts.
 * Note: stream 1 (FW_CL_STREAM_NUMBER) is used for the transfer.
 */
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
		u32 count)
{
	skl_cldma_stream_clear(ctx);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
			CL_SD_BDLPLBA(dmab_bdl->addr));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
			CL_SD_BDLPUBA(dmab_bdl->addr));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}

static void skl_cldma_setup_spb(struct sst_dsp *ctx,
		unsigned int size, bool enable)
{
	if (enable)
		sst_dsp_shim_update_bits_unlocked(ctx,
				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
				CL_SPBFIFO_SPBFCCTL_SPIBE(1));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}
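
/*
 * SPIB is the software position-in-buffer register: the DMA engine only
 * fetches data up to the offset programmed here, so it acts as the ring
 * buffer write pointer (see the "dma transfers only up to the write
 * pointer" note in skl_cldma_copy_to_buf() below). A minimal usage
 * sketch, assuming a hypothetical 4 KB chunk staged at the start of the
 * ring buffer:
 *
 *	skl_cldma_setup_spb(ctx, 0x1000, true);	// enable SPIB, allow 4 KB
 *	// ... DMA consumes up to offset 0x1000, then stalls ...
 *	skl_cldma_cleanup_spb(ctx);		// disable and reset SPIB
 */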

static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
			CL_SPBFIFO_SPBFCCTL_SPIBE(0));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}

static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
	skl_cldma_cleanup_spb(ctx);
	skl_cldma_stream_clear(ctx);

	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}

int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
	int ret = 0;

	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
				ctx->cl_dev.wait_condition,
				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
		ret = -EIO;
		goto cleanup;
	}

	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
		ret = -EIO;
	}

cleanup:
	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
	return ret;
}

static void skl_cldma_stop(struct sst_dsp *ctx)
{
	skl_cldma_stream_run(ctx, false);
}

static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
		const void *curr_pos, bool intr_enable, bool trigger)
{
	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
			ctx->cl_dev.dma_buffer_offset, trigger);
	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

	/*
	 * If the copy would run past the end of the ring buffer, copy up
	 * to the buffer end first, then copy the remainder starting from
	 * the beginning of the ring buffer.
	 */
	if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
		unsigned int size_b = ctx->cl_dev.bufsize -
					ctx->cl_dev.dma_buffer_offset;
		memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size_b);
		size -= size_b;
		curr_pos += size_b;
		ctx->cl_dev.dma_buffer_offset = 0;
	}

	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size);

	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
		ctx->cl_dev.dma_buffer_offset = 0;
	else
		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

	ctx->cl_dev.wait_condition = false;

	if (intr_enable)
		skl_cldma_int_enable(ctx);

	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
	if (trigger)
		ctx->cl_dev.ops.cl_trigger(ctx, true);
}
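
/*
 * Worked wrap-around example (illustrative numbers only): with
 * bufsize = 32 KB, dma_buffer_offset = 28 KB and an 8 KB chunk to copy,
 * the first memcpy() fills the last 4 KB of the ring buffer, the offset
 * resets to 0, and the second memcpy() places the remaining 4 KB at the
 * start of the buffer. curr_spib_pos, already advanced by the caller,
 * then becomes the write offset for the next fill.
 */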

/*
 * The CL DMA doesn't have any way to report transfer status until a BDL
 * buffer is fully transferred, so copying is handled as follows:
 * 1. Interrupt on buffer done when the size to be transferred is larger
 *    than the ring buffer size.
 * 2. Polling on a fw register when the data left to be transferred does
 *    not fill the ring buffer. The caller takes care of polling the
 *    required status register to identify the transfer status.
 * 3. If the wait flag is set, wait for the BDL interrupt and copy the
 *    next chunk until bytes_left is 0.
 *    If the wait flag is not set, don't wait for the BDL interrupt;
 *    after copying the first chunk, return the number of bytes left to
 *    be copied.
 */
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin,
			u32 total_size, bool wait)
{
	int ret = 0;
	bool start = true;
	unsigned int excess_bytes;
	u32 size;
	unsigned int bytes_left = total_size;
	const void *curr_pos = bin;

	if (!total_size)
		return -EINVAL;

	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

	while (bytes_left) {
		if (bytes_left > ctx->cl_dev.bufsize) {

			/*
			 * the DMA engine only transfers up to the write
			 * pointer as programmed in SPIB
			 */
			if (ctx->cl_dev.curr_spib_pos == 0)
				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

			size = ctx->cl_dev.bufsize;
			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

			if (wait) {
				start = false;
				ret = skl_cldma_wait_interruptible(ctx);
				if (ret < 0) {
					skl_cldma_stop(ctx);
					return ret;
				}
			}
		} else {
			skl_cldma_int_disable(ctx);

			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
							<= ctx->cl_dev.bufsize) {
				ctx->cl_dev.curr_spib_pos += bytes_left;
			} else {
				excess_bytes = bytes_left -
					(ctx->cl_dev.bufsize -
					ctx->cl_dev.curr_spib_pos);
				ctx->cl_dev.curr_spib_pos = excess_bytes;
			}

			size = bytes_left;
			skl_cldma_fill_buffer(ctx, size,
					curr_pos, false, start);
		}
		bytes_left -= size;
		curr_pos = curr_pos + size;
		if (!wait)
			return bytes_left;
	}

	return bytes_left;
}
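
/*
 * Usage sketch (hypothetical caller, not part of this file): a firmware
 * loader streaming an image larger than the ring buffer and blocking on
 * each buffer-complete interrupt would do roughly:
 *
 *	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, fw->data, fw->size, true);
 *	if (ret < 0)
 *		goto err;	// timeout or DMA error; stream already stopped
 *
 * With wait == false the call returns after the first chunk with the
 * number of bytes still to be copied, and the caller polls the firmware
 * status register before sending the rest.
 */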

void skl_cldma_process_intr(struct sst_dsp *ctx)
{
	u8 cl_dma_intr_status;

	cl_dma_intr_status =
		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
	else
		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

	ctx->cl_dev.wait_condition = true;
	wake_up(&ctx->cl_dev.wait_queue);
}

int skl_cldma_prepare(struct sst_dsp *ctx)
{
	int ret;
	u32 *bdl;

	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

	/* Set up the code loader ops */
	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
	ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

	/* Allocate the DMA data buffer */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
		return ret;
	}
	/* Set up the code loader BDL */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for bdl failed: %x\n", ret);
		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
		return ret;
	}
	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

	/* Populate the BDL entries */
	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
			&bdl, ctx->cl_dev.bufsize, 1);
	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
			ctx->cl_dev.bufsize, ctx->cl_dev.frags);

	ctx->cl_dev.curr_spib_pos = 0;
	ctx->cl_dev.dma_buffer_offset = 0;
	init_waitqueue_head(&ctx->cl_dev.wait_queue);

	return ret;
}
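
/*
 * Lifecycle sketch (hypothetical caller, not part of this file),
 * assuming the usual prepare/copy/stop/cleanup ordering:
 *
 *	ret = skl_cldma_prepare(ctx);		// alloc buffers, BDL, ops
 *	if (ret < 0)
 *		return ret;
 *	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, fw_data, fw_size, true);
 *	ctx->cl_dev.ops.cl_stop_dma(ctx);	// stop the CL DMA stream
 *	ctx->cl_dev.ops.cl_cleanup_controller(ctx);	// free DMA buffers
 */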