linux/sound/soc/intel/skylake/skl-sst-cldma.c
/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
        sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
                                SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

void skl_cldma_int_disable(struct sst_dsp *ctx)
{
        sst_dsp_shim_update_bits_unlocked(ctx,
                        SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}

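/*
 * Set or clear the code loader stream Run bit and poll (up to 300 * 3us)
 * until the hardware reflects the requested state.
 */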
static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
        unsigned char val;
        int timeout;

        sst_dsp_shim_update_bits_unlocked(ctx,
                        SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));

        udelay(3);
        timeout = 300;
        do {
                /* wait for the hardware to report the requested Run bit state */
                val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
                        CL_SD_CTL_RUN_MASK;
                if (enable && val)
                        break;
                else if (!enable && !val)
                        break;
                udelay(3);
        } while (--timeout);

        if (timeout == 0)
                dev_err(ctx->dev, "Failed to set Run bit=%d enable=%d\n", val, enable);
}

static void skl_cldma_stream_clear(struct sst_dsp *ctx)
{
        /* make sure the Run bit is cleared before resetting the stream registers */
        skl_cldma_stream_run(ctx, 0);

        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                                CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                                CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                                CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                                CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}

/* Code loader helper APIs */
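/*
 * Each BDL entry is four 32-bit words: buffer address low, buffer address
 * high, buffer length in bytes and the interrupt-on-completion flag.
 */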
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
                struct snd_dma_buffer *dmab_data,
                u32 **bdlp, int size, int with_ioc)
{
        u32 *bdl = *bdlp;

        ctx->cl_dev.frags = 0;
        while (size > 0) {
                phys_addr_t addr = virt_to_phys(dmab_data->area +
                                (ctx->cl_dev.frags * ctx->cl_dev.bufsize));

                bdl[0] = cpu_to_le32(lower_32_bits(addr));
                bdl[1] = cpu_to_le32(upper_32_bits(addr));

                bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);

                size -= ctx->cl_dev.bufsize;
                bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);

                bdl += 4;
                ctx->cl_dev.frags++;
        }
}

/*
 * Setup controller
 * Configure the registers to update the DMA buffer address and
 * enable interrupts.
 * Note: stream channel 1 is used for the transfer
 */
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
                struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
                u32 count)
{
        skl_cldma_stream_clear(ctx);
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
                        CL_SD_BDLPLBA(dmab_bdl->addr));
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
                        CL_SD_BDLPUBA(dmab_bdl->addr));

        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}

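/*
 * Program the SPIB (software position in buffer): when enabled, the DMA
 * fetches data only up to the byte position written here.
 */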
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
                unsigned int size, bool enable)
{
        if (enable)
                sst_dsp_shim_update_bits_unlocked(ctx,
                                SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
                                CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
                                CL_SPBFIFO_SPBFCCTL_SPIBE(1));

        sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}

static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
        sst_dsp_shim_update_bits_unlocked(ctx,
                        SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
                        CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
                        CL_SPBFIFO_SPBFCCTL_SPIBE(0));

        sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}

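/* Disable SPIB, reset the stream registers and free the DMA buffers. */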
static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
        skl_cldma_cleanup_spb(ctx);
        skl_cldma_stream_clear(ctx);

        ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
        ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}

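/*
 * Sleep until skl_cldma_process_intr() reports a buffer-complete event;
 * return -EIO on timeout or DMA error.
 */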
int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
        int ret = 0;

        if (!wait_event_timeout(ctx->cl_dev.wait_queue,
                                ctx->cl_dev.wait_condition,
                                msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
                dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
                ret = -EIO;
                goto cleanup;
        }

        dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
        if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
                dev_err(ctx->dev, "%s: DMA Error\n", __func__);
                ret = -EIO;
        }

cleanup:
        ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
        return ret;
}

static void skl_cldma_stop(struct sst_dsp *ctx)
{
        skl_cldma_stream_run(ctx, false);
}

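/*
 * Copy one chunk into the ring buffer (wrapping at the buffer end), update
 * SPIB to the new write position and, if requested, enable the buffer
 * complete interrupt and start the DMA.
 */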
static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
                const void *curr_pos, bool intr_enable, bool trigger)
{
        dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
        dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
                        ctx->cl_dev.dma_buffer_offset, trigger);
        dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

        /*
         * If the copy would run past the end of the ring buffer, copy up to
         * the buffer end first and then copy the remainder starting from the
         * beginning of the ring buffer.
         */
        if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
                unsigned int size_b = ctx->cl_dev.bufsize -
                                        ctx->cl_dev.dma_buffer_offset;
                memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
                        curr_pos, size_b);
                size -= size_b;
                curr_pos += size_b;
                ctx->cl_dev.dma_buffer_offset = 0;
        }

        memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
                        curr_pos, size);

        if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
                ctx->cl_dev.dma_buffer_offset = 0;
        else
                ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

        ctx->cl_dev.wait_condition = false;

        if (intr_enable)
                skl_cldma_int_enable(ctx);

        ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
        if (trigger)
                ctx->cl_dev.ops.cl_trigger(ctx, true);
}

/*
 * The CL DMA doesn't have any way to update the transfer status until a BDL
 * buffer is fully transferred.
 *
 * So copying is done in one of two modes:
 * 1. Interrupt on buffer done: used when the size to be transferred is
 *    larger than the ring buffer.
 * 2. Polling on a firmware register: used when the data left to transfer
 *    does not fill the ring buffer. The caller takes care of polling the
 *    required status register to identify the transfer status.
 *
 * If the wait flag is set, wait for the BDL interrupt and copy the next
 * chunk until bytes_left reaches 0.
 * If the wait flag is not set, don't wait for the BDL interrupt; after
 * copying the first chunk, return the number of bytes left to be copied.
 */
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin,
                        u32 total_size, bool wait)
{
        int ret = 0;
        bool start = true;
        unsigned int excess_bytes;
        u32 size;
        unsigned int bytes_left = total_size;
        const void *curr_pos = bin;

        if (!total_size)
                return -EINVAL;

        dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

        while (bytes_left) {
                if (bytes_left > ctx->cl_dev.bufsize) {

                        /*
                         * The DMA transfers only up to the write pointer
                         * programmed in SPIB.
                         */
                        if (ctx->cl_dev.curr_spib_pos == 0)
                                ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

                        size = ctx->cl_dev.bufsize;
                        skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

                        if (wait) {
                                start = false;
                                ret = skl_cldma_wait_interruptible(ctx);
                                if (ret < 0) {
                                        skl_cldma_stop(ctx);
                                        return ret;
                                }
                        }
                } else {
                        skl_cldma_int_disable(ctx);

                        if ((ctx->cl_dev.curr_spib_pos + bytes_left)
                                                        <= ctx->cl_dev.bufsize) {
                                ctx->cl_dev.curr_spib_pos += bytes_left;
                        } else {
                                excess_bytes = bytes_left -
                                        (ctx->cl_dev.bufsize -
                                        ctx->cl_dev.curr_spib_pos);
                                ctx->cl_dev.curr_spib_pos = excess_bytes;
                        }

                        size = bytes_left;
                        skl_cldma_fill_buffer(ctx, size,
                                        curr_pos, false, start);
                }
                bytes_left -= size;
                curr_pos = curr_pos + size;
                if (!wait)
                        return bytes_left;
        }

        return bytes_left;
}

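/*
 * Interrupt handling for the code loader stream: read the stream status,
 * record buffer-complete or error and wake up the waiter.
 */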
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
        u8 cl_dma_intr_status;

        cl_dma_intr_status =
                sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

        if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
                ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
        else
                ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

        ctx->cl_dev.wait_condition = true;
        wake_up(&ctx->cl_dev.wait_queue);
}

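/*
 * One-time setup of the code loader DMA: install the cl_dev ops, allocate
 * the data ring buffer and the BDL, program the controller and initialize
 * the wait queue.
 */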
int skl_cldma_prepare(struct sst_dsp *ctx)
{
        int ret;
        u32 *bdl;

        ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

        /* Set up cl ops */
        ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
        ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
        ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
        ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
        ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
        ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
        ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
        ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

        /* Allocate buffer */
        ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
                        &ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
        if (ret < 0) {
                dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
                return ret;
        }
        /* Setup Code loader BDL */
        ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
                        &ctx->cl_dev.dmab_bdl, PAGE_SIZE);
        if (ret < 0) {
                dev_err(ctx->dev, "Alloc buffer for bdl failed: %x\n", ret);
                ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
                return ret;
        }
        bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

        /* Set up the BDL entries and program the controller */
        ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
                        &bdl, ctx->cl_dev.bufsize, 1);
        ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
                        ctx->cl_dev.bufsize, ctx->cl_dev.frags);

        ctx->cl_dev.curr_spib_pos = 0;
        ctx->cl_dev.dma_buffer_offset = 0;
        init_waitqueue_head(&ctx->cl_dev.wait_queue);

        return ret;
}