linux/sound/soc/intel/atom/sst/sst_ipc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  sst_ipc.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14 Intel Corporation
 *  Authors:    Vinod Koul <vinod.koul@intel.com>
 *              Harsha Priya <priya.harsha@intel.com>
 *              Dharageswari R <dharageswari.r@intel.com>
 *              KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/intel-mid.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
#include "../../common/sst-dsp.h"

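/*
 * sst_create_block - allocate a block to wait on an IPC reply
 * @ctx: driver context
 * @msg_id: IPC message id the caller will wait for
 * @drv_id: private driver/stream id identifying the caller
 *
 * The block is added to ctx->block_list under block_lock; the interrupt
 * path matches replies against (msg_id, drv_id) and wakes the waiter by
 * setting block->condition.
 */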
struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
					u32 msg_id, u32 drv_id)
{
	struct sst_block *msg = NULL;

	dev_dbg(ctx->dev, "Enter\n");
	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return NULL;
	msg->condition = false;
	msg->on = true;
	msg->msg_id = msg_id;
	msg->drv_id = drv_id;
	spin_lock_bh(&ctx->block_lock);
	list_add_tail(&msg->node, &ctx->block_list);
	spin_unlock_bh(&ctx->block_lock);

	return msg;
}

/*
 * While handling an interrupt we need to check the message status and then
 * whether a thread is blocked waiting on that message.
 *
 * Here we unblock the waiters: the block is looked up by the (ipc, drv_id)
 * pair that was passed in. No matching block is found in two cases:
 *  a) the message is a short one that nobody blocks on, which can be
 *     silently ignored
 *  b) we genuinely fail to find the block (a bug, perhaps)
 *
 * Since short messages are frequent, an error print here would spam the
 * kernel log, so keep these as debug prints to be enabled via dynamic
 * debug while debugging IPC issues.
 */
int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
		u32 drv_id, u32 ipc, void *data, u32 size)
{
	struct sst_block *block = NULL;

	dev_dbg(ctx->dev, "Enter\n");

	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry(block, &ctx->block_list, node) {
		dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id,
							block->drv_id);
		if (block->msg_id == ipc && block->drv_id == drv_id) {
			dev_dbg(ctx->dev, "free up the block\n");
			block->ret_code = result;
			block->data = data;
			block->size = size;
			block->condition = true;
			spin_unlock_bh(&ctx->block_lock);
			wake_up(&ctx->wait_queue);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_dbg(ctx->dev,
		"Block not found or a response received for a short msg for ipc %d, drv_id %d\n",
		ipc, drv_id);
	return -EINVAL;
}

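/*
 * sst_free_block - remove a block from the pending list and free it
 * @ctx: driver context
 * @freed: block previously returned by sst_create_block()
 *
 * Also frees any reply data that sst_wake_up_block() attached to the
 * block. Returns -EINVAL if the block is no longer on the list.
 */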
int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
{
	struct sst_block *block = NULL, *__block;

	dev_dbg(ctx->dev, "Enter\n");
	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
		if (block == freed) {
			dev_dbg(ctx->dev, "pvt_id freed --> %d\n", freed->drv_id);
			/* remove the block from the pending list */
			list_del(&freed->node);
			spin_unlock_bh(&ctx->block_lock);
			kfree(freed->data);
			freed->data = NULL;
			kfree(freed);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_err(ctx->dev, "block not found, possibly already freed\n");
	return -EINVAL;
}

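/*
 * Illustrative sketch only (kept out of the build): how a sender typically
 * pairs the block helpers above, i.e. register a block, post the IPC, sleep
 * until sst_wake_up_block() sets block->condition, then drop the block. The
 * function name, the elided posting step and the 5 second timeout below are
 * assumptions for the example, not part of this file.
 */
#if 0
static int example_send_and_wait(struct intel_sst_drv *ctx, u32 msg_id,
				 u32 drv_id)
{
	struct sst_block *block;
	int ret;

	/* register interest in the reply before posting the IPC */
	block = sst_create_block(ctx, msg_id, drv_id);
	if (!block)
		return -ENOMEM;

	/* ... build and post the IPC message to the DSP here ... */

	/* sleep until sst_wake_up_block() sets block->condition */
	if (!wait_event_timeout(ctx->wait_queue, block->condition,
				msecs_to_jiffies(5000)))
		ret = -ETIMEDOUT;
	else
		ret = block->ret_code;

	/* drop the block; this also frees any reply data copied for us */
	sst_free_block(ctx, block);
	return ret;
}
#endif
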
int sst_post_message_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *ipc_msg, bool sync)
{
	struct ipc_post *msg = ipc_msg;
	union ipc_header_mrfld header;
	unsigned int loop_count = 0;
	int retval = 0;
	unsigned long irq_flags;

	dev_dbg(sst_drv_ctx->dev, "Enter: sync: %d\n", sync);
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
	if (sync) {
		while (header.p.header_high.part.busy) {
			if (loop_count > 25) {
				dev_err(sst_drv_ctx->dev,
					"sst: Busy wait failed, can't send this msg\n");
				retval = -EBUSY;
				goto out;
			}
			cpu_relax();
			loop_count++;
			header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
		}
	} else {
		if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
			/* queue is empty, nothing to send */
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev,
					"Empty msg queue... NO Action\n");
			return 0;
		}

		if (header.p.header_high.part.busy) {
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev, "Busy bit still set... post later\n");
			return 0;
		}

		/* copy msg from list */
		msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
				struct ipc_post, node);
		list_del(&msg->node);
	}
	dev_dbg(sst_drv_ctx->dev, "sst: Post message: header = %x\n",
				msg->mrfld_header.p.header_high.full);
	dev_dbg(sst_drv_ctx->dev, "sst: size = 0x%x\n",
			msg->mrfld_header.p.header_low_payload);

	if (msg->mrfld_header.p.header_high.part.large)
		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
			msg->mailbox_data,
			msg->mrfld_header.p.header_low_payload);

	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);

out:
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	kfree(msg->mailbox_data);
	kfree(msg);
	return retval;
}

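/*
 * intel_sst_clear_intr_mrfld - acknowledge a DSP-initiated IPC
 * @sst_drv_ctx: driver context
 *
 * Clears the busy bit in ISRX (write 1 to clear), signals completion to the
 * DSP by setting the done bit and IPC_ACK_SUCCESS payload in IPCD, and then
 * unmasks the busy interrupt in IMRX again.
 */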
void intel_sst_clear_intr_mrfld(struct intel_sst_drv *sst_drv_ctx)
{
	union interrupt_reg_mrfld isr;
	union interrupt_reg_mrfld imr;
	union ipc_header_mrfld clear_ipc;
	unsigned long irq_flags;

	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
	isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);

	/* write 1 to clear */
	isr.part.busy_interrupt = 1;
	sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);

	/* Set IA done bit */
	clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);

	clear_ipc.p.header_high.part.busy = 0;
	clear_ipc.p.header_high.part.done = 1;
	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
	sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
	/* unmask the busy interrupt */
	imr.part.busy_interrupt = 0;
	sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
}

/*
 * process_fw_init - process the FW init msg
 * @sst_drv_ctx: driver context
 * @msg: IPC message mailbox data from FW
 *
 * Processes the FW init message sent after firmware download: on success it
 * logs and saves the FW version, on failure it puts the FW state back to
 * reset, and in both cases it wakes up the thread waiting on FW_DWNL_ID.
 */
static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
			void *msg)
{
	struct ipc_header_fw_init *init =
		(struct ipc_header_fw_init *)msg;
	int retval = 0;

	dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n");
	if (init->result) {
		sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
		dev_err(sst_drv_ctx->dev, "FW Init failed, Error %x\n",
				init->result);
		retval = init->result;
		goto ret;
	}
	if (memcmp(&sst_drv_ctx->fw_version, &init->fw_version,
		   sizeof(init->fw_version)))
		dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
			init->fw_version.type, init->fw_version.major,
			init->fw_version.minor, init->fw_version.build);
	dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
			init->build_info.date, init->build_info.time);

	/* Save FW version */
	sst_drv_ctx->fw_version.type = init->fw_version.type;
	sst_drv_ctx->fw_version.major = init->fw_version.major;
	sst_drv_ctx->fw_version.minor = init->fw_version.minor;
	sst_drv_ctx->fw_version.build = init->fw_version.build;

ret:
	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0, NULL, 0);
}

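/*
 * process_fw_async_msg - handle asynchronous notifications from the FW
 * @sst_drv_ctx: driver context
 * @msg: IPC message whose mailbox data starts with a struct ipc_dsp_hdr
 *
 * Dispatches period-elapsed, drain-complete, async-error, FW-init-complete
 * and buffer-underrun notifications based on the cmd_id in the DSP header.
 */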
static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx,
			struct ipc_post *msg)
{
	u32 msg_id;
	int str_id;
	u32 data_size;
	void *data_offset;
	struct stream_info *stream;
	u32 msg_low, pipe_id;

	msg_low = msg->mrfld_header.p.header_low_payload;
	msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
	data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
	data_size = msg_low - (sizeof(struct ipc_dsp_hdr));

	switch (msg_id) {
	case IPC_SST_PERIOD_ELAPSED_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			dev_dbg(sst_drv_ctx->dev,
				"Period elapsed rcvd for pipe id 0x%x\n",
				pipe_id);
			stream = &sst_drv_ctx->streams[str_id];
			/* If stream is dropped, skip processing this message */
			if (stream->status == STREAM_INIT)
				break;
			if (stream->period_elapsed)
				stream->period_elapsed(stream->pcm_substream);
			if (stream->compr_cb)
				stream->compr_cb(stream->compr_cb_param);
		}
		break;

	case IPC_IA_DRAIN_STREAM_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			stream = &sst_drv_ctx->streams[str_id];
			if (stream->drain_notify)
				stream->drain_notify(stream->drain_cb_param);
		}
		break;

	case IPC_IA_FW_ASYNC_ERR_MRFLD:
		dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n");
		/* dump the whole error payload once, as 32-bit words */
		print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
				16, 4, data_offset, data_size, false);
		break;
 299
 300        case IPC_IA_FW_INIT_CMPLT_MRFLD:
 301                process_fw_init(sst_drv_ctx, data_offset);
 302                break;
 303
 304        case IPC_IA_BUF_UNDER_RUN_MRFLD:
 305                pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
 306                str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
 307                if (str_id > 0)
 308                        dev_err(sst_drv_ctx->dev,
 309                                "Buffer under-run for pipe:%#x str_id:%d\n",
 310                                pipe_id, str_id);
 311                break;
 312
 313        default:
 314                dev_err(sst_drv_ctx->dev,
 315                        "Unrecognized async msg from FW msg_id %#x\n", msg_id);
 316        }
 317}
 318
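/*
 * sst_process_reply_mrfld - process a reply received from the FW
 * @sst_drv_ctx: driver context
 * @msg: received IPC message
 *
 * Async messages (SST_ASYNC_DRV_ID) are handed to process_fw_async_msg().
 * Short error responses and normal replies wake up the thread blocked on
 * the matching (msg_id, drv_id); for large replies the mailbox payload is
 * duplicated and handed to the waiter, or freed if no waiter is found.
 */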
void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *msg)
{
	unsigned int drv_id;
	void *data;
	union ipc_header_high msg_high;
	u32 msg_low;
	struct ipc_dsp_hdr *dsp_hdr;

	msg_high = msg->mrfld_header.p.header_high;
	msg_low = msg->mrfld_header.p.header_low_payload;

	dev_dbg(sst_drv_ctx->dev, "IPC process message header %x payload %x\n",
			msg->mrfld_header.p.header_high.full,
			msg->mrfld_header.p.header_low_payload);

	drv_id = msg_high.part.drv_id;

	/* Check for async messages first */
	if (drv_id == SST_ASYNC_DRV_ID) {
		/* FW sent an async large message */
		process_fw_async_msg(sst_drv_ctx, msg);
		return;
	}

	/* FW sent a short error response for an IPC */
	if (msg_high.part.result && drv_id && !msg_high.part.large) {
		/* 32-bit FW error code in msg_low */
		dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x\n", msg_low);
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
			msg_high.part.drv_id,
			msg_high.part.msg_id, NULL, 0);
		return;
	}

	/*
	 * Process all valid responses
	 * if it is a large message, the payload contains the size to
	 * copy from the mailbox
	 */
	if (msg_high.part.large) {
		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
		if (!data)
			return;
		/* Copy command id so that we can use it to put sst to reset */
		dsp_hdr = (struct ipc_dsp_hdr *)data;
		dev_dbg(sst_drv_ctx->dev, "cmd_id %d\n", dsp_hdr->cmd_id);
		if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, data, msg_low))
			kfree(data);
	} else {
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, NULL, 0);
	}
}