// SPDX-License-Identifier: GPL-2.0-only
/*
 *  sst.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14       Intel Corp
 *  Authors:    Vinod Koul <vinod.koul@intel.com>
 *              Harsha Priya <priya.harsha@intel.com>
 *              Dharageswari R <dharageswari.r@intel.com>
 *              KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/async.h>
#include <linux/acpi.h>
#include <linux/sysfs.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
#include "../../common/sst-dsp.h"

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
MODULE_LICENSE("GPL v2");

static inline bool sst_is_process_reply(u32 msg_id)
{
        return msg_id & PROCESS_MSG;
}

static inline bool sst_validate_mailbox_size(unsigned int size)
{
        return size <= SST_MAILBOX_SIZE;
}

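/*
 * Primary (hard) IRQ handler: on a "done" interrupt it acknowledges IPCX so
 * further messages can be posted; on a "busy" interrupt it masks the source,
 * copies the incoming IPC header (and mailbox payload for large messages)
 * onto rx_list and wakes the IRQ thread.
 */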
static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
{
        union interrupt_reg_mrfld isr;
        union ipc_header_mrfld header;
        union sst_imr_reg_mrfld imr;
        struct ipc_post *msg = NULL;
        unsigned int size = 0;
        struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
        irqreturn_t retval = IRQ_HANDLED;

        /* Interrupt arrived, check src */
        isr.full = sst_shim_read64(drv->shim, SST_ISRX);

        if (isr.part.done_interrupt) {
                /* Clear done bit */
                spin_lock(&drv->ipc_spin_lock);
                header.full = sst_shim_read64(drv->shim,
                                        drv->ipc_reg.ipcx);
                header.p.header_high.part.done = 0;
                sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);

                /* write 1 to clear status register */
                isr.part.done_interrupt = 1;
                sst_shim_write64(drv->shim, SST_ISRX, isr.full);
                spin_unlock(&drv->ipc_spin_lock);

                /* we can send more messages to DSP so trigger work */
                queue_work(drv->post_msg_wq, &drv->ipc_post_msg_wq);
                retval = IRQ_HANDLED;
        }

        if (isr.part.busy_interrupt) {
                /* message from dsp so copy that */
                spin_lock(&drv->ipc_spin_lock);
                imr.full = sst_shim_read64(drv->shim, SST_IMRX);
                imr.part.busy_interrupt = 1;
                sst_shim_write64(drv->shim, SST_IMRX, imr.full);
                spin_unlock(&drv->ipc_spin_lock);
                header.full = sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);

                if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
                        drv->ops->clear_interrupt(drv);
                        return IRQ_HANDLED;
                }

                if (header.p.header_high.part.large) {
                        size = header.p.header_low_payload;
                        if (sst_validate_mailbox_size(size)) {
                                memcpy_fromio(msg->mailbox_data,
                                        drv->mailbox + drv->mailbox_recv_offset, size);
                        } else {
                                dev_err(drv->dev,
                                        "Mailbox not copied, payload size is: %u\n", size);
                                header.p.header_low_payload = 0;
                        }
                }

                msg->mrfld_header = header;
                msg->is_process_reply =
                        sst_is_process_reply(header.p.header_high.part.msg_id);
                spin_lock(&drv->rx_msg_lock);
                list_add_tail(&msg->node, &drv->rx_list);
                spin_unlock(&drv->rx_msg_lock);
                drv->ops->clear_interrupt(drv);
                retval = IRQ_WAKE_THREAD;
        }
        return retval;
}

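/*
 * Threaded IRQ handler: drains rx_list and dispatches each message either as
 * a process-level notification or as a reply to a previously posted command.
 */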
static irqreturn_t intel_sst_irq_thread_mrfld(int irq, void *context)
{
        struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
        struct ipc_post *__msg, *msg = NULL;
        unsigned long irq_flags;

        spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
        if (list_empty(&drv->rx_list)) {
                spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
                return IRQ_HANDLED;
        }

        list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
                list_del(&msg->node);
                spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
                if (msg->is_process_reply)
                        drv->ops->process_message(msg);
                else
                        drv->ops->process_reply(drv, msg);

                if (msg->is_large)
                        kfree(msg->mailbox_data);
                kfree(msg);
                spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
        }
        spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
        return IRQ_HANDLED;
}

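/* Ask the firmware to prepare for D3 so its context can be saved before suspend */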
static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
{
        int ret = 0;

        ret = sst_prepare_and_post_msg(sst, SST_TASK_ID_MEDIA, IPC_CMD,
                        IPC_PREP_D3, PIPE_RSVD, 0, NULL, NULL,
                        true, true, false, true);

        if (ret < 0) {
                dev_err(sst->dev, "not suspending FW!!, Err: %d\n", ret);
                return -EIO;
        }

        return 0;
}

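/* DSP ops shared by the Merrifield, Baytrail and Cherrytrail platforms */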
static struct intel_sst_ops mrfld_ops = {
        .interrupt = intel_sst_interrupt_mrfld,
        .irq_thread = intel_sst_irq_thread_mrfld,
        .clear_interrupt = intel_sst_clear_intr_mrfld,
        .start = sst_start_mrfld,
        .reset = intel_sst_reset_dsp_mrfld,
        .post_message = sst_post_message_mrfld,
        .process_reply = sst_process_reply_mrfld,
        .save_dsp_context = sst_save_dsp_context_v2,
        .alloc_stream = sst_alloc_stream_mrfld,
        .post_download = sst_post_download_mrfld,
};

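/* Select the ops table and timestamp offset matching the detected device ID */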
int sst_driver_ops(struct intel_sst_drv *sst)
{
        switch (sst->dev_id) {
        case SST_MRFLD_PCI_ID:
        case SST_BYT_ACPI_ID:
        case SST_CHV_ACPI_ID:
                sst->tstamp = SST_TIME_STAMP_MRFLD;
                sst->ops = &mrfld_ops;
                return 0;

        default:
                dev_err(sst->dev,
                        "SST Driver capabilities missing for dev_id: %x\n",
                        sst->dev_id);
                return -EINVAL;
        }
}

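/* Work handler: flush any queued IPC messages to the DSP */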
void sst_process_pending_msg(struct work_struct *work)
{
        struct intel_sst_drv *ctx = container_of(work,
                        struct intel_sst_drv, ipc_post_msg_wq);

        ctx->ops->post_message(ctx, NULL, false);
}

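/* Set up the message lists, wait queue and the single-threaded post-message workqueue */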
static int sst_workqueue_init(struct intel_sst_drv *ctx)
{
        INIT_LIST_HEAD(&ctx->memcpy_list);
        INIT_LIST_HEAD(&ctx->rx_list);
        INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
        INIT_LIST_HEAD(&ctx->block_list);
        INIT_WORK(&ctx->ipc_post_msg_wq, sst_process_pending_msg);
        init_waitqueue_head(&ctx->wait_queue);

        ctx->post_msg_wq =
                create_singlethread_workqueue("sst_post_msg_wq");
        if (!ctx->post_msg_wq)
                return -EBUSY;
        return 0;
}

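/* Initialize the driver-wide mutex and the IPC/rx/block spinlocks */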
static void sst_init_locks(struct intel_sst_drv *ctx)
{
        mutex_init(&ctx->sst_lock);
        spin_lock_init(&ctx->rx_msg_lock);
        spin_lock_init(&ctx->ipc_spin_lock);
        spin_lock_init(&ctx->block_lock);
}

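/* Allocate the devm-managed driver context and record the device and device ID */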
int sst_alloc_drv_context(struct intel_sst_drv **ctx,
                struct device *dev, unsigned int dev_id)
{
        *ctx = devm_kzalloc(dev, sizeof(struct intel_sst_drv), GFP_KERNEL);
        if (!(*ctx))
                return -ENOMEM;

        (*ctx)->dev = dev;
        (*ctx)->dev_id = dev_id;

        return 0;
}
EXPORT_SYMBOL_GPL(sst_alloc_drv_context);

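/* sysfs attribute: report the loaded firmware version (type.major.minor.build) */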
static ssize_t firmware_version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct intel_sst_drv *ctx = dev_get_drvdata(dev);

        if (ctx->fw_version.type == 0 && ctx->fw_version.major == 0 &&
            ctx->fw_version.minor == 0 && ctx->fw_version.build == 0)
                return sprintf(buf, "FW not yet loaded\n");
        else
                return sprintf(buf, "v%02x.%02x.%02x.%02x\n",
                               ctx->fw_version.type, ctx->fw_version.major,
                               ctx->fw_version.minor, ctx->fw_version.build);
}

static DEVICE_ATTR_RO(firmware_version);

static const struct attribute *sst_fw_version_attrs[] = {
        &dev_attr_firmware_version.attr,
        NULL,
};

static const struct attribute_group sst_fw_version_attr_group = {
        .attrs = (struct attribute **)sst_fw_version_attrs,
};

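/*
 * Common context init: copy the platform probe data, pick the device ops,
 * initialize locks, streams and workqueues, register the IRQ handlers, mask
 * the IPC interrupts, add a CPU latency QoS request, kick off the async
 * firmware request and publish the firmware_version sysfs attribute.
 */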
int sst_context_init(struct intel_sst_drv *ctx)
{
        int ret = 0, i;

        if (!ctx->pdata)
                return -EINVAL;

        if (!ctx->pdata->probe_data)
                return -EINVAL;

        memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info));

        ret = sst_driver_ops(ctx);
        if (ret != 0)
                return -EINVAL;

        sst_init_locks(ctx);
        sst_set_fw_state_locked(ctx, SST_RESET);

        /* pvt_id 0 reserved for async messages */
        ctx->pvt_id = 1;
        ctx->stream_cnt = 0;
        ctx->fw_in_mem = NULL;
        /* we use memcpy, so set to 0 */
        ctx->use_dma = 0;
        ctx->use_lli = 0;

        if (sst_workqueue_init(ctx))
                return -EINVAL;

        ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off;
        ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset;
        ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset;

        dev_info(ctx->dev, "Got drv data max stream %d\n",
                                ctx->info.max_streams);

        for (i = 1; i <= ctx->info.max_streams; i++) {
                struct stream_info *stream = &ctx->streams[i];

                memset(stream, 0, sizeof(*stream));
                stream->pipe_id = PIPE_RSVD;
                mutex_init(&stream->lock);
        }

        /* Register the ISR */
        ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt,
                                        ctx->ops->irq_thread, 0, SST_DRV_NAME,
                                        ctx);
        if (ret)
                goto do_free_mem;

        dev_dbg(ctx->dev, "Registered IRQ %#x\n", ctx->irq_num);

        /* default intr are unmasked so set this as masked */
        sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038);

        ctx->qos = devm_kzalloc(ctx->dev,
                sizeof(struct pm_qos_request), GFP_KERNEL);
        if (!ctx->qos) {
                ret = -ENOMEM;
                goto do_free_mem;
        }
        cpu_latency_qos_add_request(ctx->qos, PM_QOS_DEFAULT_VALUE);

        dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name);
        ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name,
                                      ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
        if (ret) {
                dev_err(ctx->dev, "Firmware download failed:%d\n", ret);
                goto do_free_mem;
        }

        ret = sysfs_create_group(&ctx->dev->kobj,
                                 &sst_fw_version_attr_group);
        if (ret) {
                dev_err(ctx->dev,
                        "Unable to create sysfs\n");
                goto do_free_mem;
        }

        sst_register(ctx->dev);
        return 0;

do_free_mem:
        destroy_workqueue(ctx->post_msg_wq);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_context_init);

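/* Tear down everything set up by sst_context_init() and free firmware resources */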
void sst_context_cleanup(struct intel_sst_drv *ctx)
{
        pm_runtime_get_noresume(ctx->dev);
        pm_runtime_disable(ctx->dev);
        sst_unregister(ctx->dev);
        sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
        sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
        flush_scheduled_work();
        destroy_workqueue(ctx->post_msg_wq);
        cpu_latency_qos_remove_request(ctx->qos);
        kfree(ctx->fw_sg_list.src);
        kfree(ctx->fw_sg_list.dst);
        ctx->fw_sg_list.list_len = 0;
        kfree(ctx->fw_in_mem);
        ctx->fw_in_mem = NULL;
        sst_memcpy_free_resources(ctx);
        ctx = NULL;
}
EXPORT_SYMBOL_GPL(sst_context_cleanup);

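/* Enable runtime PM with autosuspend; ACPI devices start out physically active */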
void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
{
        pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY);
        pm_runtime_use_autosuspend(ctx->dev);
        /*
         * For acpi devices, the actual physical device state is
         * initially active. So change the state to active before
         * enabling runtime PM.
         */

        if (!acpi_disabled)
                pm_runtime_set_active(ctx->dev);

        pm_runtime_enable(ctx->dev);

        if (acpi_disabled)
                pm_runtime_set_active(ctx->dev);
        else
                pm_runtime_put_noidle(ctx->dev);
}
EXPORT_SYMBOL_GPL(sst_configure_runtime_pm);

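/* Runtime suspend: save the DSP context, quiesce IPC and move the LPE to RESET */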
static int intel_sst_runtime_suspend(struct device *dev)
{
        int ret = 0;
        struct intel_sst_drv *ctx = dev_get_drvdata(dev);

        if (ctx->sst_state == SST_RESET) {
                dev_dbg(dev, "LPE is already in RESET state, No action\n");
                return 0;
        }
        /* save fw context */
        if (ctx->ops->save_dsp_context(ctx))
                return -EBUSY;

        /* Move the SST state to Reset */
        sst_set_fw_state_locked(ctx, SST_RESET);

        synchronize_irq(ctx->irq_num);
        flush_workqueue(ctx->post_msg_wq);

        ctx->ops->reset(ctx);

        return ret;
}

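/*
 * System suspend: refuse if any stream is still running, optionally free
 * streams that will not survive suspend, tell the FW to prepare for D3 and
 * save the IRAM/DRAM/mailbox/DDR contents so they can be restored on resume.
 */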
static int intel_sst_suspend(struct device *dev)
{
        struct intel_sst_drv *ctx = dev_get_drvdata(dev);
        struct sst_fw_save *fw_save;
        int i, ret = 0;

        /* check first if we are already in SW reset */
        if (ctx->sst_state == SST_RESET)
                return 0;

        /*
         * check if any stream is active and running;
         * they should already have been suspended by soc_suspend
         */
        for (i = 1; i <= ctx->info.max_streams; i++) {
                struct stream_info *stream = &ctx->streams[i];

                if (stream->status == STREAM_RUNNING) {
                        dev_err(dev, "stream %d is running, can't suspend, abort\n", i);
                        return -EBUSY;
                }

                if (ctx->pdata->streams_lost_on_suspend) {
                        stream->resume_status = stream->status;
                        stream->resume_prev = stream->prev;
                        if (stream->status != STREAM_UN_INIT)
                                sst_free_stream(ctx, i);
                }
        }
        synchronize_irq(ctx->irq_num);
        flush_workqueue(ctx->post_msg_wq);

        /* Move the SST state to Reset */
        sst_set_fw_state_locked(ctx, SST_RESET);

        /* tell DSP we are suspending */
        if (ctx->ops->save_dsp_context(ctx))
                return -EBUSY;

        /* save the memories */
        fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL);
        if (!fw_save)
                return -ENOMEM;
        fw_save->iram = kvzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
        if (!fw_save->iram) {
                ret = -ENOMEM;
                goto iram;
        }
        fw_save->dram = kvzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
        if (!fw_save->dram) {
                ret = -ENOMEM;
                goto dram;
        }
        fw_save->sram = kvzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
        if (!fw_save->sram) {
                ret = -ENOMEM;
                goto sram;
        }

        fw_save->ddr = kvzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
        if (!fw_save->ddr) {
                ret = -ENOMEM;
                goto ddr;
        }

        memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base);
        memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base);
        memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE);
        memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base);

        ctx->fw_save = fw_save;
        ctx->ops->reset(ctx);
        return 0;
ddr:
        kvfree(fw_save->sram);
sram:
        kvfree(fw_save->dram);
dram:
        kvfree(fw_save->iram);
iram:
        kfree(fw_save);
        return ret;
}

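/*
 * System resume: restore the saved memory images, restart the DSP, wait for
 * the firmware-download ack and re-allocate any streams freed on suspend.
 */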
static int intel_sst_resume(struct device *dev)
{
        struct intel_sst_drv *ctx = dev_get_drvdata(dev);
        struct sst_fw_save *fw_save = ctx->fw_save;
        struct sst_block *block;
        int i, ret = 0;

        if (!fw_save)
                return 0;

        sst_set_fw_state_locked(ctx, SST_FW_LOADING);

        /* we have to restore the memory saved */
        ctx->ops->reset(ctx);

        ctx->fw_save = NULL;

        memcpy32_toio(ctx->iram, fw_save->iram, ctx->iram_end - ctx->iram_base);
        memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base);
        memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE);
        memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base);

        kvfree(fw_save->sram);
        kvfree(fw_save->dram);
        kvfree(fw_save->iram);
        kvfree(fw_save->ddr);
        kfree(fw_save);

        block = sst_create_block(ctx, 0, FW_DWNL_ID);
        if (block == NULL)
                return -ENOMEM;

        /* start and wait for ack */
        ctx->ops->start(ctx);
        ret = sst_wait_timeout(ctx, block);
        if (ret) {
                dev_err(ctx->dev, "fw download failed %d\n", ret);
                /* FW download failed due to timeout */
                ret = -EBUSY;
        } else {
                sst_set_fw_state_locked(ctx, SST_FW_RUNNING);
        }

        if (ctx->pdata->streams_lost_on_suspend) {
                for (i = 1; i <= ctx->info.max_streams; i++) {
                        struct stream_info *stream = &ctx->streams[i];

                        if (stream->resume_status != STREAM_UN_INIT) {
                                dev_dbg(ctx->dev, "Re-allocing stream %d status %d prev %d\n",
                                        i, stream->resume_status,
                                        stream->resume_prev);
                                sst_realloc_stream(ctx, i);
                                stream->status = stream->resume_status;
                                stream->prev = stream->resume_prev;
                        }
                }
        }

        sst_free_block(ctx, block);
        return ret;
}

const struct dev_pm_ops intel_sst_pm = {
        .suspend = intel_sst_suspend,
        .resume = intel_sst_resume,
        .runtime_suspend = intel_sst_runtime_suspend,
};
EXPORT_SYMBOL_GPL(intel_sst_pm);