linux/sound/soc/intel/sst-firmware.c
/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES       2
#define SST_DSP_DMA_MAX_BURST   0x3
#define SST_HSW_BLOCK_ANY       0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

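/* DSP DMA context - wraps the DMA engine instance and the channel used
 * for firmware transfers to and from the DSP */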
struct sst_dma {
        struct sst_dsp *sst;

        struct dw_dma_chip *chip;

        struct dma_async_tx_descriptor *desc;
        struct dma_chan *ch;
};

static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
        /* __iowrite32_copy() takes a count of 32-bit words, so divide the byte count by 4 */
        __iowrite32_copy((void *)dest, src, bytes/4);
}

static void sst_dma_transfer_complete(void *arg)
{
        struct sst_dsp *sst = (struct sst_dsp *)arg;

        dev_dbg(sst->dev, "DMA: callback\n");
}

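/* perform a single dmaengine memcpy between host and DSP addresses and
 * wait for the descriptor to complete - a channel must already be held
 * via sst_dsp_dma_get_channel() */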
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
        dma_addr_t src_addr, size_t size)
{
        struct dma_async_tx_descriptor *desc;
        struct sst_dma *dma = sst->dma;

        if (dma->ch == NULL) {
                dev_err(sst->dev, "error: no DMA channel\n");
                return -ENODEV;
        }

        dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
                (unsigned long)src_addr, (unsigned long)dest_addr, size);

        desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
                src_addr, size, DMA_CTRL_ACK);
        if (!desc) {
                dev_err(sst->dev, "error: dma prep memcpy failed\n");
                return -EINVAL;
        }

        desc->callback = sst_dma_transfer_complete;
        desc->callback_param = sst;

        desc->tx_submit(desc);
        dma_wait_for_async_tx(desc);

        return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
        dma_addr_t src_addr, size_t size)
{
        return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
                        src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
        dma_addr_t src_addr, size_t size)
{
        return sst_dsp_dma_copy(sst, dest_addr,
                src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
        struct list_head *block_list)
{
        struct sst_mem_block *block, *tmp;
        int err;

        /* disable each block */
        list_for_each_entry(block, block_list, module_list) {

                if (block->ops && block->ops->disable) {
                        err = block->ops->disable(block);
                        if (err < 0)
                                dev_err(dsp->dev,
                                        "error: can't disable block %d:%d\n",
                                        block->type, block->index);
                }
        }

        /* mark each block as free */
        list_for_each_entry_safe(block, tmp, block_list, module_list) {
                list_del(&block->module_list);
                list_move(&block->list, &dsp->free_block_list);
                dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
                        block->type, block->index, block->offset);
        }
}

/* prepare the memory blocks to receive data from the host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
        struct list_head *block_list)
{
        struct sst_mem_block *block;
        int ret = 0;

        /* enable each block so that it's ready for data */
        list_for_each_entry(block, block_list, module_list) {

                if (block->ops && block->ops->enable && !block->users) {
                        ret = block->ops->enable(block);
                        if (ret < 0) {
                                dev_err(dsp->dev,
                                        "error: can't enable block %d:%d\n",
                                        block->type, block->index);
                                goto err;
                        }
                }
        }
        return ret;

err:
        list_for_each_entry(block, block_list, module_list) {
                if (block->ops && block->ops->disable)
                        block->ops->disable(block);
        }
        return ret;
}

static struct dw_dma_platform_data dw_pdata = {
        .is_private = 1,
        .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
        .chan_priority = CHAN_PRIORITY_ASCENDING,
};

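/* register a DesignWare DMA controller instance for the memory region
 * and IRQ supplied by the platform data */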
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
        int irq)
{
        struct dw_dma_chip *chip;
        int err;

        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return ERR_PTR(-ENOMEM);

        chip->irq = irq;
        chip->regs = devm_ioremap_resource(dev, mem);
        if (IS_ERR(chip->regs))
                return ERR_CAST(chip->regs);

        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
        if (err)
                return ERR_PTR(err);

        chip->dev = dev;
        err = dw_dma_probe(chip, &dw_pdata);
        if (err)
                return ERR_PTR(err);

        return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
        dw_dma_remove(chip);
}

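/* only accept DMA channels belonging to the DMA device bound to this DSP */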
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
        struct sst_dsp *dsp = (struct sst_dsp *)param;

        return chan->device->dev == dsp->dma_dev;
}

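/* request a DMA channel from the DSP's DMA engine and configure it for
 * 32-bit firmware transfers */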
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
        struct sst_dma *dma = dsp->dma;
        struct dma_slave_config slave;
        dma_cap_mask_t mask;
        int ret;

        /* The Intel MID DMA engine driver needs the slave config set but
         * the Synopsys DMA engine driver safely ignores the slave config */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        dma_cap_set(DMA_MEMCPY, mask);

        dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
        if (dma->ch == NULL) {
                dev_err(dsp->dev, "error: DMA request channel failed\n");
                return -EIO;
        }

        memset(&slave, 0, sizeof(slave));
        slave.direction = DMA_MEM_TO_DEV;
        slave.src_addr_width =
                slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

        ret = dmaengine_slave_config(dma->ch, &slave);
        if (ret) {
                dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
                        ret);
                dma_release_channel(dma->ch);
                dma->ch = NULL;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
        struct sst_dma *dma = dsp->dma;

        if (!dma->ch)
                return;

        dma_release_channel(dma->ch);
        dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

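/* probe the DMA engine attached to the ADSP IP and enable DMA based
 * firmware loading for this DSP */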
int sst_dma_new(struct sst_dsp *sst)
{
        struct sst_pdata *sst_pdata = sst->pdata;
        struct sst_dma *dma;
        struct resource mem;
        const char *dma_dev_name;
        int ret = 0;

        /* configure the correct platform data for whatever DMA engine
         * is attached to the ADSP IP. */
        switch (sst->pdata->dma_engine) {
        case SST_DMA_TYPE_DW:
                dma_dev_name = "dw_dmac";
                break;
        case SST_DMA_TYPE_MID:
                dma_dev_name = "Intel MID DMA";
                break;
        default:
                dev_err(sst->dev, "error: invalid DMA engine %d\n",
                        sst->pdata->dma_engine);
                return -EINVAL;
        }

        dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->sst = sst;

        memset(&mem, 0, sizeof(mem));

        mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
        mem.end   = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
        mem.flags = IORESOURCE_MEM;

        /* now register DMA engine device */
        dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
        if (IS_ERR(dma->chip)) {
                dev_err(sst->dev, "error: DMA device register failed\n");
                ret = PTR_ERR(dma->chip);
                goto err_dma_dev;
        }

        sst->dma = dma;
        sst->fw_use_dma = true;
        return 0;

err_dma_dev:
        devm_kfree(sst->dev, dma);
        return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
        if (dma == NULL)
                return;

        if (dma->ch)
                dma_release_channel(dma->ch);

        if (dma->chip)
                dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
        const struct firmware *fw, void *private)
{
        struct sst_fw *sst_fw;
        int err;

        if (!dsp->ops->parse_fw)
                return NULL;

        sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
        if (sst_fw == NULL)
                return NULL;

        sst_fw->dsp = dsp;
        sst_fw->private = private;
        sst_fw->size = fw->size;

        /* allocate DMA buffer to store FW data */
        sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
                                &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
        if (!sst_fw->dma_buf) {
                dev_err(dsp->dev, "error: DMA alloc failed\n");
                kfree(sst_fw);
                return NULL;
        }

        /* copy FW data to DMA-able memory */
        memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

        if (dsp->fw_use_dma) {
                err = sst_dsp_dma_get_channel(dsp, 0);
                if (err < 0)
                        goto chan_err;
        }

        /* call the core-specific FW parser to load the FW data into the DSP */
        err = dsp->ops->parse_fw(sst_fw);
        if (err < 0) {
                dev_err(dsp->dev, "error: parse fw failed %d\n", err);
                goto parse_err;
        }

        if (dsp->fw_use_dma)
                sst_dsp_dma_put_channel(dsp);

        mutex_lock(&dsp->mutex);
        list_add(&sst_fw->list, &dsp->fw_list);
        mutex_unlock(&dsp->mutex);

        return sst_fw;

parse_err:
        if (dsp->fw_use_dma)
                sst_dsp_dma_put_channel(dsp);
chan_err:
        dma_free_coherent(dsp->dma_dev, sst_fw->size,
                                sst_fw->dma_buf,
                                sst_fw->dmable_fw_paddr);
        sst_fw->dma_buf = NULL;
        kfree(sst_fw);
        return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
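
/*
 * Example usage (sketch only - the firmware file name and error handling
 * are hypothetical, each core driver supplies its own):
 *
 *      const struct firmware *fw;
 *      struct sst_fw *sst_fw;
 *
 *      if (request_firmware(&fw, "intel-sst.bin", dsp->dev) == 0) {
 *              sst_fw = sst_fw_new(dsp, fw, NULL);
 *              release_firmware(fw);
 *      }
 */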

int sst_fw_reload(struct sst_fw *sst_fw)
{
        struct sst_dsp *dsp = sst_fw->dsp;
        int ret;

        dev_dbg(dsp->dev, "reloading firmware\n");

        /* call the core-specific FW parser to load the FW data into the DSP */
        ret = dsp->ops->parse_fw(sst_fw);
        if (ret < 0)
                dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

        return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
        struct sst_dsp *dsp = sst_fw->dsp;
        struct sst_module *module, *mtmp;
        struct sst_module_runtime *runtime, *rtmp;

        dev_dbg(dsp->dev, "unloading firmware\n");

        mutex_lock(&dsp->mutex);

        /* check module by module */
        list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
                if (module->sst_fw == sst_fw) {

                        /* remove runtime modules */
                        list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

                                block_list_remove(dsp, &runtime->block_list);
                                list_del(&runtime->list);
                                kfree(runtime);
                        }

                        /* now remove the module */
                        block_list_remove(dsp, &module->block_list);
                        list_del(&module->list);
                        kfree(module);
                }
        }

        /* remove all scratch blocks */
        block_list_remove(dsp, &dsp->scratch_block_list);

        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
        struct sst_dsp *dsp = sst_fw->dsp;

        mutex_lock(&dsp->mutex);
        list_del(&sst_fw->list);
        mutex_unlock(&dsp->mutex);

        if (sst_fw->dma_buf)
                dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
                        sst_fw->dmable_fw_paddr);
        kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
        struct sst_fw *sst_fw, *t;

        mutex_lock(&dsp->mutex);
        list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

                list_del(&sst_fw->list);
                /* free from the same device the buffer was allocated on */
                dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
                        sst_fw->dmable_fw_paddr);
                kfree(sst_fw);
        }
        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
        struct sst_module_template *template, void *private)
{
        struct sst_dsp *dsp = sst_fw->dsp;
        struct sst_module *sst_module;

        sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
        if (sst_module == NULL)
                return NULL;

        sst_module->id = template->id;
        sst_module->dsp = dsp;
        sst_module->sst_fw = sst_fw;
        sst_module->scratch_size = template->scratch_size;
        sst_module->persistent_size = template->persistent_size;
        sst_module->entry = template->entry;

        INIT_LIST_HEAD(&sst_module->block_list);
        INIT_LIST_HEAD(&sst_module->runtime_list);

        mutex_lock(&dsp->mutex);
        list_add(&sst_module->list, &dsp->module_list);
        mutex_unlock(&dsp->mutex);

        return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
        struct sst_dsp *dsp = sst_module->dsp;

        mutex_lock(&dsp->mutex);
        list_del(&sst_module->list);
        mutex_unlock(&dsp->mutex);

        kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

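/* create a runtime instance of a module and add it to the module's
 * runtime list */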
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
        int id, void *private)
{
        struct sst_dsp *dsp = module->dsp;
        struct sst_module_runtime *runtime;

        runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
        if (runtime == NULL)
                return NULL;

        runtime->id = id;
        runtime->dsp = dsp;
        runtime->module = module;
        INIT_LIST_HEAD(&runtime->block_list);

        mutex_lock(&dsp->mutex);
        list_add(&runtime->list, &module->runtime_list);
        mutex_unlock(&dsp->mutex);

        return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
        struct sst_dsp *dsp = runtime->dsp;

        mutex_lock(&dsp->mutex);
        list_del(&runtime->list);
        mutex_unlock(&dsp->mutex);

        kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

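/* find a free block matching the allocator's type and offset - callers
 * hold locks */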
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
        struct sst_block_allocator *ba)
{
        struct sst_mem_block *block;

        list_for_each_entry(block, &dsp->free_block_list, list) {
                if (block->type == ba->type && block->offset == ba->offset)
                        return block;
        }

        return NULL;
}

/* allocate contiguous blocks - the allocator offset must be on a block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
        struct sst_block_allocator *ba, struct list_head *block_list)
{
        struct list_head tmp = LIST_HEAD_INIT(tmp);
        struct sst_mem_block *block;
        u32 block_start = SST_HSW_BLOCK_ANY;
        int size = ba->size, offset = ba->offset;

        while (ba->size > 0) {

                block = find_block(dsp, ba);
                if (!block) {
                        /* not enough contiguous blocks - put them all back */
                        list_splice(&tmp, &dsp->free_block_list);

                        ba->size = size;
                        ba->offset = offset;
                        return -ENOMEM;
                }

                list_move_tail(&block->list, &tmp);
                ba->offset += block->size;
                ba->size -= block->size;
        }
        ba->size = size;
        ba->offset = offset;

        list_for_each_entry(block, &tmp, list) {

                if (block->offset < block_start)
                        block_start = block->offset;

                list_add(&block->module_list, block_list);

                dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
                        block->type, block->index, block->offset);
        }

        list_splice(&tmp, &dsp->used_block_list);
        return 0;
}

/* allocate the first free DSP blocks for module data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
        struct list_head *block_list)
{
        struct sst_mem_block *block, *tmp;
        int ret = 0;

        if (ba->size == 0)
                return 0;

        /* first try a single free block that can hold the whole module */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                if (ba->size > block->size)
                        continue;

                ba->offset = block->offset;
                block->bytes_used = ba->size % block->size;
                list_add(&block->module_list, block_list);
                list_move(&block->list, &dsp->used_block_list);
                dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
                        block->type, block->index, block->offset);
                return 0;
        }

        /* then try multiple contiguous free blocks that can hold the module */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                /* does the request span more than one block? */
                if (ba->size > block->size) {

                        /* align ba to block boundary */
                        ba->offset = block->offset;

                        ret = block_alloc_contiguous(dsp, ba, block_list);
                        if (ret == 0)
                                return ret;

                }
        }

        /* not enough free block space */
        return -ENOMEM;
}

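/* allocate blocks for a generic data request and prepare them for use -
 * takes and releases the DSP mutex */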
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
        struct list_head *block_list)
{
        int ret;

        dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
                ba->size, ba->offset, ba->type);

        mutex_lock(&dsp->mutex);

        ret = block_alloc(dsp, ba, block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
                goto out;
        }

        /* prepare DSP blocks for module usage */
        ret = block_list_prepare(dsp, block_list);
        if (ret < 0)
                dev_err(dsp->dev, "error: prepare failed\n");

out:
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, block_list);
        mutex_unlock(&dsp->mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
        struct list_head *block_list)
{
        struct sst_mem_block *block, *tmp;
        struct sst_block_allocator ba_tmp = *ba;
        u32 end = ba->offset + ba->size, block_end;
        int err;

        /* only IRAM/DRAM blocks are managed */
        if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
                return 0;

        /* are blocks already attached to this module? */
        list_for_each_entry_safe(block, tmp, block_list, module_list) {

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                block_end = block->offset + block->size;

                /* find block that holds section */
                if (ba->offset >= block->offset && end <= block_end)
                        return 0;

                /* does the section span more than one block? */
                if (ba->offset >= block->offset && ba->offset < block_end) {

                        /* align ba to block boundary */
                        ba_tmp.size -= block_end - ba->offset;
                        ba_tmp.offset = block_end;
                        err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;

                        /* module already owns blocks */
                        return 0;
                }
        }

        /* find the first free blocks that can hold the section */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
                block_end = block->offset + block->size;

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                /* find block that holds section */
                if (ba->offset >= block->offset && end <= block_end) {

                        /* add block */
                        list_move(&block->list, &dsp->used_block_list);
                        list_add(&block->module_list, block_list);
                        dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
                                block->type, block->index, block->offset);
                        return 0;
                }

                /* does the section span more than one block? */
                if (ba->offset >= block->offset && ba->offset < block_end) {

                        /* add block */
                        list_move(&block->list, &dsp->used_block_list);
                        list_add(&block->module_list, block_list);
                        /* align ba to block boundary */
                        ba_tmp.size -= block_end - ba->offset;
                        ba_tmp.offset = block_end;

                        err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;

                        return 0;
                }
        }

        return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
        struct sst_dsp *dsp = module->dsp;
        struct sst_fw *sst_fw = module->sst_fw;
        struct sst_block_allocator ba;
        int ret;

        memset(&ba, 0, sizeof(ba));
        ba.size = module->size;
        ba.type = module->type;
        ba.offset = module->offset;

        dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
                ba.size, ba.offset, ba.type);

        mutex_lock(&dsp->mutex);

        /* allocate blocks that include this section */
        ret = block_alloc_fixed(dsp, &ba, &module->block_list);
        if (ret < 0) {
                dev_err(dsp->dev,
                        "error: no free blocks for section at offset 0x%x size 0x%x\n",
                        module->offset, module->size);
                mutex_unlock(&dsp->mutex);
                return -ENOMEM;
        }

        /* prepare DSP blocks for module copy */
        ret = block_list_prepare(dsp, &module->block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: fw module prepare failed\n");
                goto err;
        }

        /* copy partial module data to blocks */
        if (dsp->fw_use_dma) {
                ret = sst_dsp_dma_copyto(dsp,
                        dsp->addr.lpe_base + module->offset,
                        sst_fw->dmable_fw_paddr + module->data_offset,
                        module->size);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: module copy failed\n");
                        goto err;
                }
        } else
                sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
                        module->size);

        mutex_unlock(&dsp->mutex);
        return ret;

err:
        block_list_remove(dsp, &module->block_list);
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
        struct sst_dsp *dsp = module->dsp;

        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, &module->block_list);
        mutex_unlock(&dsp->mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

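/* allocate persistent DRAM blocks for a module runtime, optionally at a
 * fixed offset */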
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
        int offset)
{
        struct sst_dsp *dsp = runtime->dsp;
        struct sst_module *module = runtime->module;
        struct sst_block_allocator ba;
        int ret;

        if (module->persistent_size == 0)
                return 0;

        memset(&ba, 0, sizeof(ba));
        ba.size = module->persistent_size;
        ba.type = SST_MEM_DRAM;

        mutex_lock(&dsp->mutex);

        /* do we need to allocate at a fixed address? */
        if (offset != 0) {

                ba.offset = offset;

                dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
                        ba.size, ba.type, ba.offset);

                /* allocate blocks that include this section */
                ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

        } else {
                dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
                        ba.size, ba.type);

                /* allocate blocks that include this section */
                ret = block_alloc(dsp, &ba, &runtime->block_list);
        }
        if (ret < 0) {
                dev_err(dsp->dev,
                        "error: no free blocks for runtime module size 0x%x\n",
                        module->persistent_size);
                mutex_unlock(&dsp->mutex);
                return -ENOMEM;
        }
        runtime->persistent_offset = ba.offset;

        /* prepare DSP blocks for module copy */
        ret = block_list_prepare(dsp, &runtime->block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: runtime block prepare failed\n");
                goto err;
        }

        mutex_unlock(&dsp->mutex);
        return ret;

err:
        /* release the blocks allocated above for this runtime */
        block_list_remove(dsp, &runtime->block_list);
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
        struct sst_dsp *dsp = runtime->dsp;

        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, &runtime->block_list);
        mutex_unlock(&dsp->mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

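/* save a runtime's persistent DSP memory into a newly allocated host DMA
 * buffer so it can be restored later */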
int sst_module_runtime_save(struct sst_module_runtime *runtime,
        struct sst_module_runtime_context *context)
{
        struct sst_dsp *dsp = runtime->dsp;
        struct sst_module *module = runtime->module;
        int ret = 0;

        dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
                runtime->id, runtime->persistent_offset,
                module->persistent_size);

        context->buffer = dma_alloc_coherent(dsp->dma_dev,
                module->persistent_size,
                &context->dma_buffer, GFP_DMA | GFP_KERNEL);
        if (!context->buffer) {
                dev_err(dsp->dev, "error: DMA context alloc failed\n");
                return -ENOMEM;
        }

        mutex_lock(&dsp->mutex);

        if (dsp->fw_use_dma) {

                ret = sst_dsp_dma_get_channel(dsp, 0);
                if (ret < 0)
                        goto err;

                ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
                        dsp->addr.lpe_base + runtime->persistent_offset,
                        module->persistent_size);
                sst_dsp_dma_put_channel(dsp);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: context copy failed\n");
                        goto err;
                }
        } else
                sst_memcpy32(context->buffer, dsp->addr.lpe +
                        runtime->persistent_offset,
                        module->persistent_size);

err:
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

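/* restore a runtime's persistent DSP memory from the saved context
 * buffer, then free the buffer */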
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
        struct sst_module_runtime_context *context)
{
        struct sst_dsp *dsp = runtime->dsp;
        struct sst_module *module = runtime->module;
        int ret = 0;

        dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
                runtime->id, runtime->persistent_offset,
                module->persistent_size);

        mutex_lock(&dsp->mutex);

        if (!context->buffer) {
                dev_info(dsp->dev, "no context buffer to restore\n");
                goto err;
        }

        if (dsp->fw_use_dma) {

                ret = sst_dsp_dma_get_channel(dsp, 0);
                if (ret < 0)
                        goto err;

                ret = sst_dsp_dma_copyto(dsp,
                        dsp->addr.lpe_base + runtime->persistent_offset,
                        context->dma_buffer, module->persistent_size);
                sst_dsp_dma_put_channel(dsp);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: module copy failed\n");
                        goto err;
                }
        } else
                sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
                        context->buffer, module->persistent_size);

        dma_free_coherent(dsp->dma_dev, module->persistent_size,
                                context->buffer, context->dma_buffer);
        context->buffer = NULL;

err:
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
        u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
        void *private)
{
        struct sst_mem_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (block == NULL)
                return NULL;

        block->offset = offset;
        block->size = size;
        block->index = index;
        block->type = type;
        block->dsp = dsp;
        block->private = private;
        block->ops = ops;

        mutex_lock(&dsp->mutex);
        list_add(&block->list, &dsp->free_block_list);
        mutex_unlock(&dsp->mutex);

        return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
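
/*
 * Example usage (sketch only - the block geometry, count and ops are
 * hypothetical, platform code supplies the real values):
 *
 *      for (i = 0; i < nr_blocks; i++)
 *              sst_mem_block_register(dsp, i * block_size, block_size,
 *                                     SST_MEM_DRAM, &my_block_ops, i, NULL);
 */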

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
        struct sst_mem_block *block, *tmp;

        mutex_lock(&dsp->mutex);

        /* unregister used blocks */
        list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
                list_del(&block->list);
                kfree(block);
        }

        /* unregister free blocks */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
                list_del(&block->list);
                kfree(block);
        }

        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
        struct sst_module *module;
        struct sst_block_allocator ba;
        int ret;

        mutex_lock(&dsp->mutex);

        /* calculate required scratch size */
        dsp->scratch_size = 0;
        list_for_each_entry(module, &dsp->module_list, list) {
                dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
                        module->id, module->scratch_size);
                if (dsp->scratch_size < module->scratch_size)
                        dsp->scratch_size = module->scratch_size;
        }

        dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
                dsp->scratch_size);

        if (dsp->scratch_size == 0) {
                dev_info(dsp->dev, "no modules need scratch buffer\n");
                mutex_unlock(&dsp->mutex);
                return 0;
        }

        /* allocate blocks for module scratch buffers */
        dev_dbg(dsp->dev, "allocating scratch blocks\n");

        ba.size = dsp->scratch_size;
        ba.type = SST_MEM_DRAM;

        /* do we need to allocate at a fixed offset? */
        if (dsp->scratch_offset != 0) {

                /* set the offset before it is logged below */
                ba.offset = dsp->scratch_offset;

                dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
                        ba.size, ba.type, ba.offset);

                /* allocate blocks that include this section */
                ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

        } else {
                dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
                        ba.size, ba.type);

                ba.offset = 0;
                ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
        }
        if (ret < 0) {
                dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
                mutex_unlock(&dsp->mutex);
                return ret;
        }

        ret = block_list_prepare(dsp, &dsp->scratch_block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: scratch block prepare failed\n");
                mutex_unlock(&dsp->mutex);
                return ret;
        }

        /* assign the same scratch offset to each module */
        dsp->scratch_offset = ba.offset;
        mutex_unlock(&dsp->mutex);
        return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, &dsp->scratch_block_list);
        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
        struct sst_module *module;

        mutex_lock(&dsp->mutex);

        list_for_each_entry(module, &dsp->module_list, list) {
                if (module->id == id) {
                        mutex_unlock(&dsp->mutex);
                        return module;
                }
        }

        mutex_unlock(&dsp->mutex);
        return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
        struct sst_module *module, u32 id)
{
        struct sst_module_runtime *runtime;
        struct sst_dsp *dsp = module->dsp;

        mutex_lock(&dsp->mutex);

        list_for_each_entry(runtime, &module->runtime_list, list) {
                if (runtime->id == id) {
                        mutex_unlock(&dsp->mutex);
                        return runtime;
                }
        }

        mutex_unlock(&dsp->mutex);
        return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
        enum sst_mem_type type)
{
        switch (type) {
        case SST_MEM_IRAM:
                return offset - dsp->addr.iram_offset +
                        dsp->addr.dsp_iram_offset;
        case SST_MEM_DRAM:
                return offset - dsp->addr.dram_offset +
                        dsp->addr.dsp_dram_offset;
        default:
                return 0;
        }
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);