linux/sound/soc/intel/common/sst-firmware.c
/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES       2
#define SST_DSP_DMA_MAX_BURST   0x3
#define SST_HSW_BLOCK_ANY       0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
        struct sst_dsp *sst;

        struct dw_dma_chip *chip;

        struct dma_async_tx_descriptor *desc;
        struct dma_chan *ch;
};
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
        u32 tmp = 0;
        int i, m, n;
        const u8 *src_byte = src;

        m = bytes / 4;
        n = bytes % 4;

        /* __iowrite32_copy() takes a count of 32-bit words, so divide by 4 */
        __iowrite32_copy((void *)dest, src, m);

        if (n) {
                for (i = 0; i < n; i++)
                        tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
                __iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
        }
}

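/*
 * Worked example (illustrative only): sst_memcpy32(dest, src, 10) copies
 * two whole 32-bit words with __iowrite32_copy(), then packs the two
 * remaining tail bytes little-endian into "tmp" (byte 8 into bits 0-7,
 * byte 9 into bits 8-15) and writes them out as one final 32-bit word;
 * the upper two bytes of that last word are written as zero.
 */
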
static void sst_dma_transfer_complete(void *arg)
{
        struct sst_dsp *sst = (struct sst_dsp *)arg;

        dev_dbg(sst->dev, "DMA: callback\n");
}

static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
        dma_addr_t src_addr, size_t size)
{
        struct dma_async_tx_descriptor *desc;
        struct sst_dma *dma = sst->dma;

        if (dma->ch == NULL) {
                dev_err(sst->dev, "error: no DMA channel\n");
                return -ENODEV;
        }

        dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
                (unsigned long)src_addr, (unsigned long)dest_addr, size);

        desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
                src_addr, size, DMA_CTRL_ACK);
        if (!desc) {
                dev_err(sst->dev, "error: dma prep memcpy failed\n");
                return -EINVAL;
        }

        desc->callback = sst_dma_transfer_complete;
        desc->callback_param = sst;

        desc->tx_submit(desc);
        dma_wait_for_async_tx(desc);

        return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
        dma_addr_t src_addr, size_t size)
{
        return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
                        src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
        dma_addr_t src_addr, size_t size)
{
        return sst_dsp_dma_copy(sst, dest_addr,
                src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

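/*
 * Note (added for clarity): in both directions the DSP-side address is
 * OR'ed with SST_HSW_MASK_DMA_ADDR_DSP so the transfer targets the ADSP
 * memory window rather than host RAM. Illustratively, OR-ing the mask
 * onto a DSP-side address of 0x8000 yields (0x8000 | 0xfff00000) =
 * 0xfff08000 on the bus.
 */
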
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
        struct list_head *block_list)
{
        struct sst_mem_block *block, *tmp;
        int err;

        /* disable each block */
        list_for_each_entry(block, block_list, module_list) {

                if (block->ops && block->ops->disable) {
                        err = block->ops->disable(block);
                        if (err < 0)
                                dev_err(dsp->dev,
                                        "error: can't disable block %d:%d\n",
                                        block->type, block->index);
                }
        }

        /* mark each block as free */
        list_for_each_entry_safe(block, tmp, block_list, module_list) {
                list_del(&block->module_list);
                list_move(&block->list, &dsp->free_block_list);
                dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
                        block->type, block->index, block->offset);
        }
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
        struct list_head *block_list)
{
        struct sst_mem_block *block;
        int ret = 0;

        /* enable each block so that it's ready for data */
        list_for_each_entry(block, block_list, module_list) {

                if (block->ops && block->ops->enable && !block->users) {
                        ret = block->ops->enable(block);
                        if (ret < 0) {
                                dev_err(dsp->dev,
                                        "error: can't enable block %d:%d\n",
                                        block->type, block->index);
                                goto err;
                        }
                }
        }
        return ret;

err:
        list_for_each_entry(block, block_list, module_list) {
                if (block->ops && block->ops->disable)
                        block->ops->disable(block);
        }
        return ret;
}

static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
        int irq)
{
        struct dw_dma_chip *chip;
        int err;

        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return ERR_PTR(-ENOMEM);

        chip->irq = irq;
        chip->regs = devm_ioremap_resource(dev, mem);
        if (IS_ERR(chip->regs))
                return ERR_CAST(chip->regs);

        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
        if (err)
                return ERR_PTR(err);

        chip->dev = dev;

        err = dw_dma_probe(chip);
        if (err)
                return ERR_PTR(err);

        return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
        dw_dma_remove(chip);
}

static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
        struct sst_dsp *dsp = (struct sst_dsp *)param;

        return chan->device->dev == dsp->dma_dev;
}

int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
        struct sst_dma *dma = dsp->dma;
        struct dma_slave_config slave;
        dma_cap_mask_t mask;
        int ret;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        dma_cap_set(DMA_MEMCPY, mask);

        dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
        if (dma->ch == NULL) {
                dev_err(dsp->dev, "error: DMA request channel failed\n");
                return -EIO;
        }

        memset(&slave, 0, sizeof(slave));
        slave.direction = DMA_MEM_TO_DEV;
        slave.src_addr_width =
                slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

        ret = dmaengine_slave_config(dma->ch, &slave);
        if (ret) {
                dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
                        ret);
                dma_release_channel(dma->ch);
                dma->ch = NULL;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
        struct sst_dma *dma = dsp->dma;

        if (!dma->ch)
                return;

        dma_release_channel(dma->ch);
        dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

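/*
 * Usage sketch (illustrative; it mirrors what sst_fw_new() below does):
 *
 *	ret = sst_dsp_dma_get_channel(dsp, 0);
 *	if (ret < 0)
 *		return ret;
 *	ret = sst_dsp_dma_copyto(dsp, dest, src, size);
 *	sst_dsp_dma_put_channel(dsp);
 */
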
int sst_dma_new(struct sst_dsp *sst)
{
        struct sst_pdata *sst_pdata = sst->pdata;
        struct sst_dma *dma;
        struct resource mem;
        const char *dma_dev_name;
        int ret = 0;

        if (sst->pdata->resindex_dma_base == -1)
                /* DMA is not used, return and squelch error messages */
                return 0;

        /*
         * Configure the correct platform data for whatever DMA engine
         * is attached to the ADSP IP.
         */
        switch (sst->pdata->dma_engine) {
        case SST_DMA_TYPE_DW:
                dma_dev_name = "dw_dmac";
                break;
        default:
                dev_err(sst->dev, "error: invalid DMA engine %d\n",
                        sst->pdata->dma_engine);
                return -EINVAL;
        }

        dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->sst = sst;

        memset(&mem, 0, sizeof(mem));

        mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
        mem.end   = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
        mem.flags = IORESOURCE_MEM;

        /* now register DMA engine device */
        dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
        if (IS_ERR(dma->chip)) {
                dev_err(sst->dev, "error: DMA device register failed\n");
                ret = PTR_ERR(dma->chip);
                goto err_dma_dev;
        }

        sst->dma = dma;
        sst->fw_use_dma = true;
        return 0;

err_dma_dev:
        devm_kfree(sst->dev, dma);
        return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
        if (dma == NULL)
                return;

        if (dma->ch)
                dma_release_channel(dma->ch);

        if (dma->chip)
                dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
        const struct firmware *fw, void *private)
{
        struct sst_fw *sst_fw;
        int err;

        if (!dsp->ops->parse_fw)
                return NULL;

        sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
        if (sst_fw == NULL)
                return NULL;

        sst_fw->dsp = dsp;
        sst_fw->private = private;
        sst_fw->size = fw->size;

        /* allocate DMA buffer to store FW data */
        sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
                                &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
        if (!sst_fw->dma_buf) {
                dev_err(dsp->dev, "error: DMA alloc failed\n");
                kfree(sst_fw);
                return NULL;
        }

        /* copy FW data to DMA-able memory */
        memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

        if (dsp->fw_use_dma) {
                err = sst_dsp_dma_get_channel(dsp, 0);
                if (err < 0)
                        goto chan_err;
        }

        /* call core specific FW parser to load FW data into DSP */
        err = dsp->ops->parse_fw(sst_fw);
        if (err < 0) {
                dev_err(dsp->dev, "error: parse fw failed %d\n", err);
                goto parse_err;
        }

        if (dsp->fw_use_dma)
                sst_dsp_dma_put_channel(dsp);

        mutex_lock(&dsp->mutex);
        list_add(&sst_fw->list, &dsp->fw_list);
        mutex_unlock(&dsp->mutex);

        return sst_fw;

parse_err:
        if (dsp->fw_use_dma)
                sst_dsp_dma_put_channel(dsp);
chan_err:
        dma_free_coherent(dsp->dma_dev, sst_fw->size,
                                sst_fw->dma_buf,
                                sst_fw->dmable_fw_paddr);
        sst_fw->dma_buf = NULL;
        kfree(sst_fw);
        return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);

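/*
 * Usage sketch (illustrative, not part of this file): a platform driver
 * typically pairs sst_fw_new() with request_firmware(), e.g.
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *
 *	if (request_firmware(&fw, "intel/sst-fw.bin", dsp->dev) == 0) {
 *		sst_fw = sst_fw_new(dsp, fw, NULL);
 *		release_firmware(fw);
 *	}
 *
 * "intel/sst-fw.bin" is a placeholder; the real image name is platform
 * specific. sst_fw_new() copies fw->data into its own DMA buffer, so the
 * firmware can be released afterwards.
 */
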
int sst_fw_reload(struct sst_fw *sst_fw)
{
        struct sst_dsp *dsp = sst_fw->dsp;
        int ret;

        dev_dbg(dsp->dev, "reloading firmware\n");

        /* call core specific FW parser to load FW data into DSP */
        ret = dsp->ops->parse_fw(sst_fw);
        if (ret < 0)
                dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

        return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
        struct sst_dsp *dsp = sst_fw->dsp;
        struct sst_module *module, *mtmp;
        struct sst_module_runtime *runtime, *rtmp;

        dev_dbg(dsp->dev, "unloading firmware\n");

        mutex_lock(&dsp->mutex);

        /* check module by module */
        list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
                if (module->sst_fw == sst_fw) {

                        /* remove runtime modules */
                        list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

                                block_list_remove(dsp, &runtime->block_list);
                                list_del(&runtime->list);
                                kfree(runtime);
                        }

                        /* now remove the module */
                        block_list_remove(dsp, &module->block_list);
                        list_del(&module->list);
                        kfree(module);
                }
        }

        /* remove all scratch blocks */
        block_list_remove(dsp, &dsp->scratch_block_list);

        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
        struct sst_dsp *dsp = sst_fw->dsp;

        mutex_lock(&dsp->mutex);
        list_del(&sst_fw->list);
        mutex_unlock(&dsp->mutex);

        if (sst_fw->dma_buf)
                dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
                        sst_fw->dmable_fw_paddr);
        kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
        struct sst_fw *sst_fw, *t;

        mutex_lock(&dsp->mutex);
        list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

                list_del(&sst_fw->list);
                /* free with the same device the buffer was allocated from */
                dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
                        sst_fw->dmable_fw_paddr);
                kfree(sst_fw);
        }
        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
        struct sst_module_template *template, void *private)
{
        struct sst_dsp *dsp = sst_fw->dsp;
        struct sst_module *sst_module;

        sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
        if (sst_module == NULL)
                return NULL;

        sst_module->id = template->id;
        sst_module->dsp = dsp;
        sst_module->sst_fw = sst_fw;
        sst_module->scratch_size = template->scratch_size;
        sst_module->persistent_size = template->persistent_size;
        sst_module->entry = template->entry;
        sst_module->state = SST_MODULE_STATE_UNLOADED;

        INIT_LIST_HEAD(&sst_module->block_list);
        INIT_LIST_HEAD(&sst_module->runtime_list);

        mutex_lock(&dsp->mutex);
        list_add(&sst_module->list, &dsp->module_list);
        mutex_unlock(&dsp->mutex);

        return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

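/*
 * Usage sketch (illustrative): a core specific parse_fw() callback would
 * fill a template from a firmware module header and register it, e.g.
 *
 *	struct sst_module_template tmpl = {
 *		.id = hdr_id,
 *		.entry = hdr_entry,
 *		.scratch_size = hdr_scratch,
 *		.persistent_size = hdr_persistent,
 *	};
 *	struct sst_module *mod = sst_module_new(sst_fw, &tmpl, NULL);
 *
 * The hdr_* values are placeholders read from the firmware image.
 */
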
/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
        struct sst_dsp *dsp = sst_module->dsp;

        mutex_lock(&dsp->mutex);
        list_del(&sst_module->list);
        mutex_unlock(&dsp->mutex);

        kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
        int id, void *private)
{
        struct sst_dsp *dsp = module->dsp;
        struct sst_module_runtime *runtime;

        runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
        if (runtime == NULL)
                return NULL;

        runtime->id = id;
        runtime->dsp = dsp;
        runtime->module = module;
        INIT_LIST_HEAD(&runtime->block_list);

        mutex_lock(&dsp->mutex);
        list_add(&runtime->list, &module->runtime_list);
        mutex_unlock(&dsp->mutex);

        return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
        struct sst_dsp *dsp = runtime->dsp;

        mutex_lock(&dsp->mutex);
        list_del(&runtime->list);
        mutex_unlock(&dsp->mutex);

        kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp,
        struct sst_block_allocator *ba)
{
        struct sst_mem_block *block;

        list_for_each_entry(block, &dsp->free_block_list, list) {
                if (block->type == ba->type && block->offset == ba->offset)
                        return block;
        }

        return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
        struct sst_block_allocator *ba, struct list_head *block_list)
{
        struct list_head tmp = LIST_HEAD_INIT(tmp);
        struct sst_mem_block *block;
        u32 block_start = SST_HSW_BLOCK_ANY;
        int size = ba->size, offset = ba->offset;

        while (ba->size > 0) {

                block = find_block(dsp, ba);
                if (!block) {
                        list_splice(&tmp, &dsp->free_block_list);

                        ba->size = size;
                        ba->offset = offset;
                        return -ENOMEM;
                }

                list_move_tail(&block->list, &tmp);
                ba->offset += block->size;
                ba->size -= block->size;
        }
        ba->size = size;
        ba->offset = offset;

        list_for_each_entry(block, &tmp, list) {

                if (block->offset < block_start)
                        block_start = block->offset;

                list_add(&block->module_list, block_list);

                dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
                        block->type, block->index, block->offset);
        }

        list_splice(&tmp, &dsp->used_block_list);
        return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
        struct list_head *block_list)
{
        struct sst_mem_block *block, *tmp;
        int ret = 0;

        if (ba->size == 0)
                return 0;

        /* find first free whole blocks that can hold module */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                if (ba->size > block->size)
                        continue;

                ba->offset = block->offset;
                block->bytes_used = ba->size % block->size;
                list_add(&block->module_list, block_list);
                list_move(&block->list, &dsp->used_block_list);
                dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
                        block->type, block->index, block->offset);
                return 0;
        }

        /* then find free multiple blocks that can hold module */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                /* do we span > 1 blocks? */
                if (ba->size > block->size) {

                        /* align ba to block boundary */
                        ba->offset = block->offset;

                        ret = block_alloc_contiguous(dsp, ba, block_list);
                        if (ret == 0)
                                return ret;
                }
        }

        /* not enough free block space */
        return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
        struct list_head *block_list)
{
        int ret;

        dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
                ba->size, ba->offset, ba->type);

        mutex_lock(&dsp->mutex);

        ret = block_alloc(dsp, ba, block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
                goto out;
        }

        /* prepare DSP blocks for module usage */
        ret = block_list_prepare(dsp, block_list);
        if (ret < 0)
                dev_err(dsp->dev, "error: prepare failed\n");

out:
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

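/*
 * Usage sketch (illustrative): callers describe the request in a
 * struct sst_block_allocator and keep the resulting blocks on their
 * own list, e.g.
 *
 *	struct sst_block_allocator ba = {
 *		.size = 0x1000,
 *		.type = SST_MEM_DRAM,
 *	};
 *	LIST_HEAD(blocks);
 *
 *	ret = sst_alloc_blocks(dsp, &ba, &blocks);
 *	...
 *	sst_free_blocks(dsp, &blocks);
 */
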
int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, block_list);
        mutex_unlock(&dsp->mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
        struct list_head *block_list)
{
        struct sst_mem_block *block, *tmp;
        struct sst_block_allocator ba_tmp = *ba;
        u32 end = ba->offset + ba->size, block_end;
        int err;

        /* only IRAM/DRAM blocks are managed */
        if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
                return 0;

        /* are blocks already attached to this module? */
        list_for_each_entry_safe(block, tmp, block_list, module_list) {

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                block_end = block->offset + block->size;

                /* find block that holds section */
                if (ba->offset >= block->offset && end <= block_end)
                        return 0;

                /* does block span more than 1 section? */
                if (ba->offset >= block->offset && ba->offset < block_end) {

                        /* align ba to block boundary */
                        ba_tmp.size -= block_end - ba->offset;
                        ba_tmp.offset = block_end;
                        err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;

                        /* module already owns blocks */
                        return 0;
                }
        }

        /* find first free blocks that can hold section in free list */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
                block_end = block->offset + block->size;

                /* ignore blocks with wrong type */
                if (block->type != ba->type)
                        continue;

                /* find block that holds section */
                if (ba->offset >= block->offset && end <= block_end) {

                        /* add block */
                        list_move(&block->list, &dsp->used_block_list);
                        list_add(&block->module_list, block_list);
                        dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
                                block->type, block->index, block->offset);
                        return 0;
                }

                /* does block span more than 1 section? */
                if (ba->offset >= block->offset && ba->offset < block_end) {

                        /* add block */
                        list_move(&block->list, &dsp->used_block_list);
                        list_add(&block->module_list, block_list);
                        /* align ba to block boundary */
                        ba_tmp.size -= block_end - ba->offset;
                        ba_tmp.offset = block_end;

                        err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;

                        return 0;
                }
        }

        return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
        struct sst_dsp *dsp = module->dsp;
        struct sst_fw *sst_fw = module->sst_fw;
        struct sst_block_allocator ba;
        int ret;

        memset(&ba, 0, sizeof(ba));
        ba.size = module->size;
        ba.type = module->type;
        ba.offset = module->offset;

        dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
                ba.size, ba.offset, ba.type);

        mutex_lock(&dsp->mutex);

        /* alloc blocks that include this section */
        ret = block_alloc_fixed(dsp, &ba, &module->block_list);
        if (ret < 0) {
                dev_err(dsp->dev,
                        "error: no free blocks for section at offset 0x%x size 0x%x\n",
                        module->offset, module->size);
                mutex_unlock(&dsp->mutex);
                return -ENOMEM;
        }

        /* prepare DSP blocks for module copy */
        ret = block_list_prepare(dsp, &module->block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: fw module prepare failed\n");
                goto err;
        }

        /* copy partial module data to blocks */
        if (dsp->fw_use_dma) {
                ret = sst_dsp_dma_copyto(dsp,
                        dsp->addr.lpe_base + module->offset,
                        sst_fw->dmable_fw_paddr + module->data_offset,
                        module->size);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: module copy failed\n");
                        goto err;
                }
        } else {
                sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
                        module->size);
        }

        mutex_unlock(&dsp->mutex);
        return ret;

err:
        block_list_remove(dsp, &module->block_list);
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
        struct sst_dsp *dsp = module->dsp;

        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, &module->block_list);
        mutex_unlock(&dsp->mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
        int offset)
{
        struct sst_dsp *dsp = runtime->dsp;
        struct sst_module *module = runtime->module;
        struct sst_block_allocator ba;
        int ret;

        if (module->persistent_size == 0)
                return 0;

        memset(&ba, 0, sizeof(ba));
        ba.size = module->persistent_size;
        ba.type = SST_MEM_DRAM;

        mutex_lock(&dsp->mutex);

        /* do we need to allocate at a fixed address? */
        if (offset != 0) {

                ba.offset = offset;

                dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
                        ba.size, ba.type, ba.offset);

                /* alloc blocks that include this section */
                ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

        } else {
                dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
                        ba.size, ba.type);

                /* alloc blocks that include this section */
                ret = block_alloc(dsp, &ba, &runtime->block_list);
        }
        if (ret < 0) {
                dev_err(dsp->dev,
                        "error: no free blocks for runtime module size 0x%x\n",
                        module->persistent_size);
                mutex_unlock(&dsp->mutex);
                return -ENOMEM;
        }
        runtime->persistent_offset = ba.offset;

        /* prepare DSP blocks for module copy */
        ret = block_list_prepare(dsp, &runtime->block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: runtime block prepare failed\n");
                goto err;
        }

        mutex_unlock(&dsp->mutex);
        return ret;

err:
        /* release the blocks we just allocated for this runtime */
        block_list_remove(dsp, &runtime->block_list);
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
        struct sst_dsp *dsp = runtime->dsp;

        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, &runtime->block_list);
        mutex_unlock(&dsp->mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

int sst_module_runtime_save(struct sst_module_runtime *runtime,
        struct sst_module_runtime_context *context)
{
        struct sst_dsp *dsp = runtime->dsp;
        struct sst_module *module = runtime->module;
        int ret = 0;

        dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
                runtime->id, runtime->persistent_offset,
                module->persistent_size);

        context->buffer = dma_alloc_coherent(dsp->dma_dev,
                module->persistent_size,
                &context->dma_buffer, GFP_DMA | GFP_KERNEL);
        if (!context->buffer) {
                dev_err(dsp->dev, "error: DMA context alloc failed\n");
                return -ENOMEM;
        }

        mutex_lock(&dsp->mutex);

        if (dsp->fw_use_dma) {

                ret = sst_dsp_dma_get_channel(dsp, 0);
                if (ret < 0)
                        goto err;

                ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
                        dsp->addr.lpe_base + runtime->persistent_offset,
                        module->persistent_size);
                sst_dsp_dma_put_channel(dsp);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: context copy failed\n");
                        goto err;
                }
        } else {
                sst_memcpy32(context->buffer, dsp->addr.lpe +
                        runtime->persistent_offset,
                        module->persistent_size);
        }

err:
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

int sst_module_runtime_restore(struct sst_module_runtime *runtime,
        struct sst_module_runtime_context *context)
{
        struct sst_dsp *dsp = runtime->dsp;
        struct sst_module *module = runtime->module;
        int ret = 0;

        dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
                runtime->id, runtime->persistent_offset,
                module->persistent_size);

        mutex_lock(&dsp->mutex);

        if (!context->buffer) {
                dev_info(dsp->dev, "no context buffer to restore\n");
                goto err;
        }

        if (dsp->fw_use_dma) {

                ret = sst_dsp_dma_get_channel(dsp, 0);
                if (ret < 0)
                        goto err;

                ret = sst_dsp_dma_copyto(dsp,
                        dsp->addr.lpe_base + runtime->persistent_offset,
                        context->dma_buffer, module->persistent_size);
                sst_dsp_dma_put_channel(dsp);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: module copy failed\n");
                        goto err;
                }
        } else {
                sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
                        context->buffer, module->persistent_size);
        }

        dma_free_coherent(dsp->dma_dev, module->persistent_size,
                                context->buffer, context->dma_buffer);
        context->buffer = NULL;

err:
        mutex_unlock(&dsp->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

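/*
 * Usage sketch (illustrative): a driver's PM hooks can pair these, e.g.
 *
 *	suspend:  sst_module_runtime_save(runtime, &rt_context);
 *	resume:   sst_module_runtime_restore(runtime, &rt_context);
 *
 * where rt_context is a struct sst_module_runtime_context owned by the
 * caller; restore frees the context buffer that save allocated.
 */
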
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
        u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
        u32 index, void *private)
{
        struct sst_mem_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (block == NULL)
                return NULL;

        block->offset = offset;
        block->size = size;
        block->index = index;
        block->type = type;
        block->dsp = dsp;
        block->private = private;
        block->ops = ops;

        mutex_lock(&dsp->mutex);
        list_add(&block->list, &dsp->free_block_list);
        mutex_unlock(&dsp->mutex);

        return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

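/*
 * Usage sketch (illustrative): platform init code registers each physical
 * memory block once, e.g. for a hypothetical layout of four 32KB DRAM
 * blocks starting at offset 0:
 *
 *	for (i = 0; i < 4; i++)
 *		sst_mem_block_register(dsp, i * 0x8000, 0x8000,
 *				       SST_MEM_DRAM, ops, i, NULL);
 *
 * "ops" is the platform's struct sst_block_ops with enable/disable
 * callbacks.
 */
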
/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
        struct sst_mem_block *block, *tmp;

        mutex_lock(&dsp->mutex);

        /* unregister used blocks */
        list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
                list_del(&block->list);
                kfree(block);
        }

        /* unregister free blocks */
        list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
                list_del(&block->list);
                kfree(block);
        }

        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
        struct sst_module *module;
        struct sst_block_allocator ba;
        int ret;

        mutex_lock(&dsp->mutex);

        /* calculate required scratch size */
        dsp->scratch_size = 0;
        list_for_each_entry(module, &dsp->module_list, list) {
                dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
                        module->id, module->scratch_size);
                if (dsp->scratch_size < module->scratch_size)
                        dsp->scratch_size = module->scratch_size;
        }

        dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
                dsp->scratch_size);

        if (dsp->scratch_size == 0) {
                dev_info(dsp->dev, "no modules need scratch buffer\n");
                mutex_unlock(&dsp->mutex);
                return 0;
        }

        /* allocate blocks for module scratch buffers */
        dev_dbg(dsp->dev, "allocating scratch blocks\n");

        ba.size = dsp->scratch_size;
        ba.type = SST_MEM_DRAM;

        /* do we need to allocate at a fixed offset? */
        if (dsp->scratch_offset != 0) {

                /* set the offset before logging it */
                ba.offset = dsp->scratch_offset;

                dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
                        ba.size, ba.type, ba.offset);

                /* alloc blocks that include this section */
                ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

        } else {
                dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
                        ba.size, ba.type);

                ba.offset = 0;
                ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
        }
        if (ret < 0) {
                dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
                mutex_unlock(&dsp->mutex);
                return ret;
        }

        ret = block_list_prepare(dsp, &dsp->scratch_block_list);
        if (ret < 0) {
                dev_err(dsp->dev, "error: scratch block prepare failed\n");
                mutex_unlock(&dsp->mutex);
                return ret;
        }

        /* assign the same scratch offset to each module */
        dsp->scratch_offset = ba.offset;
        mutex_unlock(&dsp->mutex);
        return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
        mutex_lock(&dsp->mutex);
        block_list_remove(dsp, &dsp->scratch_block_list);
        mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
        struct sst_module *module;

        mutex_lock(&dsp->mutex);

        list_for_each_entry(module, &dsp->module_list, list) {
                if (module->id == id) {
                        mutex_unlock(&dsp->mutex);
                        return module;
                }
        }

        mutex_unlock(&dsp->mutex);
        return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
        struct sst_module *module, u32 id)
{
        struct sst_module_runtime *runtime;
        struct sst_dsp *dsp = module->dsp;

        mutex_lock(&dsp->mutex);

        list_for_each_entry(runtime, &module->runtime_list, list) {
                if (runtime->id == id) {
                        mutex_unlock(&dsp->mutex);
                        return runtime;
                }
        }

        mutex_unlock(&dsp->mutex);
        return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
        enum sst_mem_type type)
{
        switch (type) {
        case SST_MEM_IRAM:
                return offset - dsp->addr.iram_offset +
                        dsp->addr.dsp_iram_offset;
        case SST_MEM_DRAM:
                return offset - dsp->addr.dram_offset +
                        dsp->addr.dsp_dram_offset;
        default:
                return 0;
        }
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);

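/*
 * Worked example (illustrative numbers): with iram_offset 0x80000,
 * dsp_iram_offset 0xa0000 and an IRAM block at host offset 0x81000,
 * sst_dsp_get_offset() returns 0x81000 - 0x80000 + 0xa0000 = 0xa1000,
 * i.e. the same block as seen from the DSP's own address space.
 */
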
struct sst_dsp *sst_dsp_new(struct device *dev,
        struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
        struct sst_dsp *sst;
        int err;

        dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

        sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
        if (sst == NULL)
                return NULL;

        spin_lock_init(&sst->spinlock);
        mutex_init(&sst->mutex);
        sst->dev = dev;
        sst->dma_dev = pdata->dma_dev;
        sst->thread_context = sst_dev->thread_context;
        sst->sst_dev = sst_dev;
        sst->id = pdata->id;
        sst->irq = pdata->irq;
        sst->ops = sst_dev->ops;
        sst->pdata = pdata;
        INIT_LIST_HEAD(&sst->used_block_list);
        INIT_LIST_HEAD(&sst->free_block_list);
        INIT_LIST_HEAD(&sst->module_list);
        INIT_LIST_HEAD(&sst->fw_list);
        INIT_LIST_HEAD(&sst->scratch_block_list);

        /* Initialise SST Audio DSP */
        if (sst->ops->init) {
                err = sst->ops->init(sst, pdata);
                if (err < 0)
                        return NULL;
        }

        /* Register the ISR */
        err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
                sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
        if (err)
                goto irq_err;

        err = sst_dma_new(sst);
        if (err)
                dev_warn(dev, "sst_dma_new failed %d\n", err);

        return sst;

irq_err:
        if (sst->ops->free)
                sst->ops->free(sst);

        return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);

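/*
 * Usage sketch (illustrative): bus glue creates and tears down the DSP
 * instance around probe/remove, e.g.
 *
 *	sst = sst_dsp_new(dev, &sst_dev, pdata);
 *	if (!sst)
 *		return -ENODEV;
 *	...
 *	sst_dsp_free(sst);
 */
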
void sst_dsp_free(struct sst_dsp *sst)
{
        free_irq(sst->irq, sst);
        if (sst->ops->free)
                sst->ops->free(sst);

        sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);

MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");