linux/drivers/crypto/ccp/ccp-dmaengine.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

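/*
 * Convert the device's DMA mask to an address-width value. An all-ones
 * (64-bit) mask wraps to zero when incremented and is reported as 64.
 */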
#define CCP_DMA_WIDTH(_mask)            \
({                                      \
        u64 mask = (_mask) + 1;         \
        (mask == 0) ? 64 : fls64(mask); \
})

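/* Free every DMA command on @list, returning each to the command cache. */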
static void ccp_free_cmd_resources(struct ccp_device *ccp,
                                   struct list_head *list)
{
        struct ccp_dma_cmd *cmd, *ctmp;

        list_for_each_entry_safe(cmd, ctmp, list, entry) {
                list_del(&cmd->entry);
                kmem_cache_free(ccp->dma_cmd_cache, cmd);
        }
}

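/*
 * Free every DMA descriptor on @list, along with any commands still on
 * a descriptor's active or pending lists.
 */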
static void ccp_free_desc_resources(struct ccp_device *ccp,
                                    struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe(desc, dtmp, list, entry) {
                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

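/*
 * dmaengine device_free_chan_resources callback: release every
 * descriptor the channel still holds, whatever its state.
 */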
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

        spin_lock_irqsave(&chan->lock, flags);

        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);
}

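/*
 * Walk @list in reverse and free every descriptor the client has
 * acknowledged, skipping any still awaiting an ack.
 */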
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
                                       struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
                if (!async_tx_test_ack(&desc->tx_desc))
                        continue;

                dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

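/* Tasklet handler: reclaim acknowledged descriptors on the complete list. */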
static void ccp_do_cleanup(unsigned long data)
{
        struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
                dma_chan_name(&chan->dma_chan));

        spin_lock_irqsave(&chan->lock, flags);

        ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

        spin_unlock_irqrestore(&chan->lock, flags);
}

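/*
 * Move the descriptor's next pending command to its active list and
 * enqueue it on the CCP. -EINPROGRESS and -EBUSY are not failures here:
 * the command was accepted (or backlogged), so report success.
 */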
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;
        int ret;

        cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
        list_move(&cmd->entry, &desc->active);

        dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
                desc->tx_desc.cookie, cmd);

        ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
        if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
                return 0;

        dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
                ret, desc->tx_desc.cookie, cmd);

        return ret;
}

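/* Free the command, if any, at the head of the descriptor's active list. */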
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;

        cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
                                       entry);
        if (!cmd)
                return;

        dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
                __func__, desc->tx_desc.cookie, cmd);

        list_del(&cmd->entry);
        kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

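/*
 * Retire @desc to the complete list and return the next descriptor on
 * the active list, if any. Caller must hold chan->lock.
 */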
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
                                                struct ccp_dma_desc *desc)
{
        /* Move current DMA descriptor to the complete list */
        if (desc)
                list_move(&desc->entry, &chan->complete);

        /* Get the next DMA descriptor on the active list */
        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        return desc;
}

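/*
 * Retire finished descriptors and return the first one that still has
 * pending commands. Completion callbacks and dependency processing run
 * with the channel lock dropped.
 */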
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
                                                   struct ccp_dma_desc *desc)
{
        struct dma_async_tx_descriptor *tx_desc;
        unsigned long flags;

        /* Loop over descriptors until one is found with commands */
        do {
                if (desc) {
                        /* Remove the DMA command from the list and free it */
                        ccp_free_active_cmd(desc);

                        if (!list_empty(&desc->pending)) {
                                /* No errors, keep going */
                                if (desc->status != DMA_ERROR)
                                        return desc;

                                /* Error, free remaining commands and move on */
                                ccp_free_cmd_resources(desc->ccp,
                                                       &desc->pending);
                        }

                        tx_desc = &desc->tx_desc;
                } else {
                        tx_desc = NULL;
                }

                spin_lock_irqsave(&chan->lock, flags);

                if (desc) {
                        if (desc->status != DMA_ERROR)
                                desc->status = DMA_COMPLETE;

                        dev_dbg(desc->ccp->dev,
                                "%s - tx %d complete, status=%u\n", __func__,
                                desc->tx_desc.cookie, desc->status);

                        dma_cookie_complete(tx_desc);
                }

                desc = __ccp_next_dma_desc(chan, desc);

                spin_unlock_irqrestore(&chan->lock, flags);

                if (tx_desc) {
                        if (tx_desc->callback &&
                            (tx_desc->flags & DMA_PREP_INTERRUPT))
                                tx_desc->callback(tx_desc->callback_param);

                        dma_run_dependencies(tx_desc);
                }
        } while (desc);

        return NULL;
}

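/*
 * Splice the pending list onto the active list. Return the first newly
 * activated descriptor if the active list was previously empty (the
 * caller must kick it off), or NULL if processing is already underway.
 * Caller must hold chan->lock.
 */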
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
        struct ccp_dma_desc *desc;

        if (list_empty(&chan->pending))
                return NULL;

        desc = list_empty(&chan->active)
                ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
                : NULL;

        list_splice_tail_init(&chan->pending, &chan->active);

        return desc;
}

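/*
 * Per-command completion callback invoked by the CCP driver. The interim
 * -EINPROGRESS notification is ignored; on final completion, retire the
 * finished descriptor chain and issue the next command. If a submission
 * fails, the descriptor is marked in error and the loop moves on.
 */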
static void ccp_cmd_callback(void *data, int err)
{
        struct ccp_dma_desc *desc = data;
        struct ccp_dma_chan *chan;
        int ret;

        if (err == -EINPROGRESS)
                return;

        chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
                            dma_chan);

        dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
                __func__, desc->tx_desc.cookie, err);

        if (err)
                desc->status = DMA_ERROR;

        while (true) {
                /* Check for DMA descriptor completion */
                desc = ccp_handle_active_desc(chan, desc);

                /* Don't submit cmd if no descriptor or DMA is paused */
                if (!desc || (chan->status == DMA_PAUSED))
                        break;

                ret = ccp_issue_next_cmd(desc);
                if (!ret)
                        break;

                desc->status = DMA_ERROR;
        }

        tasklet_schedule(&chan->cleanup_tasklet);
}

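/*
 * dmaengine tx_submit callback: assign a cookie and move the descriptor
 * from the created list to the pending list.
 */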
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
        struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
                                                 tx_desc);
        struct ccp_dma_chan *chan;
        dma_cookie_t cookie;
        unsigned long flags;

        chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

        spin_lock_irqsave(&chan->lock, flags);

        cookie = dma_cookie_assign(tx_desc);
        list_del(&desc->entry);
        list_add_tail(&desc->entry, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);

        dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
                __func__, cookie);

        return cookie;
}

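/* Allocate a zeroed DMA command from the channel's command cache. */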
static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
        return kmem_cache_zalloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
}

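/*
 * Allocate a zeroed DMA descriptor and initialize its embedded dmaengine
 * tx descriptor, wiring tx_submit to ccp_tx_submit.
 */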
static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
                                               unsigned long flags)
{
        struct ccp_dma_desc *desc;

        desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
        desc->tx_desc.flags = flags;
        desc->tx_desc.tx_submit = ccp_tx_submit;
        desc->ccp = chan->ccp;
        INIT_LIST_HEAD(&desc->pending);
        INIT_LIST_HEAD(&desc->active);
        desc->status = DMA_IN_PROGRESS;

        return desc;
}

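/*
 * Build a descriptor that copies @src_sg to @dst_sg. The two scatterlists
 * are walked in parallel; each step generates a passthru (no-op) CCP
 * command covering min(remaining src, remaining dst) bytes. The caller
 * must already have DMA-mapped both lists.
 */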
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
                                            struct scatterlist *dst_sg,
                                            unsigned int dst_nents,
                                            struct scatterlist *src_sg,
                                            unsigned int src_nents,
                                            unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_device *ccp = chan->ccp;
        struct ccp_dma_desc *desc;
        struct ccp_dma_cmd *cmd;
        struct ccp_cmd *ccp_cmd;
        struct ccp_passthru_nomap_engine *ccp_pt;
        unsigned int src_offset, src_len;
        unsigned int dst_offset, dst_len;
        unsigned int len;
        unsigned long sflags;
        size_t total_len;

        if (!dst_sg || !src_sg)
                return NULL;

        if (!dst_nents || !src_nents)
                return NULL;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        total_len = 0;

        src_len = sg_dma_len(src_sg);
        src_offset = 0;

        dst_len = sg_dma_len(dst_sg);
        dst_offset = 0;

        while (true) {
                if (!src_len) {
                        src_nents--;
                        if (!src_nents)
                                break;

                        src_sg = sg_next(src_sg);
                        if (!src_sg)
                                break;

                        src_len = sg_dma_len(src_sg);
                        src_offset = 0;
                        continue;
                }

                if (!dst_len) {
                        dst_nents--;
                        if (!dst_nents)
                                break;

                        dst_sg = sg_next(dst_sg);
                        if (!dst_sg)
                                break;

                        dst_len = sg_dma_len(dst_sg);
                        dst_offset = 0;
                        continue;
                }

                len = min(dst_len, src_len);

                cmd = ccp_alloc_dma_cmd(chan);
                if (!cmd)
                        goto err;

                ccp_cmd = &cmd->ccp_cmd;
                ccp_cmd->ccp = chan->ccp;
                ccp_pt = &ccp_cmd->u.passthru_nomap;
                ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
                ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
                ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
                ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
                ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
                ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
                ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
                ccp_pt->src_len = len;
                ccp_pt->final = 1;
                ccp_cmd->callback = ccp_cmd_callback;
                ccp_cmd->data = desc;

                list_add_tail(&cmd->entry, &desc->pending);

                dev_dbg(ccp->dev,
                        "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
                        cmd, &ccp_pt->src_dma,
                        &ccp_pt->dst_dma, ccp_pt->src_len);

                total_len += len;

                src_len -= len;
                src_offset += len;

                dst_len -= len;
                dst_offset += len;
        }

        desc->len = total_len;

        if (list_empty(&desc->pending))
                goto err;

        dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

        spin_lock_irqsave(&chan->lock, sflags);

        list_add_tail(&desc->entry, &chan->created);

        spin_unlock_irqrestore(&chan->lock, sflags);

        return desc;

err:
        ccp_free_cmd_resources(ccp, &desc->pending);
        kmem_cache_free(ccp->dma_desc_cache, desc);

        return NULL;
}

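/*
 * dmaengine device_prep_dma_memcpy callback: wrap the source and
 * destination addresses in single-entry scatterlists and build a
 * descriptor from them.
 */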
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
        struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        struct scatterlist dst_sg, src_sg;

        dev_dbg(chan->ccp->dev,
                "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
                __func__, &src, &dst, len, flags);

        sg_init_table(&dst_sg, 1);
        sg_dma_address(&dst_sg) = dst;
        sg_dma_len(&dst_sg) = len;

        sg_init_table(&src_sg, 1);
        sg_dma_address(&src_sg) = src;
        sg_dma_len(&src_sg) = len;

        desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

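/* dmaengine device_prep_dma_sg callback: scatterlist-to-scatterlist copy. */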
static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
        struct dma_chan *dma_chan, struct scatterlist *dst_sg,
        unsigned int dst_nents, struct scatterlist *src_sg,
        unsigned int src_nents, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        dev_dbg(chan->ccp->dev,
                "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
                __func__, src_sg, src_nents, dst_sg, dst_nents, flags);

        desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
                               flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

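/*
 * dmaengine device_prep_dma_interrupt callback: an empty descriptor
 * whose only purpose is to fire its completion callback.
 */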
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
        struct dma_chan *dma_chan, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

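/*
 * dmaengine device_issue_pending callback: activate the pending list
 * and, if the channel was idle, start submitting commands.
 */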
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        spin_lock_irqsave(&chan->lock, flags);

        desc = __ccp_pending_to_active(chan);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* If there was nothing active, start processing */
        if (desc)
                ccp_cmd_callback(desc, 0);
}

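/*
 * dmaengine device_tx_status callback: report the cookie's state,
 * preferring the per-descriptor status if the descriptor is still on
 * the complete list (it may have ended in DMA_ERROR).
 */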
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *state)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        enum dma_status ret;
        unsigned long flags;

        if (chan->status == DMA_PAUSED) {
                ret = DMA_PAUSED;
                goto out;
        }

        ret = dma_cookie_status(dma_chan, cookie, state);
        if (ret == DMA_COMPLETE) {
                spin_lock_irqsave(&chan->lock, flags);

                /* Get status from complete chain, if still there */
                list_for_each_entry(desc, &chan->complete, entry) {
                        if (desc->tx_desc.cookie != cookie)
                                continue;

                        ret = desc->status;
                        break;
                }

                spin_unlock_irqrestore(&chan->lock, flags);
        }

out:
        dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

        return ret;
}

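/*
 * dmaengine device_pause callback: stop submitting new commands.
 * Work already in flight is not cancelled (see the TODO below).
 */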
static int ccp_pause(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);

        chan->status = DMA_PAUSED;

        /* TODO: Wait for active DMA to complete before returning? */

        return 0;
}

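/*
 * dmaengine device_resume callback: mark the channel running again and
 * restart processing from the head of the active list.
 */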
static int ccp_resume(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* Indicate the channel is running again */
        chan->status = DMA_IN_PROGRESS;

        /* If there was something active, re-start */
        if (desc)
                ccp_cmd_callback(desc, 0);

        return 0;
}

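/*
 * dmaengine device_terminate_all callback: drop every descriptor that
 * has not yet completed.
 */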
static int ccp_terminate_all(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        /* TODO: Wait for active DMA to complete before continuing */

        spin_lock_irqsave(&chan->lock, flags);

        /* TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

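/*
 * Register one DMA channel per command queue with the dmaengine core,
 * advertising memcpy, scatter-gather and interrupt capabilities. A
 * client then drives these channels through the generic dmaengine API;
 * roughly (a sketch of generic dmaengine usage, not code from this
 * driver):
 *
 *	chan = dma_request_channel(mask, filter_fn, filter_param);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */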
int ccp_dmaengine_register(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_device *dma_dev = &ccp->dma_dev;
        struct dma_chan *dma_chan;
        char *dma_cmd_cache_name;
        char *dma_desc_cache_name;
        unsigned int i;
        int ret;

        ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
                                         sizeof(*(ccp->ccp_dma_chan)),
                                         GFP_KERNEL);
        if (!ccp->ccp_dma_chan)
                return -ENOMEM;

        dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                            "%s-dmaengine-cmd-cache",
                                            ccp->name);
        if (!dma_cmd_cache_name)
                return -ENOMEM;

        ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
                                               sizeof(struct ccp_dma_cmd),
                                               sizeof(void *),
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_cmd_cache)
                return -ENOMEM;

        dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                             "%s-dmaengine-desc-cache",
                                             ccp->name);
        if (!dma_desc_cache_name) {
                ret = -ENOMEM;
                goto err_cache;
        }

        ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
                                                sizeof(struct ccp_dma_desc),
                                                sizeof(void *),
                                                SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_desc_cache) {
                ret = -ENOMEM;
                goto err_cache;
        }

        dma_dev->dev = ccp->dev;
        dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->directions = DMA_MEM_TO_MEM;
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SG, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;

                chan->ccp = ccp;

                spin_lock_init(&chan->lock);
                INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);

                tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
                             (unsigned long)chan);

                dma_chan->device = dma_dev;
                dma_cookie_init(dma_chan);

                list_add_tail(&dma_chan->device_node, &dma_dev->channels);
        }

        dma_dev->device_free_chan_resources = ccp_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
        dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
        dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
        dma_dev->device_issue_pending = ccp_issue_pending;
        dma_dev->device_tx_status = ccp_tx_status;
        dma_dev->device_pause = ccp_pause;
        dma_dev->device_resume = ccp_resume;
        dma_dev->device_terminate_all = ccp_terminate_all;

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_reg;

        return 0;

err_reg:
        kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
        kmem_cache_destroy(ccp->dma_cmd_cache);

        return ret;
}

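/*
 * Unwind ccp_dmaengine_register(): unregister the DMA device and
 * destroy the command and descriptor caches.
 */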
void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
        struct dma_device *dma_dev = &ccp->dma_dev;

        dma_async_device_unregister(dma_dev);

        kmem_cache_destroy(ccp->dma_desc_cache);
        kmem_cache_destroy(ccp->dma_cmd_cache);
}