linux/drivers/crypto/mxs-dcp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#define DCP_MAX_CHANS   4
#define DCP_BUF_SZ      PAGE_SIZE
#define DCP_SHA_PAY_SZ  64

#define DCP_ALIGNMENT   64

/*
 * Null hashes to align with hardware behavior on i.MX6SL and i.MX6ULL;
 * they are byte-reversed for consistency with the hardware output.
 */
static const uint8_t sha1_null_hash[] =
        "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
        "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
        "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
        "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
        "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
        "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
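/*
 * For reference: these are the standard digests of the empty message
 * (SHA-1("") = da39a3ee..., SHA-256("") = e3b0c442...), stored byte-reversed
 * to match the flipped digest order the engine produces.
 */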

/* DCP DMA descriptor. */
struct dcp_dma_desc {
        uint32_t        next_cmd_addr;
        uint32_t        control0;
        uint32_t        control1;
        uint32_t        source;
        uint32_t        destination;
        uint32_t        size;
        uint32_t        payload;
        uint32_t        status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
        uint8_t                 aes_in_buf[DCP_BUF_SZ];
        uint8_t                 aes_out_buf[DCP_BUF_SZ];
        uint8_t                 sha_in_buf[DCP_BUF_SZ];
        uint8_t                 sha_out_buf[DCP_SHA_PAY_SZ];

        uint8_t                 aes_key[2 * AES_KEYSIZE_128];

        struct dcp_dma_desc     desc[DCP_MAX_CHANS];
};
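/*
 * All request data is staged through this 64-byte aligned bounce block
 * (see DCP_ALIGNMENT) rather than DMA-mapping the caller's scatterlists
 * directly, which keeps the engine's source/destination buffers contiguous.
 */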

struct dcp {
        struct device                   *dev;
        void __iomem                    *base;

        uint32_t                        caps;

        struct dcp_coherent_block       *coh;

        struct completion               completion[DCP_MAX_CHANS];
        spinlock_t                      lock[DCP_MAX_CHANS];
        struct task_struct              *thread[DCP_MAX_CHANS];
        struct crypto_queue             queue[DCP_MAX_CHANS];
        struct clk                      *dcp_clk;
};

enum dcp_chan {
        DCP_CHAN_HASH_SHA       = 0,
        DCP_CHAN_CRYPTO         = 2,
};

struct dcp_async_ctx {
        /* Common context */
        enum dcp_chan   chan;
        uint32_t        fill;

        /* SHA Hash-specific context */
        struct mutex                    mutex;
        uint32_t                        alg;
        unsigned int                    hot:1;

        /* Crypto-specific context */
        struct crypto_sync_skcipher     *fallback;
        unsigned int                    key_len;
        uint8_t                         key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
        unsigned int    enc:1;
        unsigned int    ecb:1;
};

struct dcp_sha_req_ctx {
        unsigned int    init:1;
        unsigned int    fini:1;
};

struct dcp_export_state {
        struct dcp_sha_req_ctx req_ctx;
        struct dcp_async_ctx async_ctx;
};

/*
 * Due to the design of the Linux Crypto API, there can be only one
 * instance of the MXS DCP.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL                            0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES     (1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING     (1 << 22)

#define MXS_DCP_STAT                            0x10
#define MXS_DCP_STAT_CLR                        0x18
#define MXS_DCP_STAT_IRQ_MASK                   0xf

#define MXS_DCP_CHANNELCTRL                     0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK 0xff

#define MXS_DCP_CAPABILITY1                     0x40
#define MXS_DCP_CAPABILITY1_SHA256              (4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1                (1 << 16)
#define MXS_DCP_CAPABILITY1_AES128              (1 << 0)

#define MXS_DCP_CONTEXT                         0x50

#define MXS_DCP_CH_N_CMDPTR(n)                  (0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)                    (0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)                    (0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)                (0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM              (1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT              (1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY            (1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT         (1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT            (1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH            (1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER          (1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE         (1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT              (1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256     (2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1       (0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC        (1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB        (0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128   (0 << 0)

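/*
 * Kicking off a transfer (see below): the single per-channel descriptor is
 * DMA-mapped, its bus address is written to CH_N_CMDPTR, and the channel
 * semaphore is incremented by one; the engine raises an interrupt when the
 * descriptor completes, which signals the per-channel completion.
 */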
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
        struct dcp *sdcp = global_sdcp;
        const int chan = actx->chan;
        uint32_t stat;
        unsigned long timeout;
        int ret;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

        dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
                                              DMA_TO_DEVICE);

        reinit_completion(&sdcp->completion[chan]);

        /* Clear status register. */
        writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

        /* Load the DMA descriptor. */
        writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

        /* Increment the semaphore to start the DMA transfer. */
        writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

        timeout = wait_for_completion_timeout(&sdcp->completion[chan],
                                              msecs_to_jiffies(1000));
        if (!timeout) {
                dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
                        chan, readl(sdcp->base + MXS_DCP_STAT));
                ret = -ETIMEDOUT;
                goto err_unmap;
        }

        stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
        if (stat & 0xff) {
                dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
                        chan, stat);
                ret = -EINVAL;
                goto err_unmap;
        }

        ret = 0;

err_unmap:
        /* Unmap the descriptor on all paths, not just on success. */
        dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

        return ret;
}

/*
 * Encryption (AES128)
 */
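/*
 * The descriptor payload points at sdcp->coh->aes_key, which holds the
 * 16-byte AES key immediately followed by the 16-byte CBC IV (zeroed for
 * ECB); MXS_DCP_CONTROL0_PAYLOAD_KEY tells the engine to fetch its key
 * material from there.
 */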
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
                           struct ablkcipher_request *req, int init)
{
        struct dcp *sdcp = global_sdcp;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
        struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
        int ret;

        dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
                                             2 * AES_KEYSIZE_128,
                                             DMA_TO_DEVICE);
        dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
                                             DCP_BUF_SZ, DMA_TO_DEVICE);
        dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
                                             DCP_BUF_SZ, DMA_FROM_DEVICE);

        if (actx->fill % AES_BLOCK_SIZE) {
                dev_err(sdcp->dev, "Invalid block size!\n");
                ret = -EINVAL;
                goto aes_done_run;
        }

        /* Fill in the DMA descriptor. */
        desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
                    MXS_DCP_CONTROL0_INTERRUPT |
                    MXS_DCP_CONTROL0_ENABLE_CIPHER;

        /* Payload contains the key. */
        desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

        if (rctx->enc)
                desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
        if (init)
                desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

        desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

        if (rctx->ecb)
                desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
        else
                desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

        desc->next_cmd_addr = 0;
        desc->source = src_phys;
        desc->destination = dst_phys;
        desc->size = actx->fill;
        desc->payload = key_phys;
        desc->status = 0;

        ret = mxs_dcp_start_dma(actx);

aes_done_run:
        dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
                         DMA_TO_DEVICE);
        dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
        dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

        return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
        struct dcp *sdcp = global_sdcp;

        struct ablkcipher_request *req = ablkcipher_request_cast(arq);
        struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
        struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

        struct scatterlist *dst = req->dst;
        struct scatterlist *src = req->src;
        const int nents = sg_nents(req->src);

        const int out_off = DCP_BUF_SZ;
        uint8_t *in_buf = sdcp->coh->aes_in_buf;
        uint8_t *out_buf = sdcp->coh->aes_out_buf;

        uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
        uint32_t dst_off = 0;
        uint32_t last_out_len = 0;

        uint8_t *key = sdcp->coh->aes_key;

        int ret = 0;
        int split = 0;
        unsigned int i, len, clen, rem = 0, tlen = 0;
        int init = 0;
        bool limit_hit = false;

        actx->fill = 0;

        /* Copy the key from the temporary location. */
        memcpy(key, actx->key, actx->key_len);

        if (!rctx->ecb) {
                /* Copy the CBC IV just past the key. */
                memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
                /* CBC needs the INIT set. */
                init = 1;
        } else {
                memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
        }

        for_each_sg(req->src, src, nents, i) {
                src_buf = sg_virt(src);
                len = sg_dma_len(src);
                tlen += len;
                limit_hit = tlen > req->nbytes;

                if (limit_hit)
                        len = req->nbytes - (tlen - len);

                do {
                        if (actx->fill + len > out_off)
                                clen = out_off - actx->fill;
                        else
                                clen = len;

                        memcpy(in_buf + actx->fill, src_buf, clen);
                        len -= clen;
                        src_buf += clen;
                        actx->fill += clen;

                        /*
                         * If we filled the buffer or this is the last SG,
                         * submit the buffer.
                         */
                        if (actx->fill == out_off || sg_is_last(src) ||
                                limit_hit) {
                                ret = mxs_dcp_run_aes(actx, req, init);
                                if (ret)
                                        return ret;
                                init = 0;

                                out_tmp = out_buf;
                                last_out_len = actx->fill;
                                while (dst && actx->fill) {
                                        if (!split) {
                                                dst_buf = sg_virt(dst);
                                                dst_off = 0;
                                        }
                                        rem = min(sg_dma_len(dst) - dst_off,
                                                  actx->fill);

                                        memcpy(dst_buf + dst_off, out_tmp, rem);
                                        out_tmp += rem;
                                        dst_off += rem;
                                        actx->fill -= rem;

                                        if (dst_off == sg_dma_len(dst)) {
                                                dst = sg_next(dst);
                                                split = 0;
                                        } else {
                                                split = 1;
                                        }
                                }
                        }
                } while (len);

                if (limit_hit)
                        break;
        }

        /*
         * Propagate the IV for CBC chaining: the next IV is the last
         * ciphertext block (the output when encrypting, the input when
         * decrypting).
         */
        if (!rctx->ecb) {
                if (rctx->enc)
                        memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
                                AES_BLOCK_SIZE);
                else
                        memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
                                AES_BLOCK_SIZE);
        }

        return ret;
}

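/*
 * Each DCP channel is drained by its own kthread: requests are queued on
 * the per-channel crypto_queue under the channel spinlock, the thread is
 * woken, and it completes each request (plus any backlog notification)
 * from process context.
 */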
static int dcp_chan_thread_aes(void *data)
{
        struct dcp *sdcp = global_sdcp;
        const int chan = DCP_CHAN_CRYPTO;

        struct crypto_async_request *backlog;
        struct crypto_async_request *arq;

        int ret;

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
                spin_unlock(&sdcp->lock[chan]);

                if (!backlog && !arq) {
                        schedule();
                        continue;
                }

                set_current_state(TASK_RUNNING);

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                if (arq) {
                        ret = mxs_dcp_aes_block_crypt(arq);
                        arq->complete(arq, ret);
                }
        }

        return 0;
}

static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
        int ret;

        skcipher_request_set_sync_tfm(subreq, ctx->fallback);
        skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst,
                                   req->nbytes, req->info);

        if (enc)
                ret = crypto_skcipher_encrypt(subreq);
        else
                ret = crypto_skcipher_decrypt(subreq);

        skcipher_request_zero(subreq);

        return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
        struct dcp *sdcp = global_sdcp;
        struct crypto_async_request *arq = &req->base;
        struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
        struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
        int ret;

        if (unlikely(actx->key_len != AES_KEYSIZE_128))
                return mxs_dcp_block_fallback(req, enc);

        rctx->enc = enc;
        rctx->ecb = ecb;
        actx->chan = DCP_CHAN_CRYPTO;

        spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
        spin_unlock(&sdcp->lock[actx->chan]);

        wake_up_process(sdcp->thread[actx->chan]);

        return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int len)
{
        struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
        int ret;

        /*
         * AES-128 is supported by the hardware: store the key into the
         * temporary buffer and exit. We must use the temporary buffer here,
         * since there can still be an operation in progress.
         */
        actx->key_len = len;
        if (len == AES_KEYSIZE_128) {
                memcpy(actx->key, key, len);
                return 0;
        }

        /*
         * If the requested AES key size is not supported by the hardware,
         * but is supported by in-kernel software implementation, we use
         * software fallback.
         */
        crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(actx->fallback,
                                  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
        if (!ret)
                return 0;

        tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
                               CRYPTO_TFM_RES_MASK;

        return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
        struct crypto_sync_skcipher *blk;

        blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(blk))
                return PTR_ERR(blk);

        actx->fallback = blk;
        tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
        return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
        struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
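/*
 * Hash flow: dcp_sha_init() only selects the algorithm; each update copies
 * data into the sha_in_buf bounce buffer and submits one descriptor per
 * filled DCP_BUF_SZ chunk (HASH_INIT on the first, HASH_TERM on the last).
 * On the final descriptor the payload receives the digest, which the
 * hardware emits in reversed byte order.
 */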
static int mxs_dcp_run_sha(struct ahash_request *req)
{
        struct dcp *sdcp = global_sdcp;
        int ret;

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
        struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

        dma_addr_t digest_phys = 0;
        dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
                                             DCP_BUF_SZ, DMA_TO_DEVICE);

        /* Fill in the DMA descriptor. */
        desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
                    MXS_DCP_CONTROL0_INTERRUPT |
                    MXS_DCP_CONTROL0_ENABLE_HASH;
        if (rctx->init)
                desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

        desc->control1 = actx->alg;
        desc->next_cmd_addr = 0;
        desc->source = buf_phys;
        desc->destination = 0;
        desc->size = actx->fill;
        desc->payload = 0;
        desc->status = 0;

        /*
         * Zero-length input: substitute the precomputed null hash to align
         * the driver with the hardware's behavior.
         */
        if (rctx->init && rctx->fini && desc->size == 0) {
                struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
                const uint8_t *sha_buf =
                        (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
                        sha1_null_hash : sha256_null_hash;
                memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
                ret = 0;
                goto done_run;
        }

        /* Set HASH_TERM bit for last transfer block. */
        if (rctx->fini) {
                digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
                                             DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
                desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
                desc->payload = digest_phys;
        }

        ret = mxs_dcp_start_dma(actx);

        if (rctx->fini)
                dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
                                 DMA_FROM_DEVICE);

done_run:
        dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

        return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
        struct dcp *sdcp = global_sdcp;

        struct ahash_request *req = ahash_request_cast(arq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
        struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
        const int nents = sg_nents(req->src);

        uint8_t *in_buf = sdcp->coh->sha_in_buf;
        uint8_t *out_buf = sdcp->coh->sha_out_buf;

        uint8_t *src_buf;

        struct scatterlist *src;

        unsigned int i, len, clen;
        int ret;

        int fin = rctx->fini;

        if (fin)
                rctx->fini = 0;

        for_each_sg(req->src, src, nents, i) {
                src_buf = sg_virt(src);
                len = sg_dma_len(src);

                do {
                        if (actx->fill + len > DCP_BUF_SZ)
                                clen = DCP_BUF_SZ - actx->fill;
                        else
                                clen = len;

                        memcpy(in_buf + actx->fill, src_buf, clen);
                        len -= clen;
                        src_buf += clen;
                        actx->fill += clen;

                        /*
                         * If we filled the buffer and still have some
                         * more data, submit the buffer.
                         */
                        if (len && actx->fill == DCP_BUF_SZ) {
                                ret = mxs_dcp_run_sha(req);
                                if (ret)
                                        return ret;
                                actx->fill = 0;
                                rctx->init = 0;
                        }
                } while (len);
        }

        if (fin) {
                rctx->fini = 1;

                /* Submit whatever is left. */
                if (!req->result)
                        return -EINVAL;

                ret = mxs_dcp_run_sha(req);
                if (ret)
                        return ret;

                actx->fill = 0;

                /* The hardware emits the digest byte-reversed; flip it. */
                for (i = 0; i < halg->digestsize; i++)
                        req->result[i] = out_buf[halg->digestsize - i - 1];
        }

        return 0;
}

static int dcp_chan_thread_sha(void *data)
{
        struct dcp *sdcp = global_sdcp;
        const int chan = DCP_CHAN_HASH_SHA;

        struct crypto_async_request *backlog;
        struct crypto_async_request *arq;
        int ret;

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
                spin_unlock(&sdcp->lock[chan]);

                if (!backlog && !arq) {
                        schedule();
                        continue;
                }

                set_current_state(TASK_RUNNING);

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                if (arq) {
                        ret = dcp_sha_req_to_buf(arq);
                        arq->complete(arq, ret);
                }
        }

        return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

        struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

        /*
         * Start hashing session. The code below only inits the
         * hashing session context, nothing more.
         */
        memset(actx, 0, sizeof(*actx));

        if (strcmp(halg->base.cra_name, "sha1") == 0)
                actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
        else
                actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

        actx->fill = 0;
        actx->hot = 0;
        actx->chan = DCP_CHAN_HASH_SHA;

        mutex_init(&actx->mutex);

        return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
        struct dcp *sdcp = global_sdcp;

        struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

        int ret;

        /*
         * Ignore requests that have no data in them and are not
         * the trailing requests in the stream of requests.
         */
        if (!req->nbytes && !fini)
                return 0;

        mutex_lock(&actx->mutex);

        rctx->fini = fini;

        if (!actx->hot) {
                actx->hot = 1;
                rctx->init = 1;
        }

        spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
        spin_unlock(&sdcp->lock[actx->chan]);

        wake_up_process(sdcp->thread[actx->chan]);
        mutex_unlock(&actx->mutex);

        return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
        return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
        ahash_request_set_crypt(req, NULL, req->result, 0);
        req->nbytes = 0;
        return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
        return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
        int ret;

        ret = dcp_sha_init(req);
        if (ret)
                return ret;

        return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
        struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
        const struct dcp_export_state *export = in;

        memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
        memset(actx, 0, sizeof(struct dcp_async_ctx));
        memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
        memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

        return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
        struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
        struct dcp_export_state *export = out;

        memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
        memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

        return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct dcp_sha_req_ctx));
        return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
        {
                .cra_name               = "ecb(aes)",
                .cra_driver_name        = "ecb-aes-dcp",
                .cra_priority           = 400,
                .cra_alignmask          = 15,
                .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_init               = mxs_dcp_aes_fallback_init,
                .cra_exit               = mxs_dcp_aes_fallback_exit,
                .cra_blocksize          = AES_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct dcp_async_ctx),
                .cra_type               = &crypto_ablkcipher_type,
                .cra_module             = THIS_MODULE,
                .cra_u  = {
                        .ablkcipher = {
                                .min_keysize    = AES_MIN_KEY_SIZE,
                                .max_keysize    = AES_MAX_KEY_SIZE,
                                .setkey         = mxs_dcp_aes_setkey,
                                .encrypt        = mxs_dcp_aes_ecb_encrypt,
                                .decrypt        = mxs_dcp_aes_ecb_decrypt
                        },
                },
        }, {
                .cra_name               = "cbc(aes)",
                .cra_driver_name        = "cbc-aes-dcp",
                .cra_priority           = 400,
                .cra_alignmask          = 15,
                .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                          CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_init               = mxs_dcp_aes_fallback_init,
                .cra_exit               = mxs_dcp_aes_fallback_exit,
                .cra_blocksize          = AES_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct dcp_async_ctx),
                .cra_type               = &crypto_ablkcipher_type,
                .cra_module             = THIS_MODULE,
                .cra_u = {
                        .ablkcipher = {
                                .min_keysize    = AES_MIN_KEY_SIZE,
                                .max_keysize    = AES_MAX_KEY_SIZE,
                                .setkey         = mxs_dcp_aes_setkey,
                                .encrypt        = mxs_dcp_aes_cbc_encrypt,
                                .decrypt        = mxs_dcp_aes_cbc_decrypt,
                                .ivsize         = AES_BLOCK_SIZE,
                        },
                },
        },
};
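/*
 * Usage sketch (hypothetical consumer, not part of this driver): once
 * registered, these implementations are reached through the generic API,
 * e.g.
 *
 *      struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 *      crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * where the crypto core may select "cbc-aes-dcp" based on its priority (400).
 */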

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
        .init   = dcp_sha_init,
        .update = dcp_sha_update,
        .final  = dcp_sha_final,
        .finup  = dcp_sha_finup,
        .digest = dcp_sha_digest,
        .import = dcp_sha_import,
        .export = dcp_sha_export,
        .halg   = {
                .digestsize     = SHA1_DIGEST_SIZE,
                .statesize      = sizeof(struct dcp_export_state),
                .base           = {
                        .cra_name               = "sha1",
                        .cra_driver_name        = "sha1-dcp",
                        .cra_priority           = 400,
                        .cra_alignmask          = 63,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA1_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct dcp_async_ctx),
                        .cra_module             = THIS_MODULE,
                        .cra_init               = dcp_sha_cra_init,
                        .cra_exit               = dcp_sha_cra_exit,
                },
        },
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
        .init   = dcp_sha_init,
        .update = dcp_sha_update,
        .final  = dcp_sha_final,
        .finup  = dcp_sha_finup,
        .digest = dcp_sha_digest,
        .import = dcp_sha_import,
        .export = dcp_sha_export,
        .halg   = {
                .digestsize     = SHA256_DIGEST_SIZE,
                .statesize      = sizeof(struct dcp_export_state),
                .base           = {
                        .cra_name               = "sha256",
                        .cra_driver_name        = "sha256-dcp",
                        .cra_priority           = 400,
                        .cra_alignmask          = 63,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA256_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct dcp_async_ctx),
                        .cra_module             = THIS_MODULE,
                        .cra_init               = dcp_sha_cra_init,
                        .cra_exit               = dcp_sha_cra_exit,
                },
        },
};
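/*
 * Usage sketch (hypothetical consumer, not part of this driver): hashing is
 * reached through the ahash API, e.g.
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 * followed by the usual ahash_request_alloc()/ahash_request_set_crypt() and
 * crypto_ahash_digest() sequence.
 */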

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
        struct dcp *sdcp = context;
        uint32_t stat;
        int i;

        stat = readl(sdcp->base + MXS_DCP_STAT);
        stat &= MXS_DCP_STAT_IRQ_MASK;
        if (!stat)
                return IRQ_NONE;

        /* Clear the interrupts. */
        writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

        /* Complete the DMA requests that finished. */
        for (i = 0; i < DCP_MAX_CHANS; i++)
                if (stat & (1 << i))
                        complete(&sdcp->completion[i]);

        return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct dcp *sdcp = NULL;
        int i, ret;
        int dcp_vmi_irq, dcp_irq;

        if (global_sdcp) {
                dev_err(dev, "Only one DCP instance allowed!\n");
                return -ENODEV;
        }

        dcp_vmi_irq = platform_get_irq(pdev, 0);
        if (dcp_vmi_irq < 0) {
                dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
                return dcp_vmi_irq;
        }

        dcp_irq = platform_get_irq(pdev, 1);
        if (dcp_irq < 0) {
                dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
                return dcp_irq;
        }

        sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
        if (!sdcp)
                return -ENOMEM;

        sdcp->dev = dev;
        sdcp->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sdcp->base))
                return PTR_ERR(sdcp->base);

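        /* Both DCP IRQ lines are serviced by the same handler. */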
        ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
                               "dcp-vmi-irq", sdcp);
        if (ret) {
                dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
                return ret;
        }

        ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
                               "dcp-irq", sdcp);
        if (ret) {
                dev_err(dev, "Failed to claim DCP IRQ!\n");
                return ret;
        }

        /* Allocate coherent helper block. */
        sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
                                   GFP_KERNEL);
        if (!sdcp->coh)
                return -ENOMEM;

        /* Re-align the structure so it fits the DCP constraints. */
        sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
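        /*
         * The over-allocation by DCP_ALIGNMENT bytes above guarantees that a
         * 64-byte aligned address exists inside the allocation; PTR_ALIGN
         * rounds the pointer up to it. The original pointer remains owned by
         * devm, so nothing leaks; only the aligned alias is used from here on.
         */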

        /* DCP clock is optional, only used on some SOCs */
        sdcp->dcp_clk = devm_clk_get(dev, "dcp");
        if (IS_ERR(sdcp->dcp_clk)) {
                if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
                        return PTR_ERR(sdcp->dcp_clk);
                sdcp->dcp_clk = NULL;
        }
        ret = clk_prepare_enable(sdcp->dcp_clk);
        if (ret)
                return ret;

        /* Restart the DCP block. */
        ret = stmp_reset_block(sdcp->base);
        if (ret) {
                dev_err(dev, "Failed reset\n");
                goto err_disable_unprepare_clk;
        }

        /* Initialize control register. */
        writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
               MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
               sdcp->base + MXS_DCP_CTRL);

        /* Enable all DCP DMA channels. */
        writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
               sdcp->base + MXS_DCP_CHANNELCTRL);

        /*
         * We do not enable context switching. Give the context buffer a
         * pointer to an illegal address so if context switching is
         * inadvertently enabled, the DCP will return an error instead of
         * trashing good memory. The DCP DMA cannot access ROM, so any ROM
         * address will do.
         */
        writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
        for (i = 0; i < DCP_MAX_CHANS; i++)
                writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
        writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

        global_sdcp = sdcp;

        platform_set_drvdata(pdev, sdcp);

        for (i = 0; i < DCP_MAX_CHANS; i++) {
                spin_lock_init(&sdcp->lock[i]);
                init_completion(&sdcp->completion[i]);
                crypto_init_queue(&sdcp->queue[i], 50);
        }

        /* Create the SHA and AES handler threads. */
        sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
                                                      NULL, "mxs_dcp_chan/sha");
        if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
                dev_err(dev, "Error starting SHA thread!\n");
                ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
                goto err_disable_unprepare_clk;
        }

        sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
                                                    NULL, "mxs_dcp_chan/aes");
        if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
                dev_err(dev, "Error starting AES thread!\n");
                ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
                goto err_destroy_sha_thread;
        }

        /* Register the various crypto algorithms. */
        sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

        if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
                ret = crypto_register_algs(dcp_aes_algs,
                                           ARRAY_SIZE(dcp_aes_algs));
                if (ret) {
                        /* Failed to register algorithm. */
                        dev_err(dev, "Failed to register AES crypto!\n");
                        goto err_destroy_aes_thread;
                }
        }

        if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
                ret = crypto_register_ahash(&dcp_sha1_alg);
                if (ret) {
                        dev_err(dev, "Failed to register %s hash!\n",
                                dcp_sha1_alg.halg.base.cra_name);
                        goto err_unregister_aes;
                }
        }

        if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
                ret = crypto_register_ahash(&dcp_sha256_alg);
                if (ret) {
                        dev_err(dev, "Failed to register %s hash!\n",
                                dcp_sha256_alg.halg.base.cra_name);
                        goto err_unregister_sha1;
                }
        }

        return 0;

err_unregister_sha1:
        if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
                crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
        if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
                crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
        kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
        kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
        clk_disable_unprepare(sdcp->dcp_clk);

        return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
        struct dcp *sdcp = platform_get_drvdata(pdev);

        if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
                crypto_unregister_ahash(&dcp_sha256_alg);

        if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
                crypto_unregister_ahash(&dcp_sha1_alg);

        if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
                crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

        kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
        kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

        clk_disable_unprepare(sdcp->dcp_clk);

        platform_set_drvdata(pdev, NULL);

        global_sdcp = NULL;

        return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
        { .compatible = "fsl,imx23-dcp", .data = NULL, },
        { .compatible = "fsl,imx28-dcp", .data = NULL, },
        { /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
        .probe  = mxs_dcp_probe,
        .remove = mxs_dcp_remove,
        .driver = {
                .name           = "mxs-dcp",
                .of_match_table = mxs_dcp_dt_ids,
        },
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");