linux/drivers/crypto/dcp.c
/*
 * Cryptographic API.
 *
 * Support for DCP cryptographic accelerator.
 *
 * Copyright (c) 2013
 * Author: Tobias Rauter <tobias.rauter@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on tegra-aes.c, dcp.c (from the Freescale SDK) and sahara.c
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/miscdevice.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

/* IOCTLs for DCP OTP-key AES - taken from the Freescale SDK */
#define DBS_IOCTL_BASE   'd'
#define DBS_ENC _IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
#define DBS_DEC _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
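
/*
 * Illustrative userspace sketch (not part of the driver): one 16-byte block
 * is en/decrypted in place through the bootstream miscdevice registered
 * below. This assumes the miscdevice shows up as /dev/dcpboot and that the
 * DBS_* definitions above are shared with the userspace program.
 *
 *	uint8_t blk[16] = { 0 };
 *	int fd = open("/dev/dcpboot", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, DBS_ENC, blk) == 0)
 *		printf("blk now holds the OTP-key-encrypted block\n");
 */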

/* DCP channel used for AES */
#define USED_CHANNEL 1
/* Ring buffer's maximum size */
#define DCP_MAX_PKG 20

/* Control Register */
#define DCP_REG_CTRL 0x000
#define DCP_CTRL_SFRST (1<<31)
#define DCP_CTRL_CLKGATE (1<<30)
#define DCP_CTRL_CRYPTO_PRESENT (1<<29)
#define DCP_CTRL_SHA_PRESENT (1<<28)
#define DCP_CTRL_GATHER_RES_WRITE (1<<23)
#define DCP_CTRL_ENABLE_CONTEXT_CACHE (1<<22)
#define DCP_CTRL_ENABLE_CONTEXT_SWITCH (1<<21)
#define DCP_CTRL_CH_IRQ_E_0 0x01
#define DCP_CTRL_CH_IRQ_E_1 0x02
#define DCP_CTRL_CH_IRQ_E_2 0x04
#define DCP_CTRL_CH_IRQ_E_3 0x08

/* Status register */
#define DCP_REG_STAT 0x010
#define DCP_STAT_OTP_KEY_READY (1<<28)
#define DCP_STAT_CUR_CHANNEL(stat) (((stat)>>24)&0x0F)
#define DCP_STAT_READY_CHANNEL(stat) (((stat)>>16)&0x0F)
#define DCP_STAT_IRQ(stat) ((stat)&0x0F)
#define DCP_STAT_CHAN_0 (0x01)
#define DCP_STAT_CHAN_1 (0x02)
#define DCP_STAT_CHAN_2 (0x04)
#define DCP_STAT_CHAN_3 (0x08)

/* Channel Control Register */
#define DCP_REG_CHAN_CTRL 0x020
#define DCP_CHAN_CTRL_CH0_IRQ_MERGED (1<<16)
#define DCP_CHAN_CTRL_HIGH_PRIO_0 (0x0100)
#define DCP_CHAN_CTRL_HIGH_PRIO_1 (0x0200)
#define DCP_CHAN_CTRL_HIGH_PRIO_2 (0x0400)
#define DCP_CHAN_CTRL_HIGH_PRIO_3 (0x0800)
#define DCP_CHAN_CTRL_ENABLE_0 (0x01)
#define DCP_CHAN_CTRL_ENABLE_1 (0x02)
#define DCP_CHAN_CTRL_ENABLE_2 (0x04)
#define DCP_CHAN_CTRL_ENABLE_3 (0x08)

/*
 * Channel registers:
 * The DCP has four channels. Each channel has four registers
 * (command pointer, semaphore, status and options).
 * The address of register REG of channel CHAN is obtained via
 * dcp_chan_reg(REG, CHAN).
 */
#define DCP_REG_CHAN_PTR        0x00000100
#define DCP_REG_CHAN_SEMA       0x00000110
#define DCP_REG_CHAN_STAT       0x00000120
#define DCP_REG_CHAN_OPT        0x00000130

#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0   0x010000
#define DCP_CHAN_STAT_NO_CHAIN          0x020000
#define DCP_CHAN_STAT_CONTEXT_ERROR     0x030000
#define DCP_CHAN_STAT_PAYLOAD_ERROR     0x040000
#define DCP_CHAN_STAT_INVALID_MODE      0x050000
#define DCP_CHAN_STAT_PAGEFAULT         0x40
#define DCP_CHAN_STAT_DST               0x20
#define DCP_CHAN_STAT_SRC               0x10
#define DCP_CHAN_STAT_PACKET            0x08
#define DCP_CHAN_STAT_SETUP             0x04
#define DCP_CHAN_STAT_MISMATCH          0x02

/* hw packet control */
#define DCP_PKT_PAYLOAD_KEY     (1<<11)
#define DCP_PKT_OTP_KEY         (1<<10)
#define DCP_PKT_CIPHER_INIT     (1<<9)
#define DCP_PKG_CIPHER_ENCRYPT  (1<<8)
#define DCP_PKT_CIPHER_ENABLE   (1<<5)
#define DCP_PKT_DECR_SEM        (1<<1)
#define DCP_PKT_CHAIN           (1<<2)
#define DCP_PKT_IRQ             1

#define DCP_PKT_MODE_CBC        (1<<4)
#define DCP_PKT_KEYSELECT_OTP   (0xFF<<8)

/* cipher flags */
#define DCP_ENC         0x0001
#define DCP_DEC         0x0002
#define DCP_ECB         0x0004
#define DCP_CBC         0x0008
#define DCP_CBC_INIT    0x0010
#define DCP_NEW_KEY     0x0040
#define DCP_OTP_KEY     0x0080
#define DCP_AES         0x1000

/* DCP flags */
#define DCP_FLAG_BUSY   0x01
#define DCP_FLAG_PRODUCING      0x02

/* clock defines */
#define CLOCK_ON        1
#define CLOCK_OFF       0

struct dcp_dev_req_ctx {
        int mode;
};

struct dcp_op {
        unsigned int            flags;
        u8                      key[AES_KEYSIZE_128];
        int                     keylen;

        struct ablkcipher_request       *req;
        struct crypto_ablkcipher        *fallback;

        uint32_t stat;
        uint32_t pkt1;
        uint32_t pkt2;
        struct ablkcipher_walk walk;
};

 154
 155struct dcp_dev {
 156        struct device *dev;
 157        void __iomem *dcp_regs_base;
 158
 159        int dcp_vmi_irq;
 160        int dcp_irq;
 161
 162        spinlock_t queue_lock;
 163        struct crypto_queue queue;
 164
 165        uint32_t pkt_produced;
 166        uint32_t pkt_consumed;
 167
 168        struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
 169        dma_addr_t hw_phys_pkg;
 170
 171        /* [KEY][IV] Both with 16 Bytes */
 172        u8 *payload_base;
 173        dma_addr_t payload_base_dma;
 174
 175
 176        struct tasklet_struct   done_task;
 177        struct tasklet_struct   queue_task;
 178        struct timer_list       watchdog;
 179
 180        unsigned long           flags;
 181
 182        struct dcp_op *ctx;
 183
 184        struct miscdevice dcp_bootstream_misc;
 185};
 186
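/*
 * One hardware work packet, laid out as the DCP fetches it via DMA: a
 * physical pointer to the next packet, the two control words, physical
 * source/destination/payload addresses, the byte count and a status word
 * the engine writes back on completion.
 */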
struct dcp_hw_packet {
        uint32_t next;
        uint32_t pkt1;
        uint32_t pkt2;
        uint32_t src;
        uint32_t dst;
        uint32_t size;
        uint32_t payload;
        uint32_t stat;
};

static struct dcp_dev *global_dev;

static inline u32 dcp_chan_reg(u32 reg, int chan)
{
        return reg + chan * 0x40;
}

static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->dcp_regs_base + reg);
}

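/*
 * Like most i.MX peripherals, the DCP exposes SET/CLR/TOG aliases of each
 * register at offsets +0x04/+0x08/+0x0C, so individual bits can be set,
 * cleared or toggled without a read-modify-write cycle.
 */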
static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->dcp_regs_base + (reg | 0x04));
}

static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->dcp_regs_base + (reg | 0x08));
}

static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->dcp_regs_base + (reg | 0x0C));
}

static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
{
        return readl(dev->dcp_regs_base + reg);
}

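/*
 * DMA (un)mapping of the source and destination pages of the current
 * ablkcipher walk chunk for one hw packet. dcp_dma_map() rounds the
 * length down to a whole number of 16-byte AES blocks.
 */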
static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
{
        dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
        dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
        dev_dbg(dev->dev, "unmap packet %p", pkt);
}

static int dcp_dma_map(struct dcp_dev *dev,
        struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
{
        dev_dbg(dev->dev, "map packet %p", pkt);
        /* align to a multiple of the 16-byte AES block size */
        pkt->size = walk->nbytes - (walk->nbytes % 16);

        pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
                pkt->size, DMA_TO_DEVICE);

        if (dma_mapping_error(dev->dev, pkt->src)) {
                dev_err(dev->dev, "Unable to map src");
                return -ENOMEM;
        }

        pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
                pkt->size, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev->dev, pkt->dst)) {
                dev_err(dev->dev, "Unable to map dst");
                dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        return 0;
}

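/*
 * Finish one hw packet from the current context: fill in the control
 * words and the payload (key/IV) pointer, request an IRQ, chain to the
 * next packet unless this is the last one, (re)arm the watchdog and bump
 * the channel semaphore so the engine fetches the packet.
 */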
static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
                        uint8_t last)
{
        struct dcp_op *ctx = dev->ctx;

        pkt->pkt1 = ctx->pkt1;
        pkt->pkt2 = ctx->pkt2;

        pkt->payload = (u32) dev->payload_base_dma;
        pkt->stat = 0;

        if (ctx->flags & DCP_CBC_INIT) {
                pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
                ctx->flags &= ~DCP_CBC_INIT;
        }

        mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
        pkt->pkt1 |= DCP_PKT_IRQ;
        if (!last)
                pkt->pkt1 |= DCP_PKT_CHAIN;

        dev->pkt_produced++;

        dcp_write(dev, 1,
                dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
}

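/*
 * Produce hw packets for the remaining walk data and hand them to the
 * engine. Stops early when the ring is full (the producer may run at
 * most DCP_MAX_PKG packets ahead of the consumer) or when mapping fails,
 * in which case the error is latched into the context status.
 */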
static void dcp_op_proceed(struct dcp_dev *dev)
{
        struct dcp_op *ctx = dev->ctx;
        struct dcp_hw_packet *pkt;

        while (ctx->walk.nbytes) {
                int err = 0;

                pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
                err = dcp_dma_map(dev, &ctx->walk, pkt);
                if (err) {
                        dev->ctx->stat |= err;
                        /* start the timer to wait for already set up calls */
                        mod_timer(&dev->watchdog,
                                jiffies + msecs_to_jiffies(500));
                        break;
                }

                err = ctx->walk.nbytes - pkt->size;
                ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);

                dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
                /* we have to wait if no space is left in the buffer */
                if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
                        break;
        }
        clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
}

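/*
 * Set up a complete operation: copy a new key into the DMA-able payload
 * area if required, build the control words from the context flags,
 * reset the channel status, point the channel at the descriptor ring and
 * start producing packets - either along the ablkcipher walk (async
 * path) or from the pre-filled hw_pkg[0] (bootstream ioctl path).
 */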
static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
{
        struct dcp_op *ctx = dev->ctx;

        if (ctx->flags & DCP_NEW_KEY) {
                memcpy(dev->payload_base, ctx->key, ctx->keylen);
                ctx->flags &= ~DCP_NEW_KEY;
        }

        ctx->pkt1 = 0;
        ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
        ctx->pkt1 |= DCP_PKT_DECR_SEM;

        if (ctx->flags & DCP_OTP_KEY)
                ctx->pkt1 |= DCP_PKT_OTP_KEY;
        else
                ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;

        if (ctx->flags & DCP_ENC)
                ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;

        ctx->pkt2 = 0;
        if (ctx->flags & DCP_CBC)
                ctx->pkt2 |= DCP_PKT_MODE_CBC;

        dev->pkt_produced = 0;
        dev->pkt_consumed = 0;

        ctx->stat = 0;
        dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
        dcp_write(dev, (u32) dev->hw_phys_pkg,
                dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));

        set_bit(DCP_FLAG_PRODUCING, &dev->flags);

        if (use_walk) {
                ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
                                ctx->req->src, ctx->req->nbytes);
                ablkcipher_walk_phys(ctx->req, &ctx->walk);
                dcp_op_proceed(dev);
        } else {
                dcp_op_one(dev, dev->hw_pkg[0], 1);
                clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
        }
}

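/*
 * Done tasklet, scheduled from the channel IRQ: consume every packet the
 * engine has marked done and unmap its pages; then either resume packet
 * production if the chain is not finished yet, or tear down the walk and
 * complete the request (the bootstream path merely clears the busy bit).
 */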
static void dcp_done_task(unsigned long data)
{
        struct dcp_dev *dev = (struct dcp_dev *)data;
        struct dcp_hw_packet *last_packet;
        int fin = 0;

        for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
                last_packet->stat == 1;
                last_packet =
                        dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {

                dcp_dma_unmap(dev, last_packet);
                last_packet->stat = 0;
                fin++;
        }
        /* the last call of this function already consumed this IRQ's packet */
        if (fin == 0)
                return;

        dev_dbg(dev->dev,
                "Packet(s) done with status %x; finished: %d, produced: %d, consumed: %d",
                dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);

        last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
        if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
                if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
                        dcp_op_proceed(dev);
                return;
        }

        while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
                dcp_dma_unmap(dev,
                        dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
        }

        if (dev->ctx->flags & DCP_OTP_KEY) {
                /* we used the miscdevice, there is no walk to finish */
                clear_bit(DCP_FLAG_BUSY, &dev->flags);
                return;
        }

        ablkcipher_walk_complete(&dev->ctx->walk);
        dev->ctx->req->base.complete(&dev->ctx->req->base,
                        dev->ctx->stat);
        dev->ctx->req = NULL;
        /* in case there are other requests in the queue */
        tasklet_schedule(&dev->queue_task);
}

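/*
 * Watchdog timer, armed whenever a packet is submitted: latch the
 * channel status into the context (or -ETIMEDOUT if the channel looks
 * clean) and force completion through dcp_done_task().
 */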
static void dcp_watchdog(unsigned long data)
{
        struct dcp_dev *dev = (struct dcp_dev *)data;

        dev->ctx->stat |= dcp_read(dev,
                        dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));

        dev_err(dev->dev, "Timeout, channel status: %x", dev->ctx->stat);

        if (!dev->ctx->stat)
                dev->ctx->stat = -ETIMEDOUT;

        dcp_done_task(data);
}

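/*
 * Both interrupt lines funnel into the same handler: acknowledge the
 * per-channel IRQ bits, accumulate the channel status and defer the
 * actual completion work to the done tasklet.
 */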
static irqreturn_t dcp_common_irq(int irq, void *context)
{
        u32 msk;
        struct dcp_dev *dev = (struct dcp_dev *) context;

        del_timer(&dev->watchdog);

        msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
        dcp_clear(dev, msk, DCP_REG_STAT);
        if (msk == 0)
                return IRQ_NONE;

        dev->ctx->stat |= dcp_read(dev,
                        dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));

        if (msk & DCP_STAT_CHAN_1)
                tasklet_schedule(&dev->done_task);

        return IRQ_HANDLED;
}

static irqreturn_t dcp_vmi_irq(int irq, void *context)
{
        return dcp_common_irq(irq, context);
}

static irqreturn_t dcp_irq(int irq, void *context)
{
        return dcp_common_irq(irq, context);
}

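/*
 * Start an async cipher request on the hardware. For CBC, the IV is
 * stashed right behind the key in the shared payload buffer.
 */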
static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
{
        dev->ctx = ctx;

        if ((ctx->flags & DCP_CBC) && ctx->req->info) {
                ctx->flags |= DCP_CBC_INIT;
                memcpy(dev->payload_base + AES_KEYSIZE_128,
                        ctx->req->info, AES_BLOCK_SIZE);
        }

        dcp_op_start(dev, 1);
}

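/*
 * Queue tasklet: pull the next request off the crypto queue, notify any
 * backlogged submitter and feed the request to the hardware. Clears the
 * busy flag when the queue turns out to be empty.
 */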
static void dcp_queue_task(unsigned long data)
{
        struct dcp_dev *dev = (struct dcp_dev *) data;
        struct crypto_async_request *async_req, *backlog;
        struct crypto_ablkcipher *tfm;
        struct dcp_op *ctx;
        struct dcp_dev_req_ctx *rctx;
        struct ablkcipher_request *req;
        unsigned long flags;

        spin_lock_irqsave(&dev->queue_lock, flags);

        backlog = crypto_get_backlog(&dev->queue);
        async_req = crypto_dequeue_request(&dev->queue);

        spin_unlock_irqrestore(&dev->queue_lock, flags);

        if (!async_req)
                goto ret_nothing_done;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);
        tfm = crypto_ablkcipher_reqtfm(req);
        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(tfm);

        if (!req->src || !req->dst)
                goto ret_nothing_done;

        ctx->flags |= rctx->mode;
        ctx->req = req;

        dcp_crypt(dev, ctx);

        return;

ret_nothing_done:
        clear_bit(DCP_FLAG_BUSY, &dev->flags);
}

static int dcp_cra_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct dcp_op *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);

        ctx->fallback = crypto_alloc_ablkcipher(name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(ctx->fallback)) {
                dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
                        name);
                return PTR_ERR(ctx->fallback);
        }

        return 0;
}

static void dcp_cra_exit(struct crypto_tfm *tfm)
{
        struct dcp_op *ctx = crypto_tfm_ctx(tfm);

        if (ctx->fallback)
                crypto_free_ablkcipher(ctx->fallback);

        ctx->fallback = NULL;
}

/* async interface */
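/*
 * The engine itself only handles AES-128. Matching key lengths are kept
 * for the hardware; everything else is delegated to the software
 * fallback tfm allocated in dcp_cra_init().
 */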
static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                unsigned int len)
{
        struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
        int ret = 0;

        ctx->keylen = len;
        ctx->flags = 0;
        if (len == AES_KEYSIZE_128) {
                if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
                        memcpy(ctx->key, key, len);
                        ctx->flags |= DCP_NEW_KEY;
                }
                return 0;
        }

        ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        ctx->fallback->base.crt_flags |=
                (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
        if (ret) {
                struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

                tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm_aux->crt_flags |=
                        (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

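/*
 * Enqueue a CBC request and kick the queue tasklet, unless the device is
 * already busy - the done path reschedules the tasklet once the current
 * operation finishes.
 */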
static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
{
        struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
        struct dcp_dev *dev = global_dev;
        unsigned long flags;
        int err = 0;

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
                return -EINVAL;

        rctx->mode = mode;

        spin_lock_irqsave(&dev->queue_lock, flags);
        err = ablkcipher_enqueue_request(&dev->queue, req);
        spin_unlock_irqrestore(&dev->queue_lock, flags);

        flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);

        if (!(flags & DCP_FLAG_BUSY))
                tasklet_schedule(&dev->queue_task);

        return err;
}

static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct dcp_op *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                int err = 0;

                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_encrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
}

static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct dcp_op *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                int err = 0;

                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_decrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
}

static struct crypto_alg algs[] = {
        {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "dcp-cbc-aes",
                .cra_alignmask = 3,
                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_type = &crypto_ablkcipher_type,
                .cra_priority = 300,
                .cra_u.ablkcipher = {
                        .min_keysize = AES_KEYSIZE_128,
                        .max_keysize = AES_KEYSIZE_128,
                        .setkey = dcp_aes_setkey,
                        .encrypt = dcp_aes_cbc_encrypt,
                        .decrypt = dcp_aes_cbc_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                }
        },
};

 658
 659/* DCP bootstream verification interface: uses OTP key for crypto */
 660static int dcp_bootstream_open(struct inode *inode, struct file *file)
 661{
 662        file->private_data = container_of((file->private_data),
 663                        struct dcp_dev, dcp_bootstream_misc);
 664        return 0;
 665}
 666
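/*
 * Synchronously en/decrypt one 16-byte block with the OTP key: the block
 * is copied into the shared payload buffer, hw_pkg[0] is pointed at that
 * buffer in place, and the caller busy-waits until the done path clears
 * the busy flag.
 */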
static long dcp_bootstream_ioctl(struct file *file,
                                         unsigned int cmd, unsigned long arg)
{
        struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
        void __user *argp = (void __user *)arg;
        int ret;

        if (dev == NULL)
                return -EBADF;

        if (cmd != DBS_ENC && cmd != DBS_DEC)
                return -EINVAL;

        if (copy_from_user(dev->payload_base, argp, 16))
                return -EFAULT;

        if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
                return -EAGAIN;

        dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
        if (!dev->ctx) {
                dev_err(dev->dev,
                        "cannot allocate context for OTP crypto");
                clear_bit(DCP_FLAG_BUSY, &dev->flags);
                return -ENOMEM;
        }

        dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
        dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
        dev->hw_pkg[0]->src = dev->payload_base_dma;
        dev->hw_pkg[0]->dst = dev->payload_base_dma;
        dev->hw_pkg[0]->size = 16;

        dcp_op_start(dev, 0);

        while (test_bit(DCP_FLAG_BUSY, &dev->flags))
                cpu_relax();

        ret = dev->ctx->stat;
        if (!ret && copy_to_user(argp, dev->payload_base, 16))
                ret = -EFAULT;

        kfree(dev->ctx);

        return ret;
}

static const struct file_operations dcp_bootstream_fops = {
        .owner =                THIS_MODULE,
        .unlocked_ioctl =       dcp_bootstream_ioctl,
        .open =                 dcp_bootstream_open,
};

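/*
 * Probe: map the registers, soft-reset the block, enable channel 1 and
 * its interrupt, claim both IRQ lines, allocate the descriptor ring and
 * the key/IV payload buffer, then register the bootstream miscdevice and
 * the cipher algorithms.
 */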
static int dcp_probe(struct platform_device *pdev)
{
        struct dcp_dev *dev = NULL;
        struct resource *r;
        int i, ret, j;

        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        global_dev = dev;
        dev->dev = &pdev->dev;

        platform_set_drvdata(pdev, dev);

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
                return -ENXIO;
        }
        dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
                                          resource_size(r));
        if (!dev->dcp_regs_base)
                return -ENOMEM;

        dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
        udelay(10);
        dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);

        dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
                DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
                DCP_REG_CTRL);

        dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);

        for (i = 0; i < 4; i++)
                dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));

        dcp_clear(dev, -1, DCP_REG_STAT);

        r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!r) {
                dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
                return -EIO;
        }
        dev->dcp_vmi_irq = r->start;
        ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
        if (ret != 0) {
                dev_err(&pdev->dev, "can't request_irq (0)\n");
                return -EIO;
        }

        r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
        if (!r) {
                dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
                ret = -EIO;
                goto err_free_irq0;
        }
        dev->dcp_irq = r->start;
        ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
        if (ret != 0) {
                dev_err(&pdev->dev, "can't request_irq (1)\n");
                ret = -EIO;
                goto err_free_irq0;
        }

        dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
                        DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
                        &dev->hw_phys_pkg,
                        GFP_KERNEL);
        if (!dev->hw_pkg[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
                ret = -ENOMEM;
                goto err_free_irq1;
        }

        for (i = 1; i < DCP_MAX_PKG; i++) {
                dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg
                                + i * sizeof(struct dcp_hw_packet);
                dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
        }
        dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;

        dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                        &dev->payload_base_dma, GFP_KERNEL);
        if (!dev->payload_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
                ret = -ENOMEM;
                goto err_free_hw_packet;
        }

        tasklet_init(&dev->queue_task, dcp_queue_task,
                (unsigned long) dev);
        tasklet_init(&dev->done_task, dcp_done_task,
                (unsigned long) dev);
        spin_lock_init(&dev->queue_lock);

        crypto_init_queue(&dev->queue, 10);

        init_timer(&dev->watchdog);
        dev->watchdog.function = &dcp_watchdog;
        dev->watchdog.data = (unsigned long)dev;

        dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR;
        dev->dcp_bootstream_misc.name = "dcpboot";
        dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops;

        ret = misc_register(&dev->dcp_bootstream_misc);
        if (ret != 0) {
                dev_err(dev->dev, "Unable to register misc device\n");
                goto err_free_key_iv;
        }

        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                algs[i].cra_priority = 300;
                algs[i].cra_ctxsize = sizeof(struct dcp_op);
                algs[i].cra_module = THIS_MODULE;
                algs[i].cra_init = dcp_cra_init;
                algs[i].cra_exit = dcp_cra_exit;

                ret = crypto_register_alg(&algs[i]);
                if (ret) {
                        dev_err(&pdev->dev, "register algorithm failed\n");
                        goto err_unregister;
                }
        }
        dev_notice(&pdev->dev, "DCP crypto enabled\n");

        return 0;

err_unregister:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);
        misc_deregister(&dev->dcp_bootstream_misc);
err_free_key_iv:
        dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
                        dev->payload_base_dma);
err_free_hw_packet:
        dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
                sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
                dev->hw_phys_pkg);
err_free_irq1:
        free_irq(dev->dcp_irq, dev);
err_free_irq0:
        free_irq(dev->dcp_vmi_irq, dev);

        return ret;
}

 864
 865static int dcp_remove(struct platform_device *pdev)
 866{
 867        struct dcp_dev *dev;
 868        int j;
 869        dev = platform_get_drvdata(pdev);
 870
 871        dma_free_coherent(&pdev->dev,
 872                        DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
 873                        dev->hw_pkg[0], dev->hw_phys_pkg);
 874
 875        dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
 876                        dev->payload_base_dma);
 877
 878        free_irq(dev->dcp_irq, dev);
 879        free_irq(dev->dcp_vmi_irq, dev);
 880
 881        tasklet_kill(&dev->done_task);
 882        tasklet_kill(&dev->queue_task);
 883
 884        for (j = 0; j < ARRAY_SIZE(algs); j++)
 885                crypto_unregister_alg(&algs[j]);
 886
 887        misc_deregister(&dev->dcp_bootstream_misc);
 888
 889        return 0;
 890}
 891
 892static struct of_device_id fs_dcp_of_match[] = {
 893        {       .compatible = "fsl-dcp"},
 894        {},
 895};
 896
 897static struct platform_driver fs_dcp_driver = {
 898        .probe = dcp_probe,
 899        .remove = dcp_remove,
 900        .driver = {
 901                .name = "fsl-dcp",
 902                .owner = THIS_MODULE,
 903                .of_match_table = fs_dcp_of_match
 904        }
 905};
 906
 907module_platform_driver(fs_dcp_driver);
 908
 909
 910MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>");
 911MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
 912MODULE_LICENSE("GPL");
 913