linux/drivers/crypto/bfin_crc.c
/*
 * Cryptographic API.
 *
 * Support Blackfin CRC HW acceleration.
 *
 * Copyright 2012 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/unaligned/access_ok.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <asm/blackfin.h>
#include <asm/bfin_crc.h>
#include <asm/dma.h>
#include <asm/portmux.h>

#define CRC_CCRYPTO_QUEUE_LENGTH        5

#define DRIVER_NAME "bfin-hmac-crc"
#define CHKSUM_DIGEST_SIZE      4
#define CHKSUM_BLOCK_SIZE       1

#define CRC_MAX_DMA_DESC        100

#define CRC_CRYPTO_STATE_UPDATE         1
#define CRC_CRYPTO_STATE_FINALUPDATE    2
#define CRC_CRYPTO_STATE_FINISH         3

struct bfin_crypto_crc {
        struct list_head        list;
        struct device           *dev;
        spinlock_t              lock;

        int                     irq;
        int                     dma_ch;
        u32                     poly;
        volatile struct crc_register *regs;

        struct ahash_request    *req; /* current request in operation */
        struct dma_desc_array   *sg_cpu; /* virt addr of sg dma descriptors */
        dma_addr_t              sg_dma; /* phy addr of sg dma descriptors */
        u8                      *sg_mid_buf;

        struct tasklet_struct   done_task;
        struct crypto_queue     queue; /* waiting requests */

        u8                      busy:1; /* crc device in operation flag */
};

static struct bfin_crypto_crc_list {
        struct list_head        dev_list;
        spinlock_t              lock;
} crc_list;

struct bfin_crypto_crc_reqctx {
        struct bfin_crypto_crc  *crc;

        unsigned int            total;  /* total request bytes */
        size_t                  sg_buflen; /* bytes for this update */
        unsigned int            sg_nents;
        struct scatterlist      *sg; /* sg list head for this update */
        struct scatterlist      bufsl[2]; /* chained sg list */

        size_t                  bufnext_len;
        size_t                  buflast_len;
        u8                      bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
        u8                      buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */

        u8                      flag;
};

struct bfin_crypto_crc_ctx {
        struct bfin_crypto_crc  *crc;
        u32                     key;
};


/*
 * derive number of elements in scatterlist
 */
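/* This open-coded walk predates the generic sg_nents() helper of later kernels. */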
static int sg_count(struct scatterlist *sg_list)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 1;

        if (sg_list == NULL)
                return 0;

        while (!sg_is_last(sg)) {
                sg_nents++;
                sg = scatterwalk_sg_next(sg);
        }

        return sg_nents;
}

/*
 * get element in scatter list by given index
 */
static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
                                unsigned int index)
{
        struct scatterlist *sg = NULL;
        int i;

        for_each_sg(sg_list, sg, nents, i)
                if (i == index)
                        break;

        return sg;
}

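/*
 * Arm the CRC block for a fresh computation: clear the data count
 * reload value, select CRC-compute mode, seed the result register with
 * the caller's key, then acknowledge and enable the compare-error and
 * data-count-expired interrupts.
 */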
static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
{
        crc->regs->datacntrld = 0;
        crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET;
        crc->regs->curresult = key;

        /* setup CRC interrupts */
        crc->regs->status = CMPERRI | DCNTEXPI;
        crc->regs->intrenset = CMPERRI | DCNTEXPI;
        SSYNC();

        return 0;
}

static int bfin_crypto_crc_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
        struct bfin_crypto_crc *crc;

        spin_lock_bh(&crc_list.lock);
        list_for_each_entry(crc, &crc_list.dev_list, list) {
                crc_ctx->crc = crc;
                break;
        }
        spin_unlock_bh(&crc_list.lock);

        dev_dbg(crc->dev, "crc_init\n");

        if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
                dev_dbg(crc->dev, "init: requested sg list is too big > %d\n",
                        CRC_MAX_DMA_DESC);
                return -EINVAL;
        }

        ctx->crc = crc;
        ctx->bufnext_len = 0;
        ctx->buflast_len = 0;
        ctx->sg_buflen = 0;
        ctx->total = 0;
        ctx->flag = 0;

        /* init crc results */
        put_unaligned_le32(crc_ctx->key, req->result);

        dev_dbg(crc->dev, "init: digest size: %d\n",
                crypto_ahash_digestsize(tfm));

        return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
}

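/*
 * Build the array-mode DMA descriptor chain for the current request in
 * coherent memory, one descriptor per 32-bit-aligned run of each sg
 * fragment.  A fragment whose length is not a multiple of 4 parks its
 * tail bytes in a small per-slot "middle" bounce buffer; they are
 * topped up to a full word from the head of the next fragment and fed
 * to the CRC block through an extra one-word descriptor.  The final
 * descriptor is rewritten to stop mode before the channel is started.
 */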
static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
{
        struct scatterlist *sg;
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
        int i = 0, j = 0;
        unsigned long dma_config;
        unsigned int dma_count;
        unsigned int dma_addr;
        unsigned int mid_dma_count = 0;
        int dma_mod;

        dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);

        for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
                dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
                dma_addr = sg_dma_address(sg);
                /* deduct the bytes held back for the next update from the last sg */
                if (sg_is_last(sg))
                        dma_count = sg_dma_len(sg) - ctx->bufnext_len;
                else
                        dma_count = sg_dma_len(sg);

                if (mid_dma_count) {
                        /* Pad the previous middle dma buffer out to 4 bytes
                           with the first bytes of the current sg buffer,
                           then advance the address of the current sg and
                           shrink its length accordingly.
                         */
                        memcpy(crc->sg_mid_buf + ((i - 1) << 2) + mid_dma_count,
                                (void *)dma_addr,
                                CHKSUM_DIGEST_SIZE - mid_dma_count);
                        dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
                        dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
                }
                /* chop current sg dma len to multiple of 32 bits */
                mid_dma_count = dma_count % 4;
                dma_count &= ~0x3;

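                /* Pick the widest transfer width the buffer alignment
                 * allows; x_count is in DMA words, so the byte count is
                 * scaled to match.
                 */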
                if (dma_addr % 4 == 0) {
                        dma_config |= WDSIZE_32;
                        dma_count >>= 2;
                        dma_mod = 4;
                } else if (dma_addr % 2 == 0) {
                        dma_config |= WDSIZE_16;
                        dma_count >>= 1;
                        dma_mod = 2;
                } else {
                        dma_config |= WDSIZE_8;
                        dma_mod = 1;
                }

                crc->sg_cpu[i].start_addr = dma_addr;
                crc->sg_cpu[i].cfg = dma_config;
                crc->sg_cpu[i].x_count = dma_count;
                crc->sg_cpu[i].x_modify = dma_mod;
                dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                        "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                        i, crc->sg_cpu[i].start_addr,
                        crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                        crc->sg_cpu[i].x_modify);
                i++;

                if (mid_dma_count) {
                        /* copy extra bytes to next middle dma buffer */
                        dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
                                DMAEN | PSIZE_32 | WDSIZE_32;
                        memcpy(crc->sg_mid_buf + (i << 2),
                                (void *)(dma_addr + (dma_count << 2)),
                                mid_dma_count);
                        /* setup new dma descriptor for next middle dma */
                        crc->sg_cpu[i].start_addr = dma_map_single(crc->dev,
                                        crc->sg_mid_buf + (i << 2),
                                        CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
                        crc->sg_cpu[i].cfg = dma_config;
                        crc->sg_cpu[i].x_count = 1;
                        crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
                        dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                                "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                                i, crc->sg_cpu[i].start_addr,
                                crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                                crc->sg_cpu[i].x_modify);
                        i++;
                }
        }

        dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
        /* For final update req, append the buffer for next update as well */
        if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
                ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
                crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
                                                CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
                crc->sg_cpu[i].cfg = dma_config;
                crc->sg_cpu[i].x_count = 1;
                crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
                dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                        "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                        i, crc->sg_cpu[i].start_addr,
                        crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                        crc->sg_cpu[i].x_modify);
                i++;
        }

        if (i == 0)
                return;

        flush_dcache_range((unsigned int)crc->sg_cpu,
                        (unsigned int)crc->sg_cpu +
                        i * sizeof(struct dma_desc_array));

        /* Set the last descriptor to stop mode */
        crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
        crc->sg_cpu[i - 1].cfg |= DI_EN;
        set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
        set_dma_x_count(crc->dma_ch, 0);
        set_dma_x_modify(crc->dma_ch, 0);
        SSYNC();
        set_dma_config(crc->dma_ch, dma_config);
}

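/*
 * Dequeue the next request and start it.  Sub-word leftovers are
 * carried between calls in bufnext: a FINISH with pending bytes pads
 * them to a full 32-bit word, an update too small to fill a word is
 * merely buffered, and everything else is chopped down to a multiple
 * of 32 bits before being handed to the DMA engine.
 */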
static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
                                  struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct bfin_crypto_crc_reqctx *ctx;
        struct scatterlist *sg;
        int ret = 0;
        int nsg, i, j;
        unsigned int nextlen;
        unsigned long flags;

        spin_lock_irqsave(&crc->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&crc->queue, req);
        if (crc->busy) {
                spin_unlock_irqrestore(&crc->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&crc->queue);
        async_req = crypto_dequeue_request(&crc->queue);
        if (async_req)
                crc->busy = 1;
        spin_unlock_irqrestore(&crc->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        crc->req = req;
        ctx = ahash_request_ctx(req);
        ctx->sg = NULL;
        ctx->sg_buflen = 0;
        ctx->sg_nents = 0;

        dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
                                                ctx->flag, req->nbytes);

        if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
                if (ctx->bufnext_len == 0) {
                        crc->busy = 0;
                        return 0;
                }

                /* Pad the last update buffer out to a full 32-bit word */
                memset(ctx->bufnext + ctx->bufnext_len, 0,
                                CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
        } else {
                /* If the total still cannot fill a 32-bit word, just buffer it for the next update. */
                if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
                        memcpy(ctx->bufnext + ctx->bufnext_len,
                                sg_virt(req->src), req->nbytes);
                        ctx->bufnext_len += req->nbytes;
                        if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
                                ctx->bufnext_len) {
                                goto finish_update;
                        } else {
                                crc->busy = 0;
                                return 0;
                        }
                }

                if (ctx->bufnext_len) {
                        /* Chain in extra bytes of last update */
                        ctx->buflast_len = ctx->bufnext_len;
                        memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);

                        nsg = ctx->sg_buflen ? 2 : 1;
                        sg_init_table(ctx->bufsl, nsg);
                        sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
                        if (nsg > 1)
                                scatterwalk_sg_chain(ctx->bufsl, nsg,
                                                req->src);
                        ctx->sg = ctx->bufsl;
                } else
                        ctx->sg = req->src;

                /* Chop the crc buffer size down to a multiple of 32 bits */
                nsg = ctx->sg_nents = sg_count(ctx->sg);
                ctx->sg_buflen = ctx->buflast_len + req->nbytes;
                ctx->bufnext_len = ctx->sg_buflen % 4;
                ctx->sg_buflen &= ~0x3;

                if (ctx->bufnext_len) {
                        /* copy extra bytes to buffer for next update */
                        memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
                        nextlen = ctx->bufnext_len;
                        for (i = nsg - 1; i >= 0; i--) {
                                sg = sg_get(ctx->sg, nsg, i);
                                j = min(nextlen, sg_dma_len(sg));
                                memcpy(ctx->bufnext + nextlen - j,
                                        sg_virt(sg) + sg_dma_len(sg) - j, j);
                                if (j == sg_dma_len(sg))
                                        ctx->sg_nents--;
                                nextlen -= j;
                                if (nextlen == 0)
                                        break;
                        }
                }
        }

finish_update:
        if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
                ctx->flag == CRC_CRYPTO_STATE_FINISH))
                ctx->sg_buflen += CHKSUM_DIGEST_SIZE;

        /* set CRC data count before starting DMA */
        crc->regs->datacnt = ctx->sg_buflen >> 2;

        /* setup and enable CRC DMA */
        bfin_crypto_crc_config_dma(crc);

        /* finally kick off CRC operation */
        crc->regs->control |= BLKEN;
        SSYNC();

        return -EINPROGRESS;
}

static int bfin_crypto_crc_update(struct ahash_request *req)
{
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        if (!req->nbytes)
                return 0;

        dev_dbg(ctx->crc->dev, "crc_update\n");
        ctx->total += req->nbytes;
        ctx->flag = CRC_CRYPTO_STATE_UPDATE;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        dev_dbg(ctx->crc->dev, "crc_final\n");
        ctx->flag = CRC_CRYPTO_STATE_FINISH;
        crc_ctx->key = 0;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
        ctx->total += req->nbytes;
        ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
        crc_ctx->key = 0;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

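/* One-shot digest: re-seed the hardware via init, then finup the whole request. */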
static int bfin_crypto_crc_digest(struct ahash_request *req)
{
        int ret;

        ret = bfin_crypto_crc_init(req);
        if (ret)
                return ret;

        return bfin_crypto_crc_finup(req);
}

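/*
 * The "key" of this hmac(crc32) is simply the 32-bit seed the result
 * register is primed with; note that final()/finup() reset it to 0.
 */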
static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);

        dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
        if (keylen != CHKSUM_DIGEST_SIZE) {
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        crc_ctx->key = get_unaligned_le32(key);

        return 0;
}

static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
{
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);

        crc_ctx->key = 0;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct bfin_crypto_crc_reqctx));

        return 0;
}

static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
{
}

static struct ahash_alg algs = {
        .init           = bfin_crypto_crc_init,
        .update         = bfin_crypto_crc_update,
        .final          = bfin_crypto_crc_final,
        .finup          = bfin_crypto_crc_finup,
        .digest         = bfin_crypto_crc_digest,
        .setkey         = bfin_crypto_crc_setkey,
        .halg.digestsize        = CHKSUM_DIGEST_SIZE,
        .halg.base      = {
                .cra_name               = "hmac(crc32)",
                .cra_driver_name        = DRIVER_NAME,
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_ASYNC,
                .cra_blocksize          = CHKSUM_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct bfin_crypto_crc_ctx),
                .cra_alignmask          = 3,
                .cra_module             = THIS_MODULE,
                .cra_init               = bfin_crypto_crc_cra_init,
                .cra_exit               = bfin_crypto_crc_cra_exit,
        }
};
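
/*
 * A minimal sketch of how a kernel user could drive this accelerator
 * through the generic ahash API (the names data, len and my_done_cb
 * are placeholders, not part of this driver):
 *
 *      struct crypto_ahash *tfm;
 *      struct ahash_request *req;
 *      struct scatterlist sg;
 *      u8 seed[CHKSUM_DIGEST_SIZE] = { 0 };
 *      u8 digest[CHKSUM_DIGEST_SIZE];
 *
 *      tfm = crypto_alloc_ahash("hmac(crc32)", 0, 0);
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      crypto_ahash_setkey(tfm, seed, sizeof(seed));
 *      sg_init_one(&sg, data, len);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 my_done_cb, NULL);
 *      ahash_request_set_crypt(req, &sg, digest, len);
 *      crypto_ahash_digest(req);  (returns -EINPROGRESS; my_done_cb
 *                                  runs once the DMA completes)
 *      ...
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 */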

static void bfin_crypto_crc_done_task(unsigned long data)
{
        struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;

        bfin_crypto_crc_handle_queue(crc, NULL);
}

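/*
 * DCNTEXP fires once the programmed number of 32-bit words has passed
 * through the CRC block: latch the result back into the request,
 * complete it, and let the tasklet start whatever is queued next.
 */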
static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
{
        struct bfin_crypto_crc *crc = dev_id;

        if (crc->regs->status & DCNTEXP) {
                crc->regs->status = DCNTEXP;
                SSYNC();

                /* prepare results */
                put_unaligned_le32(crc->regs->result, crc->req->result);

                crc->regs->control &= ~BLKEN;
                crc->busy = 0;

                if (crc->req->base.complete)
                        crc->req->base.complete(&crc->req->base, 0);

                tasklet_schedule(&crc->done_task);

                return IRQ_HANDLED;
        } else
                return IRQ_NONE;
}

#ifdef CONFIG_PM
/**
 *      bfin_crypto_crc_suspend - suspend crc device
 *      @pdev: device being suspended
 *      @state: requested suspend state
 */
static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
        int i = 100000;

        while ((crc->regs->control & BLKEN) && --i)
                cpu_relax();

        if (i == 0)
                return -EBUSY;

        return 0;
}
#else
# define bfin_crypto_crc_suspend NULL
#endif

#define bfin_crypto_crc_resume NULL

/**
 *      bfin_crypto_crc_probe - initialize crc device
 *
 */
static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct bfin_crypto_crc *crc;
        unsigned int timeout = 100000;
        int ret;

        crc = kzalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                dev_err(&pdev->dev, "failed to allocate bfin_crypto_crc\n");
                return -ENOMEM;
        }

        crc->dev = dev;

        INIT_LIST_HEAD(&crc->list);
        spin_lock_init(&crc->lock);
        tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
        crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
                ret = -ENOENT;
                goto out_error_free_mem;
        }

        crc->regs = ioremap(res->start, resource_size(res));
        if (!crc->regs) {
                dev_err(&pdev->dev, "Cannot map CRC IO\n");
                ret = -ENXIO;
                goto out_error_free_mem;
        }

        crc->irq = platform_get_irq(pdev, 0);
        if (crc->irq < 0) {
                dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
                ret = -ENOENT;
                goto out_error_unmap;
        }

        ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc);
        if (ret) {
                dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
                goto out_error_unmap;
        }

        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "No CRC DMA channel specified\n");
                ret = -ENOENT;
                goto out_error_irq;
        }
        crc->dma_ch = res->start;

        ret = request_dma(crc->dma_ch, dev_name(dev));
        if (ret) {
                dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
                goto out_error_irq;
        }

        crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
        if (crc->sg_cpu == NULL) {
                ret = -ENOMEM;
                goto out_error_dma;
        }
        /*
         * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
         * 1 last + 1 next dma descriptors
         */
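        /* sg_mid_buf sits right behind the 2 * (CRC_MAX_DMA_DESC + 1)
         * descriptors in the same coherent page, four bytes per slot.
         */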
        crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));

        crc->regs->control = 0;
        SSYNC();
        crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
        SSYNC();

        while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
                cpu_relax();

        if (timeout == 0)
                dev_info(&pdev->dev, "init crc poly timeout\n");

        spin_lock(&crc_list.lock);
        list_add(&crc->list, &crc_list.dev_list);
        spin_unlock(&crc_list.lock);

        platform_set_drvdata(pdev, crc);

        ret = crypto_register_ahash(&algs);
        if (ret) {
                spin_lock(&crc_list.lock);
                list_del(&crc->list);
                spin_unlock(&crc_list.lock);
                dev_err(&pdev->dev, "Can't register crypto ahash device\n");
                goto out_error_dma;
        }

        dev_info(&pdev->dev, "initialized\n");

        return 0;

out_error_dma:
        if (crc->sg_cpu)
                dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
        free_dma(crc->dma_ch);
out_error_irq:
        free_irq(crc->irq, crc->dev);
out_error_unmap:
        iounmap((void *)crc->regs);
out_error_free_mem:
        kfree(crc);

        return ret;
}

/**
 *      bfin_crypto_crc_remove - remove crc device
 *
 */
static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
{
        struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);

        if (!crc)
                return -ENODEV;

        spin_lock(&crc_list.lock);
        list_del(&crc->list);
        spin_unlock(&crc_list.lock);

        crypto_unregister_ahash(&algs);
        tasklet_kill(&crc->done_task);
        iounmap((void *)crc->regs);
        free_dma(crc->dma_ch);
        if (crc->irq > 0)
                free_irq(crc->irq, crc->dev);
        kfree(crc);

        return 0;
}

static struct platform_driver bfin_crypto_crc_driver = {
        .probe     = bfin_crypto_crc_probe,
        .remove    = __devexit_p(bfin_crypto_crc_remove),
        .suspend   = bfin_crypto_crc_suspend,
        .resume    = bfin_crypto_crc_resume,
        .driver    = {
                .name  = DRIVER_NAME,
                .owner = THIS_MODULE,
        },
};

/**
 *      bfin_crypto_crc_mod_init - Initialize module
 *
 *      Checks the module params and registers the platform driver.
 *      Real work is in the platform probe function.
 */
static int __init bfin_crypto_crc_mod_init(void)
{
        int ret;

        pr_info("Blackfin hardware CRC crypto driver\n");

        INIT_LIST_HEAD(&crc_list.dev_list);
        spin_lock_init(&crc_list.lock);

        ret = platform_driver_register(&bfin_crypto_crc_driver);
        if (ret) {
                pr_err("unable to register driver\n");
                return ret;
        }

        return 0;
}

/**
 *      bfin_crypto_crc_mod_exit - Deinitialize module
 */
static void __exit bfin_crypto_crc_mod_exit(void)
{
        platform_driver_unregister(&bfin_crypto_crc_driver);
}

module_init(bfin_crypto_crc_mod_init);
module_exit(bfin_crypto_crc_mod_exit);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
MODULE_LICENSE("GPL");