linux/drivers/crypto/img-hash.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2014 Imagination Technologies
   4 * Authors:  Will Thomas, James Hartley
   5 *
   6 *      Interface structure taken from omap-sham driver
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/dmaengine.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/of_device.h>
  17#include <linux/platform_device.h>
  18#include <linux/scatterlist.h>
  19
  20#include <crypto/internal/hash.h>
  21#include <crypto/md5.h>
  22#include <crypto/sha1.h>
  23#include <crypto/sha2.h>
  24
  25#define CR_RESET                        0
  26#define CR_RESET_SET                    1
  27#define CR_RESET_UNSET                  0
  28
  29#define CR_MESSAGE_LENGTH_H             0x4
  30#define CR_MESSAGE_LENGTH_L             0x8
  31
  32#define CR_CONTROL                      0xc
  33#define CR_CONTROL_BYTE_ORDER_3210      0
  34#define CR_CONTROL_BYTE_ORDER_0123      1
  35#define CR_CONTROL_BYTE_ORDER_2310      2
  36#define CR_CONTROL_BYTE_ORDER_1032      3
  37#define CR_CONTROL_BYTE_ORDER_SHIFT     8
  38#define CR_CONTROL_ALGO_MD5     0
  39#define CR_CONTROL_ALGO_SHA1    1
  40#define CR_CONTROL_ALGO_SHA224  2
  41#define CR_CONTROL_ALGO_SHA256  3
  42
  43#define CR_INTSTAT                      0x10
  44#define CR_INTENAB                      0x14
  45#define CR_INTCLEAR                     0x18
  46#define CR_INT_RESULTS_AVAILABLE        BIT(0)
  47#define CR_INT_NEW_RESULTS_SET          BIT(1)
  48#define CR_INT_RESULT_READ_ERR          BIT(2)
  49#define CR_INT_MESSAGE_WRITE_ERROR      BIT(3)
  50#define CR_INT_STATUS                   BIT(8)
  51
  52#define CR_RESULT_QUEUE         0x1c
  53#define CR_RSD0                         0x40
  54#define CR_CORE_REV                     0x50
  55#define CR_CORE_DES1            0x60
  56#define CR_CORE_DES2            0x70
  57
  58#define DRIVER_FLAGS_BUSY               BIT(0)
  59#define DRIVER_FLAGS_FINAL              BIT(1)
  60#define DRIVER_FLAGS_DMA_ACTIVE         BIT(2)
  61#define DRIVER_FLAGS_OUTPUT_READY       BIT(3)
  62#define DRIVER_FLAGS_INIT               BIT(4)
  63#define DRIVER_FLAGS_CPU                BIT(5)
  64#define DRIVER_FLAGS_DMA_READY          BIT(6)
  65#define DRIVER_FLAGS_ERROR              BIT(7)
  66#define DRIVER_FLAGS_SG                 BIT(8)
  67#define DRIVER_FLAGS_SHA1               BIT(18)
  68#define DRIVER_FLAGS_SHA224             BIT(19)
  69#define DRIVER_FLAGS_SHA256             BIT(20)
  70#define DRIVER_FLAGS_MD5                BIT(21)
  71
  72#define IMG_HASH_QUEUE_LENGTH           20
  73#define IMG_HASH_DMA_BURST              4
  74#define IMG_HASH_DMA_THRESHOLD          64
  75
  76#ifdef __LITTLE_ENDIAN
  77#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_3210
  78#else
  79#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_0123
  80#endif
  81
  82struct img_hash_dev;
  83
  84struct img_hash_request_ctx {
  85        struct img_hash_dev     *hdev;
  86        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
  87        unsigned long           flags;
  88        size_t                  digsize;
  89
  90        dma_addr_t              dma_addr;
  91        size_t                  dma_ct;
  92
  93        /* sg root */
  94        struct scatterlist      *sgfirst;
  95        /* walk state */
  96        struct scatterlist      *sg;
  97        size_t                  nents;
  98        size_t                  offset;
  99        unsigned int            total;
 100        size_t                  sent;
 101
 102        unsigned long           op;
 103
 104        size_t                  bufcnt;
 105        struct ahash_request    fallback_req;
 106
  107        /* Zero-length buffer; must remain the last member of the struct */
 108        u8 buffer[] __aligned(sizeof(u32));
 109};
 110
 111struct img_hash_ctx {
 112        struct img_hash_dev     *hdev;
 113        unsigned long           flags;
 114        struct crypto_ahash     *fallback;
 115};
 116
 117struct img_hash_dev {
 118        struct list_head        list;
 119        struct device           *dev;
 120        struct clk              *hash_clk;
 121        struct clk              *sys_clk;
 122        void __iomem            *io_base;
 123
 124        phys_addr_t             bus_addr;
 125        void __iomem            *cpu_addr;
 126
 127        spinlock_t              lock;
 128        int                     err;
 129        struct tasklet_struct   done_task;
 130        struct tasklet_struct   dma_task;
 131
 132        unsigned long           flags;
 133        struct crypto_queue     queue;
 134        struct ahash_request    *req;
 135
 136        struct dma_chan         *dma_lch;
 137};
 138
 139struct img_hash_drv {
 140        struct list_head dev_list;
 141        spinlock_t lock;
 142};
 143
 144static struct img_hash_drv img_hash = {
 145        .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
 146        .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
 147};
 148
 149static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
 150{
 151        return readl_relaxed(hdev->io_base + offset);
 152}
 153
 154static inline void img_hash_write(struct img_hash_dev *hdev,
 155                                  u32 offset, u32 value)
 156{
 157        writel_relaxed(value, hdev->io_base + offset);
 158}
 159
 160static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
 161{
 162        return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
 163}
 164
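/*
 * Program CR_CONTROL with the byte order and the hashing algorithm selected
 * for the current request; writing the control register starts the hash
 * operation.
 */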
 165static void img_hash_start(struct img_hash_dev *hdev, bool dma)
 166{
 167        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 168        u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
 169
 170        if (ctx->flags & DRIVER_FLAGS_MD5)
 171                cr |= CR_CONTROL_ALGO_MD5;
 172        else if (ctx->flags & DRIVER_FLAGS_SHA1)
 173                cr |= CR_CONTROL_ALGO_SHA1;
 174        else if (ctx->flags & DRIVER_FLAGS_SHA224)
 175                cr |= CR_CONTROL_ALGO_SHA224;
 176        else if (ctx->flags & DRIVER_FLAGS_SHA256)
 177                cr |= CR_CONTROL_ALGO_SHA256;
 178        dev_dbg(hdev->dev, "Starting hash process\n");
 179        img_hash_write(hdev, CR_CONTROL, cr);
 180
  181        /*
  182         * In non-DMA mode the hardware block requires two cycles between
  183         * writing the control register and writing the first word of data.
  184         * To ensure the first data write is not grouped into a burst with
  185         * the control register write, a read is issued to 'flush' the bus.
  186         */
 187        if (!dma)
 188                img_hash_read(hdev, CR_CONTROL);
 189}
 190
 191static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
 192                             size_t length, int final)
 193{
 194        u32 count, len32;
 195        const u32 *buffer = (const u32 *)buf;
 196
 197        dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);
 198
 199        if (final)
 200                hdev->flags |= DRIVER_FLAGS_FINAL;
 201
 202        len32 = DIV_ROUND_UP(length, sizeof(u32));
 203
 204        for (count = 0; count < len32; count++)
 205                writel_relaxed(buffer[count], hdev->cpu_addr);
 206
 207        return -EINPROGRESS;
 208}
 209
 210static void img_hash_dma_callback(void *data)
 211{
 212        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 213        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 214
 215        if (ctx->bufcnt) {
 216                img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
 217                ctx->bufcnt = 0;
 218        }
 219        if (ctx->sg)
 220                tasklet_schedule(&hdev->dma_task);
 221}
 222
 223static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
 224{
 225        struct dma_async_tx_descriptor *desc;
 226        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 227
 228        ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 229        if (ctx->dma_ct == 0) {
 230                dev_err(hdev->dev, "Invalid DMA sg\n");
 231                hdev->err = -EINVAL;
 232                return -EINVAL;
 233        }
 234
 235        desc = dmaengine_prep_slave_sg(hdev->dma_lch,
 236                                       sg,
 237                                       ctx->dma_ct,
 238                                       DMA_MEM_TO_DEV,
 239                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 240        if (!desc) {
 241                dev_err(hdev->dev, "Null DMA descriptor\n");
 242                hdev->err = -EINVAL;
 243                dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 244                return -EINVAL;
 245        }
 246        desc->callback = img_hash_dma_callback;
 247        desc->callback_param = hdev;
 248        dmaengine_submit(desc);
 249        dma_async_issue_pending(hdev->dma_lch);
 250
 251        return 0;
 252}
 253
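/*
 * PIO path for small requests (below IMG_HASH_DMA_THRESHOLD): copy the whole
 * source scatterlist into the request-context buffer and push it to the
 * hardware with CPU register writes.
 */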
 254static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
 255{
 256        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 257
 258        ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
 259                                        ctx->buffer, hdev->req->nbytes);
 260
 261        ctx->total = hdev->req->nbytes;
 262        ctx->bufcnt = 0;
 263
 264        hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
 265
 266        img_hash_start(hdev, false);
 267
 268        return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
 269}
 270
 271static int img_hash_finish(struct ahash_request *req)
 272{
 273        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 274
 275        if (!req->result)
 276                return -EINVAL;
 277
 278        memcpy(req->result, ctx->digest, ctx->digsize);
 279
 280        return 0;
 281}
 282
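/*
 * Read the digest out of the hardware result queue, filling ctx->digest from
 * its last 32-bit word down to its first.
 */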
 283static void img_hash_copy_hash(struct ahash_request *req)
 284{
 285        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 286        u32 *hash = (u32 *)ctx->digest;
 287        int i;
 288
 289        for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
 290                hash[i] = img_hash_read_result_queue(ctx->hdev);
 291}
 292
 293static void img_hash_finish_req(struct ahash_request *req, int err)
 294{
 295        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 296        struct img_hash_dev *hdev =  ctx->hdev;
 297
 298        if (!err) {
 299                img_hash_copy_hash(req);
 300                if (DRIVER_FLAGS_FINAL & hdev->flags)
 301                        err = img_hash_finish(req);
 302        } else {
 303                dev_warn(hdev->dev, "Hash failed with error %d\n", err);
 304                ctx->flags |= DRIVER_FLAGS_ERROR;
 305        }
 306
 307        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
 308                DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
 309
 310        if (req->base.complete)
 311                req->base.complete(&req->base, err);
 312}
 313
 314static int img_hash_write_via_dma(struct img_hash_dev *hdev)
 315{
 316        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 317
 318        img_hash_start(hdev, true);
 319
 320        dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
 321
 322        if (!ctx->total)
 323                hdev->flags |= DRIVER_FLAGS_FINAL;
 324
 325        hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
 326
 327        tasklet_schedule(&hdev->dma_task);
 328
 329        return -EINPROGRESS;
 330}
 331
 332static int img_hash_dma_init(struct img_hash_dev *hdev)
 333{
 334        struct dma_slave_config dma_conf;
 335        int err;
 336
 337        hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
 338        if (IS_ERR(hdev->dma_lch)) {
 339                dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 340                return PTR_ERR(hdev->dma_lch);
 341        }
 342        dma_conf.direction = DMA_MEM_TO_DEV;
 343        dma_conf.dst_addr = hdev->bus_addr;
 344        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 345        dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
 346        dma_conf.device_fc = false;
 347
 348        err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
 349        if (err) {
 350                dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
 351                dma_release_channel(hdev->dma_lch);
 352                return err;
 353        }
 354
 355        return 0;
 356}
 357
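/*
 * DMA tasklet: walk the request's scatterlist, sending the whole 32-bit words
 * of each entry by DMA and buffering any sub-word remainder so it can be
 * pushed by PIO and carried into the next transfer.
 */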
 358static void img_hash_dma_task(unsigned long d)
 359{
 360        struct img_hash_dev *hdev = (struct img_hash_dev *)d;
 361        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 362        u8 *addr;
 363        size_t nbytes, bleft, wsend, len, tbc;
 364        struct scatterlist tsg;
 365
 366        if (!hdev->req || !ctx->sg)
 367                return;
 368
 369        addr = sg_virt(ctx->sg);
 370        nbytes = ctx->sg->length - ctx->offset;
 371
  372        /*
  373         * The hash accelerator does not support a data valid mask, so if a
  374         * DMA transfer (i.e. per page) is not a multiple of 4 bytes, the
  375         * padding bytes in its last word would erroneously be included in
  376         * the hash. To avoid this, round the transfer down to a whole
  377         * number of words and carry the excess over to the start of the
  378         * next transfer. The final transfer need not be a multiple of 4
  379         * bytes, as the hashing block is programmed with the exact length.
  380         */
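
        /*
         * For example (illustrative sizes): a 4099-byte sg entry results in a
         * 4096-byte DMA transfer; the remaining 3 bytes are copied into
         * ctx->buffer, topped up from the next sg entry where possible, and
         * written by PIO from the DMA completion callback.
         */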
 381
 382        bleft = nbytes % 4;
 383        wsend = (nbytes / 4);
 384
 385        if (wsend) {
 386                sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
 387                if (img_hash_xmit_dma(hdev, &tsg)) {
  388                        dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
 389                        ctx->flags |= DRIVER_FLAGS_CPU;
 390                        hdev->err = 0;
 391                        img_hash_xmit_cpu(hdev, addr + ctx->offset,
 392                                          wsend * 4, 0);
 393                        ctx->sent += wsend * 4;
 394                        wsend = 0;
 395                } else {
 396                        ctx->sent += wsend * 4;
 397                }
 398        }
 399
 400        if (bleft) {
 401                ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 402                                                 ctx->buffer, bleft, ctx->sent);
 403                tbc = 0;
 404                ctx->sg = sg_next(ctx->sg);
 405                while (ctx->sg && (ctx->bufcnt < 4)) {
 406                        len = ctx->sg->length;
 407                        if (likely(len > (4 - ctx->bufcnt)))
 408                                len = 4 - ctx->bufcnt;
 409                        tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 410                                                 ctx->buffer + ctx->bufcnt, len,
 411                                        ctx->sent + ctx->bufcnt);
 412                        ctx->bufcnt += tbc;
 413                        if (tbc >= ctx->sg->length) {
 414                                ctx->sg = sg_next(ctx->sg);
 415                                tbc = 0;
 416                        }
 417                }
 418
 419                ctx->sent += ctx->bufcnt;
 420                ctx->offset = tbc;
 421
 422                if (!wsend)
 423                        img_hash_dma_callback(hdev);
 424        } else {
 425                ctx->offset = 0;
 426                ctx->sg = sg_next(ctx->sg);
 427        }
 428}
 429
 430static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
 431{
 432        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 433
 434        if (ctx->flags & DRIVER_FLAGS_SG)
 435                dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
 436
 437        return 0;
 438}
 439
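/*
 * Pick the transfer method for the current request: DMA for requests of
 * IMG_HASH_DMA_THRESHOLD bytes or more, PIO otherwise.
 */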
 440static int img_hash_process_data(struct img_hash_dev *hdev)
 441{
 442        struct ahash_request *req = hdev->req;
 443        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 444        int err = 0;
 445
 446        ctx->bufcnt = 0;
 447
 448        if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
 449                dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
 450                        req->nbytes);
 451                err = img_hash_write_via_dma(hdev);
 452        } else {
 453                dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
 454                        req->nbytes);
 455                err = img_hash_write_via_cpu(hdev);
 456        }
 457        return err;
 458}
 459
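/*
 * Reset the accelerator, enable the 'new results' interrupt and program the
 * total message length in bits across the two length registers.
 */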
 460static int img_hash_hw_init(struct img_hash_dev *hdev)
 461{
 462        unsigned long long nbits;
 463        u32 u, l;
 464
 465        img_hash_write(hdev, CR_RESET, CR_RESET_SET);
 466        img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
 467        img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
 468
 469        nbits = (u64)hdev->req->nbytes << 3;
 470        u = nbits >> 32;
 471        l = nbits;
 472        img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
 473        img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
 474
 475        if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
 476                hdev->flags |= DRIVER_FLAGS_INIT;
 477                hdev->err = 0;
 478        }
 479        dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
 480        return 0;
 481}
 482
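/*
 * init/update/final/finup/export/import are delegated to the software
 * fallback transform; only digest() is handled by the hardware.
 */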
 483static int img_hash_init(struct ahash_request *req)
 484{
 485        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 486        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 487        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 488
 489        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 490        rctx->fallback_req.base.flags = req->base.flags
 491                & CRYPTO_TFM_REQ_MAY_SLEEP;
 492
 493        return crypto_ahash_init(&rctx->fallback_req);
 494}
 495
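/*
 * Queue handling: enqueue the new request and, if the hardware is idle, pull
 * the next request off the queue and start it. Requests that fail before
 * reaching the interrupt path are completed here.
 */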
 496static int img_hash_handle_queue(struct img_hash_dev *hdev,
 497                                 struct ahash_request *req)
 498{
 499        struct crypto_async_request *async_req, *backlog;
 500        struct img_hash_request_ctx *ctx;
 501        unsigned long flags;
 502        int err = 0, res = 0;
 503
 504        spin_lock_irqsave(&hdev->lock, flags);
 505
 506        if (req)
 507                res = ahash_enqueue_request(&hdev->queue, req);
 508
 509        if (DRIVER_FLAGS_BUSY & hdev->flags) {
 510                spin_unlock_irqrestore(&hdev->lock, flags);
 511                return res;
 512        }
 513
 514        backlog = crypto_get_backlog(&hdev->queue);
 515        async_req = crypto_dequeue_request(&hdev->queue);
 516        if (async_req)
 517                hdev->flags |= DRIVER_FLAGS_BUSY;
 518
 519        spin_unlock_irqrestore(&hdev->lock, flags);
 520
 521        if (!async_req)
 522                return res;
 523
 524        if (backlog)
 525                backlog->complete(backlog, -EINPROGRESS);
 526
 527        req = ahash_request_cast(async_req);
 528        hdev->req = req;
 529
 530        ctx = ahash_request_ctx(req);
 531
 532        dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
 533                 ctx->op, req->nbytes);
 534
 535        err = img_hash_hw_init(hdev);
 536
 537        if (!err)
 538                err = img_hash_process_data(hdev);
 539
 540        if (err != -EINPROGRESS) {
 541                /* done_task will not finish so do it here */
 542                img_hash_finish_req(req, err);
 543        }
 544        return res;
 545}
 546
 547static int img_hash_update(struct ahash_request *req)
 548{
 549        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 550        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 551        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 552
 553        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 554        rctx->fallback_req.base.flags = req->base.flags
 555                & CRYPTO_TFM_REQ_MAY_SLEEP;
 556        rctx->fallback_req.nbytes = req->nbytes;
 557        rctx->fallback_req.src = req->src;
 558
 559        return crypto_ahash_update(&rctx->fallback_req);
 560}
 561
 562static int img_hash_final(struct ahash_request *req)
 563{
 564        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 565        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 566        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 567
 568        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 569        rctx->fallback_req.base.flags = req->base.flags
 570                & CRYPTO_TFM_REQ_MAY_SLEEP;
 571        rctx->fallback_req.result = req->result;
 572
 573        return crypto_ahash_final(&rctx->fallback_req);
 574}
 575
 576static int img_hash_finup(struct ahash_request *req)
 577{
 578        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 579        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 580        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 581
 582        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 583        rctx->fallback_req.base.flags = req->base.flags
 584                & CRYPTO_TFM_REQ_MAY_SLEEP;
 585        rctx->fallback_req.nbytes = req->nbytes;
 586        rctx->fallback_req.src = req->src;
 587        rctx->fallback_req.result = req->result;
 588
 589        return crypto_ahash_finup(&rctx->fallback_req);
 590}
 591
 592static int img_hash_import(struct ahash_request *req, const void *in)
 593{
 594        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 595        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 596        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 597
 598        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 599        rctx->fallback_req.base.flags = req->base.flags
 600                & CRYPTO_TFM_REQ_MAY_SLEEP;
 601
 602        return crypto_ahash_import(&rctx->fallback_req, in);
 603}
 604
 605static int img_hash_export(struct ahash_request *req, void *out)
 606{
 607        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 608        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 609        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 610
 611        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 612        rctx->fallback_req.base.flags = req->base.flags
 613                & CRYPTO_TFM_REQ_MAY_SLEEP;
 614
 615        return crypto_ahash_export(&rctx->fallback_req, out);
 616}
 617
 618static int img_hash_digest(struct ahash_request *req)
 619{
 620        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 621        struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
 622        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 623        struct img_hash_dev *hdev = NULL;
 624        struct img_hash_dev *tmp;
 625        int err;
 626
 627        spin_lock(&img_hash.lock);
 628        if (!tctx->hdev) {
 629                list_for_each_entry(tmp, &img_hash.dev_list, list) {
 630                        hdev = tmp;
 631                        break;
 632                }
 633                tctx->hdev = hdev;
 634
 635        } else {
 636                hdev = tctx->hdev;
 637        }
 638
 639        spin_unlock(&img_hash.lock);
 640        ctx->hdev = hdev;
 641        ctx->flags = 0;
 642        ctx->digsize = crypto_ahash_digestsize(tfm);
 643
 644        switch (ctx->digsize) {
 645        case SHA1_DIGEST_SIZE:
 646                ctx->flags |= DRIVER_FLAGS_SHA1;
 647                break;
 648        case SHA256_DIGEST_SIZE:
 649                ctx->flags |= DRIVER_FLAGS_SHA256;
 650                break;
 651        case SHA224_DIGEST_SIZE:
 652                ctx->flags |= DRIVER_FLAGS_SHA224;
 653                break;
 654        case MD5_DIGEST_SIZE:
 655                ctx->flags |= DRIVER_FLAGS_MD5;
 656                break;
 657        default:
 658                return -EINVAL;
 659        }
 660
 661        ctx->bufcnt = 0;
 662        ctx->offset = 0;
 663        ctx->sent = 0;
 664        ctx->total = req->nbytes;
 665        ctx->sg = req->src;
 666        ctx->sgfirst = req->src;
 667        ctx->nents = sg_nents(ctx->sg);
 668
 669        err = img_hash_handle_queue(tctx->hdev, req);
 670
 671        return err;
 672}
 673
 674static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
 675{
 676        struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 677        int err = -ENOMEM;
 678
 679        ctx->fallback = crypto_alloc_ahash(alg_name, 0,
 680                                           CRYPTO_ALG_NEED_FALLBACK);
 681        if (IS_ERR(ctx->fallback)) {
 682                pr_err("img_hash: Could not load fallback driver.\n");
 683                err = PTR_ERR(ctx->fallback);
 684                goto err;
 685        }
 686        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 687                                 sizeof(struct img_hash_request_ctx) +
 688                                 crypto_ahash_reqsize(ctx->fallback) +
 689                                 IMG_HASH_DMA_THRESHOLD);
 690
 691        return 0;
 692
 693err:
 694        return err;
 695}
 696
 697static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
 698{
 699        return img_hash_cra_init(tfm, "md5-generic");
 700}
 701
 702static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
 703{
 704        return img_hash_cra_init(tfm, "sha1-generic");
 705}
 706
 707static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
 708{
 709        return img_hash_cra_init(tfm, "sha224-generic");
 710}
 711
 712static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
 713{
 714        return img_hash_cra_init(tfm, "sha256-generic");
 715}
 716
 717static void img_hash_cra_exit(struct crypto_tfm *tfm)
 718{
 719        struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
 720
 721        crypto_free_ahash(tctx->fallback);
 722}
 723
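/*
 * Interrupt handler: acknowledge the raised status bits and schedule the done
 * tasklet once new results are available for an active request.
 */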
 724static irqreturn_t img_irq_handler(int irq, void *dev_id)
 725{
 726        struct img_hash_dev *hdev = dev_id;
 727        u32 reg;
 728
 729        reg = img_hash_read(hdev, CR_INTSTAT);
 730        img_hash_write(hdev, CR_INTCLEAR, reg);
 731
 732        if (reg & CR_INT_NEW_RESULTS_SET) {
 733                dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
 734                if (DRIVER_FLAGS_BUSY & hdev->flags) {
 735                        hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
 736                        if (!(DRIVER_FLAGS_CPU & hdev->flags))
 737                                hdev->flags |= DRIVER_FLAGS_DMA_READY;
 738                        tasklet_schedule(&hdev->done_task);
 739                } else {
 740                        dev_warn(hdev->dev,
 741                                 "HASH interrupt when no active requests.\n");
 742                }
 743        } else if (reg & CR_INT_RESULTS_AVAILABLE) {
 744                dev_warn(hdev->dev,
 745                         "IRQ triggered before the hash had completed\n");
 746        } else if (reg & CR_INT_RESULT_READ_ERR) {
 747                dev_warn(hdev->dev,
 748                         "Attempt to read from an empty result queue\n");
 749        } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
 750                dev_warn(hdev->dev,
 751                         "Data written before the hardware was configured\n");
 752        }
 753        return IRQ_HANDLED;
 754}
 755
 756static struct ahash_alg img_algs[] = {
 757        {
 758                .init = img_hash_init,
 759                .update = img_hash_update,
 760                .final = img_hash_final,
 761                .finup = img_hash_finup,
 762                .export = img_hash_export,
 763                .import = img_hash_import,
 764                .digest = img_hash_digest,
 765                .halg = {
 766                        .digestsize = MD5_DIGEST_SIZE,
 767                        .statesize = sizeof(struct md5_state),
 768                        .base = {
 769                                .cra_name = "md5",
 770                                .cra_driver_name = "img-md5",
 771                                .cra_priority = 300,
 772                                .cra_flags =
 773                                CRYPTO_ALG_ASYNC |
 774                                CRYPTO_ALG_NEED_FALLBACK,
 775                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 776                                .cra_ctxsize = sizeof(struct img_hash_ctx),
 777                                .cra_init = img_hash_cra_md5_init,
 778                                .cra_exit = img_hash_cra_exit,
 779                                .cra_module = THIS_MODULE,
 780                        }
 781                }
 782        },
 783        {
 784                .init = img_hash_init,
 785                .update = img_hash_update,
 786                .final = img_hash_final,
 787                .finup = img_hash_finup,
 788                .export = img_hash_export,
 789                .import = img_hash_import,
 790                .digest = img_hash_digest,
 791                .halg = {
 792                        .digestsize = SHA1_DIGEST_SIZE,
 793                        .statesize = sizeof(struct sha1_state),
 794                        .base = {
 795                                .cra_name = "sha1",
 796                                .cra_driver_name = "img-sha1",
 797                                .cra_priority = 300,
 798                                .cra_flags =
 799                                CRYPTO_ALG_ASYNC |
 800                                CRYPTO_ALG_NEED_FALLBACK,
 801                                .cra_blocksize = SHA1_BLOCK_SIZE,
 802                                .cra_ctxsize = sizeof(struct img_hash_ctx),
 803                                .cra_init = img_hash_cra_sha1_init,
 804                                .cra_exit = img_hash_cra_exit,
 805                                .cra_module = THIS_MODULE,
 806                        }
 807                }
 808        },
 809        {
 810                .init = img_hash_init,
 811                .update = img_hash_update,
 812                .final = img_hash_final,
 813                .finup = img_hash_finup,
 814                .export = img_hash_export,
 815                .import = img_hash_import,
 816                .digest = img_hash_digest,
 817                .halg = {
 818                        .digestsize = SHA224_DIGEST_SIZE,
 819                        .statesize = sizeof(struct sha256_state),
 820                        .base = {
 821                                .cra_name = "sha224",
 822                                .cra_driver_name = "img-sha224",
 823                                .cra_priority = 300,
 824                                .cra_flags =
 825                                CRYPTO_ALG_ASYNC |
 826                                CRYPTO_ALG_NEED_FALLBACK,
 827                                .cra_blocksize = SHA224_BLOCK_SIZE,
 828                                .cra_ctxsize = sizeof(struct img_hash_ctx),
 829                                .cra_init = img_hash_cra_sha224_init,
 830                                .cra_exit = img_hash_cra_exit,
 831                                .cra_module = THIS_MODULE,
 832                        }
 833                }
 834        },
 835        {
 836                .init = img_hash_init,
 837                .update = img_hash_update,
 838                .final = img_hash_final,
 839                .finup = img_hash_finup,
 840                .export = img_hash_export,
 841                .import = img_hash_import,
 842                .digest = img_hash_digest,
 843                .halg = {
 844                        .digestsize = SHA256_DIGEST_SIZE,
 845                        .statesize = sizeof(struct sha256_state),
 846                        .base = {
 847                                .cra_name = "sha256",
 848                                .cra_driver_name = "img-sha256",
 849                                .cra_priority = 300,
 850                                .cra_flags =
 851                                CRYPTO_ALG_ASYNC |
 852                                CRYPTO_ALG_NEED_FALLBACK,
 853                                .cra_blocksize = SHA256_BLOCK_SIZE,
 854                                .cra_ctxsize = sizeof(struct img_hash_ctx),
 855                                .cra_init = img_hash_cra_sha256_init,
 856                                .cra_exit = img_hash_cra_exit,
 857                                .cra_module = THIS_MODULE,
 858                        }
 859                }
 860        }
 861};
 862
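/*
 * Usage sketch (illustrative only, not part of this driver): a kernel caller
 * reaches these implementations through the generic ahash API, e.g. for
 * "sha256", which "img-sha256" serves when it wins on priority:
 *
 *        DECLARE_CRYPTO_WAIT(wait);
 *        struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *        struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *        struct scatterlist sg;
 *        u8 digest[SHA256_DIGEST_SIZE];
 *
 *        sg_init_one(&sg, data, len);
 *        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                   crypto_req_done, &wait);
 *        ahash_request_set_crypt(req, &sg, digest, len);
 *        crypto_wait_req(crypto_ahash_digest(req), &wait);
 *        ahash_request_free(req);
 *        crypto_free_ahash(tfm);
 *
 * Error handling is omitted; "data" and "len" are placeholders.
 */
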
 863static int img_register_algs(struct img_hash_dev *hdev)
 864{
 865        int i, err;
 866
 867        for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
 868                err = crypto_register_ahash(&img_algs[i]);
 869                if (err)
 870                        goto err_reg;
 871        }
 872        return 0;
 873
 874err_reg:
 875        for (; i--; )
 876                crypto_unregister_ahash(&img_algs[i]);
 877
 878        return err;
 879}
 880
 881static int img_unregister_algs(struct img_hash_dev *hdev)
 882{
 883        int i;
 884
 885        for (i = 0; i < ARRAY_SIZE(img_algs); i++)
 886                crypto_unregister_ahash(&img_algs[i]);
 887        return 0;
 888}
 889
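/*
 * Done tasklet, scheduled from the interrupt handler: finish the CPU or DMA
 * transfer whose results are ready and complete the request, or restart the
 * queue if the hardware has gone idle.
 */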
 890static void img_hash_done_task(unsigned long data)
 891{
 892        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 893        int err = 0;
 894
 895        if (hdev->err == -EINVAL) {
 896                err = hdev->err;
 897                goto finish;
 898        }
 899
 900        if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
 901                img_hash_handle_queue(hdev, NULL);
 902                return;
 903        }
 904
 905        if (DRIVER_FLAGS_CPU & hdev->flags) {
 906                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 907                        hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
 908                        goto finish;
 909                }
 910        } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
 911                if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
 912                        hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
 913                        img_hash_write_via_dma_stop(hdev);
 914                        if (hdev->err) {
 915                                err = hdev->err;
 916                                goto finish;
 917                        }
 918                }
 919                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 920                        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
 921                                        DRIVER_FLAGS_OUTPUT_READY);
 922                        goto finish;
 923                }
 924        }
 925        return;
 926
 927finish:
 928        img_hash_finish_req(hdev->req, err);
 929}
 930
 931static const struct of_device_id img_hash_match[] = {
 932        { .compatible = "img,hash-accelerator" },
 933        {}
 934};
 935MODULE_DEVICE_TABLE(of, img_hash_match);
 936
 937static int img_hash_probe(struct platform_device *pdev)
 938{
 939        struct img_hash_dev *hdev;
 940        struct device *dev = &pdev->dev;
 941        struct resource *hash_res;
 942        int     irq;
 943        int err;
 944
 945        hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
 946        if (hdev == NULL)
 947                return -ENOMEM;
 948
 949        spin_lock_init(&hdev->lock);
 950
 951        hdev->dev = dev;
 952
 953        platform_set_drvdata(pdev, hdev);
 954
 955        INIT_LIST_HEAD(&hdev->list);
 956
 957        tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
 958        tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
 959
 960        crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
 961
 962        /* Register bank */
 963        hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
 964        if (IS_ERR(hdev->io_base)) {
 965                err = PTR_ERR(hdev->io_base);
 966                goto res_err;
 967        }
 968
 969        /* Write port (DMA or CPU) */
 970        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 971        hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
 972        if (IS_ERR(hdev->cpu_addr)) {
 973                err = PTR_ERR(hdev->cpu_addr);
 974                goto res_err;
 975        }
 976        hdev->bus_addr = hash_res->start;
 977
 978        irq = platform_get_irq(pdev, 0);
 979        if (irq < 0) {
 980                err = irq;
 981                goto res_err;
 982        }
 983
 984        err = devm_request_irq(dev, irq, img_irq_handler, 0,
 985                               dev_name(dev), hdev);
 986        if (err) {
 987                dev_err(dev, "unable to request irq\n");
 988                goto res_err;
 989        }
 990        dev_dbg(dev, "using IRQ channel %d\n", irq);
 991
 992        hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
 993        if (IS_ERR(hdev->hash_clk)) {
 994                dev_err(dev, "clock initialization failed.\n");
 995                err = PTR_ERR(hdev->hash_clk);
 996                goto res_err;
 997        }
 998
 999        hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
1000        if (IS_ERR(hdev->sys_clk)) {
1001                dev_err(dev, "clock initialization failed.\n");
1002                err = PTR_ERR(hdev->sys_clk);
1003                goto res_err;
1004        }
1005
1006        err = clk_prepare_enable(hdev->hash_clk);
1007        if (err)
1008                goto res_err;
1009
1010        err = clk_prepare_enable(hdev->sys_clk);
1011        if (err)
1012                goto clk_err;
1013
1014        err = img_hash_dma_init(hdev);
1015        if (err)
1016                goto dma_err;
1017
1018        dev_dbg(dev, "using %s for DMA transfers\n",
1019                dma_chan_name(hdev->dma_lch));
1020
1021        spin_lock(&img_hash.lock);
1022        list_add_tail(&hdev->list, &img_hash.dev_list);
1023        spin_unlock(&img_hash.lock);
1024
1025        err = img_register_algs(hdev);
1026        if (err)
1027                goto err_algs;
1028        dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
1029
1030        return 0;
1031
1032err_algs:
1033        spin_lock(&img_hash.lock);
1034        list_del(&hdev->list);
1035        spin_unlock(&img_hash.lock);
1036        dma_release_channel(hdev->dma_lch);
1037dma_err:
1038        clk_disable_unprepare(hdev->sys_clk);
1039clk_err:
1040        clk_disable_unprepare(hdev->hash_clk);
1041res_err:
1042        tasklet_kill(&hdev->done_task);
1043        tasklet_kill(&hdev->dma_task);
1044
1045        return err;
1046}
1047
1048static int img_hash_remove(struct platform_device *pdev)
1049{
1050        struct img_hash_dev *hdev;
1051
1052        hdev = platform_get_drvdata(pdev);
1053        spin_lock(&img_hash.lock);
1054        list_del(&hdev->list);
1055        spin_unlock(&img_hash.lock);
1056
1057        img_unregister_algs(hdev);
1058
1059        tasklet_kill(&hdev->done_task);
1060        tasklet_kill(&hdev->dma_task);
1061
1062        dma_release_channel(hdev->dma_lch);
1063
1064        clk_disable_unprepare(hdev->hash_clk);
1065        clk_disable_unprepare(hdev->sys_clk);
1066
1067        return 0;
1068}
1069
1070#ifdef CONFIG_PM_SLEEP
1071static int img_hash_suspend(struct device *dev)
1072{
1073        struct img_hash_dev *hdev = dev_get_drvdata(dev);
1074
1075        clk_disable_unprepare(hdev->hash_clk);
1076        clk_disable_unprepare(hdev->sys_clk);
1077
1078        return 0;
1079}
1080
1081static int img_hash_resume(struct device *dev)
1082{
1083        struct img_hash_dev *hdev = dev_get_drvdata(dev);
1084        int ret;
1085
1086        ret = clk_prepare_enable(hdev->hash_clk);
1087        if (ret)
1088                return ret;
1089
1090        ret = clk_prepare_enable(hdev->sys_clk);
1091        if (ret) {
1092                clk_disable_unprepare(hdev->hash_clk);
1093                return ret;
1094        }
1095
1096        return 0;
1097}
1098#endif /* CONFIG_PM_SLEEP */
1099
1100static const struct dev_pm_ops img_hash_pm_ops = {
1101        SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
1102};
1103
1104static struct platform_driver img_hash_driver = {
1105        .probe          = img_hash_probe,
1106        .remove         = img_hash_remove,
1107        .driver         = {
1108                .name   = "img-hash-accelerator",
1109                .pm     = &img_hash_pm_ops,
1110                .of_match_table = of_match_ptr(img_hash_match),
1111        }
1112};
1113module_platform_driver(img_hash_driver);
1114
1115MODULE_LICENSE("GPL v2");
1116MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
 1117MODULE_AUTHOR("Will Thomas");
1118MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");
1119