linux/drivers/crypto/img-hash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors:  Will Thomas, James Hartley
 *
 *      Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET                        0
#define CR_RESET_SET                    1
#define CR_RESET_UNSET                  0

#define CR_MESSAGE_LENGTH_H             0x4
#define CR_MESSAGE_LENGTH_L             0x8

#define CR_CONTROL                      0xc
#define CR_CONTROL_BYTE_ORDER_3210      0
#define CR_CONTROL_BYTE_ORDER_0123      1
#define CR_CONTROL_BYTE_ORDER_2310      2
#define CR_CONTROL_BYTE_ORDER_1032      3
#define CR_CONTROL_BYTE_ORDER_SHIFT     8
#define CR_CONTROL_ALGO_MD5     0
#define CR_CONTROL_ALGO_SHA1    1
#define CR_CONTROL_ALGO_SHA224  2
#define CR_CONTROL_ALGO_SHA256  3

#define CR_INTSTAT                      0x10
#define CR_INTENAB                      0x14
#define CR_INTCLEAR                     0x18
#define CR_INT_RESULTS_AVAILABLE        BIT(0)
#define CR_INT_NEW_RESULTS_SET          BIT(1)
#define CR_INT_RESULT_READ_ERR          BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR      BIT(3)
#define CR_INT_STATUS                   BIT(8)

#define CR_RESULT_QUEUE         0x1c
#define CR_RSD0                         0x40
#define CR_CORE_REV                     0x50
#define CR_CORE_DES1            0x60
#define CR_CORE_DES2            0x70

#define DRIVER_FLAGS_BUSY               BIT(0)
#define DRIVER_FLAGS_FINAL              BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE         BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY       BIT(3)
#define DRIVER_FLAGS_INIT               BIT(4)
#define DRIVER_FLAGS_CPU                BIT(5)
#define DRIVER_FLAGS_DMA_READY          BIT(6)
#define DRIVER_FLAGS_ERROR              BIT(7)
#define DRIVER_FLAGS_SG                 BIT(8)
#define DRIVER_FLAGS_SHA1               BIT(18)
#define DRIVER_FLAGS_SHA224             BIT(19)
#define DRIVER_FLAGS_SHA256             BIT(20)
#define DRIVER_FLAGS_MD5                BIT(21)

#define IMG_HASH_QUEUE_LENGTH           20
#define IMG_HASH_DMA_BURST              4
#define IMG_HASH_DMA_THRESHOLD          64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
        struct img_hash_dev     *hdev;
        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
        unsigned long           flags;
        size_t                  digsize;

        dma_addr_t              dma_addr;
        size_t                  dma_ct;

        /* sg root */
        struct scatterlist      *sgfirst;
        /* walk state */
        struct scatterlist      *sg;
        size_t                  nents;
        size_t                  offset;
        unsigned int            total;
        size_t                  sent;

        unsigned long           op;

        size_t                  bufcnt;
        struct ahash_request    fallback_req;

        /* Zero length buffer must remain last member of struct */
        u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
        struct img_hash_dev     *hdev;
        unsigned long           flags;
        struct crypto_ahash     *fallback;
};

struct img_hash_dev {
        struct list_head        list;
        struct device           *dev;
        struct clk              *hash_clk;
        struct clk              *sys_clk;
        void __iomem            *io_base;

        phys_addr_t             bus_addr;
        void __iomem            *cpu_addr;

        spinlock_t              lock;
        int                     err;
        struct tasklet_struct   done_task;
        struct tasklet_struct   dma_task;

        unsigned long           flags;
        struct crypto_queue     queue;
        struct ahash_request    *req;

        struct dma_chan         *dma_lch;
};

struct img_hash_drv {
        struct list_head dev_list;
        spinlock_t lock;
};

static struct img_hash_drv img_hash = {
        .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
        return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
                                  u32 offset, u32 value)
{
        writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
        return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

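/* Select the byte order and hash algorithm and start a new operation. */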
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

        if (ctx->flags & DRIVER_FLAGS_MD5)
                cr |= CR_CONTROL_ALGO_MD5;
        else if (ctx->flags & DRIVER_FLAGS_SHA1)
                cr |= CR_CONTROL_ALGO_SHA1;
        else if (ctx->flags & DRIVER_FLAGS_SHA224)
                cr |= CR_CONTROL_ALGO_SHA224;
        else if (ctx->flags & DRIVER_FLAGS_SHA256)
                cr |= CR_CONTROL_ALGO_SHA256;
        dev_dbg(hdev->dev, "Starting hash process\n");
        img_hash_write(hdev, CR_CONTROL, cr);

        /*
         * The hardware block requires two cycles between writing the control
         * register and writing the first word of data in non-DMA mode. To
         * ensure the first data write is not grouped in a burst with the
         * control register write, a read is issued to 'flush' the bus.
         */
        if (!dma)
                img_hash_read(hdev, CR_CONTROL);
}

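/* Push @length bytes through the accelerator's write port using CPU writes. */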
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
                             size_t length, int final)
{
        u32 count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);

        if (final)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                writel_relaxed(buffer[count], hdev->cpu_addr);

        return -EINPROGRESS;
}

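/*
 * DMA completion callback: flush any buffered sub-word remainder through the
 * CPU path and schedule the tasklet that sends the next chunk.
 */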
static void img_hash_dma_callback(void *data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->bufcnt) {
                img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
                ctx->bufcnt = 0;
        }
        if (ctx->sg)
                tasklet_schedule(&hdev->dma_task);
}

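/* Map a single scatterlist entry and submit it to the DMA engine. */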
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *desc;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
        if (ctx->dma_ct == 0) {
                dev_err(hdev->dev, "Invalid DMA sg\n");
                hdev->err = -EINVAL;
                return -EINVAL;
        }

        desc = dmaengine_prep_slave_sg(hdev->dma_lch,
                                       sg,
                                       ctx->dma_ct,
                                       DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(hdev->dev, "Null DMA descriptor\n");
                hdev->err = -EINVAL;
                dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
                return -EINVAL;
        }
        desc->callback = img_hash_dma_callback;
        desc->callback_param = hdev;
        dmaengine_submit(desc);
        dma_async_issue_pending(hdev->dma_lch);

        return 0;
}

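/*
 * PIO path: copy the whole request into the context bounce buffer and feed it
 * to the hardware with CPU writes.
 */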
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
                                        ctx->buffer, hdev->req->nbytes);

        ctx->total = hdev->req->nbytes;
        ctx->bufcnt = 0;

        hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

        img_hash_start(hdev, false);

        return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

        if (!req->result)
                return -EINVAL;

        memcpy(req->result, ctx->digest, ctx->digsize);

        return 0;
}

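/*
 * Read the digest back from the hardware result queue, filling ctx->digest
 * from the last word down to the first.
 */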
static void img_hash_copy_hash(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
        int i;

        for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
                hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = ctx->hdev;

        if (!err) {
                img_hash_copy_hash(req);
                if (DRIVER_FLAGS_FINAL & hdev->flags)
                        err = img_hash_finish(req);
        } else {
                dev_warn(hdev->dev, "Hash failed with error %d\n", err);
                ctx->flags |= DRIVER_FLAGS_ERROR;
        }

        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
                DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

        if (req->base.complete)
                req->base.complete(&req->base, err);
}

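/*
 * DMA path: start the hardware and let the dma_task tasklet stream the
 * request's scatterlist to it.
 */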
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        img_hash_start(hdev, true);

        dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

        if (!ctx->total)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

        tasklet_schedule(&hdev->dma_task);

        return -EINPROGRESS;
}

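/*
 * Acquire the "tx" DMA channel and configure it for 32-bit writes to the
 * accelerator's write port.
 */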
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
        struct dma_slave_config dma_conf;
        int err;

        hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
        if (IS_ERR(hdev->dma_lch)) {
                dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
                return PTR_ERR(hdev->dma_lch);
        }
        dma_conf.direction = DMA_MEM_TO_DEV;
        dma_conf.dst_addr = hdev->bus_addr;
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
        dma_conf.device_fc = false;

        err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
        if (err) {
                dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
                dma_release_channel(hdev->dma_lch);
                return err;
        }

        return 0;
}

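/*
 * Tasklet that walks the request's scatterlist, sending whole 32-bit words by
 * DMA and carrying any 1-3 byte remainder over to the next transfer via the
 * context bounce buffer.
 */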
static void img_hash_dma_task(unsigned long d)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)d;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u8 *addr;
        size_t nbytes, bleft, wsend, len, tbc;
        struct scatterlist tsg;

        if (!hdev->req || !ctx->sg)
                return;

        addr = sg_virt(ctx->sg);
        nbytes = ctx->sg->length - ctx->offset;

        /*
         * The hash accelerator does not support a data valid mask. This means
         * that if each DMA transfer (i.e. per page) is not a multiple of 4
         * bytes, the padding bytes in the last word written by that transfer
         * would erroneously be included in the hash. To avoid this we round
         * down the transfer, and add the excess to the start of the next DMA
         * transfer. It does not matter that the final transfer may not be a
         * multiple of 4 bytes as the hashing block is programmed to accept
         * the correct number of bytes.
         */

        bleft = nbytes % 4;
        wsend = (nbytes / 4);

        if (wsend) {
                sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
                if (img_hash_xmit_dma(hdev, &tsg)) {
                        dev_err(hdev->dev, "DMA failed, falling back to CPU");
                        ctx->flags |= DRIVER_FLAGS_CPU;
                        hdev->err = 0;
                        img_hash_xmit_cpu(hdev, addr + ctx->offset,
                                          wsend * 4, 0);
                        ctx->sent += wsend * 4;
                        wsend = 0;
                } else {
                        ctx->sent += wsend * 4;
                }
        }

        if (bleft) {
                ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer, bleft, ctx->sent);
                tbc = 0;
                ctx->sg = sg_next(ctx->sg);
                while (ctx->sg && (ctx->bufcnt < 4)) {
                        len = ctx->sg->length;
                        if (likely(len > (4 - ctx->bufcnt)))
                                len = 4 - ctx->bufcnt;
                        tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer + ctx->bufcnt, len,
                                                 ctx->sent + ctx->bufcnt);
                        ctx->bufcnt += tbc;
                        if (tbc >= ctx->sg->length) {
                                ctx->sg = sg_next(ctx->sg);
                                tbc = 0;
                        }
                }

                ctx->sent += ctx->bufcnt;
                ctx->offset = tbc;

                if (!wsend)
                        img_hash_dma_callback(hdev);
        } else {
                ctx->offset = 0;
                ctx->sg = sg_next(ctx->sg);
        }
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->flags & DRIVER_FLAGS_SG)
                dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

        return 0;
}

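/* Choose between the DMA and PIO paths based on the request size. */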
static int img_hash_process_data(struct img_hash_dev *hdev)
{
        struct ahash_request *req = hdev->req;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        int err = 0;

        ctx->bufcnt = 0;

        if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
                dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
                        req->nbytes);
                err = img_hash_write_via_dma(hdev);
        } else {
                dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
                        req->nbytes);
                err = img_hash_write_via_cpu(hdev);
        }
        return err;
}

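/*
 * Reset the block, enable the 'new results' interrupt and program the total
 * message length in bits.
 */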
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
        unsigned long long nbits;
        u32 u, l;

        img_hash_write(hdev, CR_RESET, CR_RESET_SET);
        img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
        img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

        nbits = (u64)hdev->req->nbytes << 3;
        u = nbits >> 32;
        l = nbits;
        img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
        img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

        if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
                hdev->flags |= DRIVER_FLAGS_INIT;
                hdev->err = 0;
        }
        dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
        return 0;
}

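/*
 * Only complete ->digest() requests are sent to the hardware; the
 * init/update/final/finup/import/export entry points below simply forward to
 * the software fallback.
 */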
static int img_hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

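/*
 * Enqueue a request and, if the engine is idle, dequeue the next one and
 * start processing it. Returns the enqueue status for the caller's request.
 */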
static int img_hash_handle_queue(struct img_hash_dev *hdev,
                                 struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct img_hash_request_ctx *ctx;
        unsigned long flags;
        int err = 0, res = 0;

        spin_lock_irqsave(&hdev->lock, flags);

        if (req)
                res = ahash_enqueue_request(&hdev->queue, req);

        if (DRIVER_FLAGS_BUSY & hdev->flags) {
                spin_unlock_irqrestore(&hdev->lock, flags);
                return res;
        }

        backlog = crypto_get_backlog(&hdev->queue);
        async_req = crypto_dequeue_request(&hdev->queue);
        if (async_req)
                hdev->flags |= DRIVER_FLAGS_BUSY;

        spin_unlock_irqrestore(&hdev->lock, flags);

        if (!async_req)
                return res;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        hdev->req = req;

        ctx = ahash_request_ctx(req);

        dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
                 ctx->op, req->nbytes);

        err = img_hash_hw_init(hdev);

        if (!err)
                err = img_hash_process_data(hdev);

        if (err != -EINPROGRESS) {
                /* done_task will not finish so do it here */
                img_hash_finish_req(req, err);
        }
        return res;
}

static int img_hash_update(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_export(&rctx->fallback_req, out);
}

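/*
 * One-shot digest: pick a hardware instance, select the algorithm from the
 * digest size and hand the request to the driver's queue.
 */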
static int img_hash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = NULL;
        struct img_hash_dev *tmp;
        int err;

        spin_lock(&img_hash.lock);
        if (!tctx->hdev) {
                list_for_each_entry(tmp, &img_hash.dev_list, list) {
                        hdev = tmp;
                        break;
                }
                tctx->hdev = hdev;

        } else {
                hdev = tctx->hdev;
        }

        spin_unlock(&img_hash.lock);
        ctx->hdev = hdev;
        ctx->flags = 0;
        ctx->digsize = crypto_ahash_digestsize(tfm);

        switch (ctx->digsize) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA256;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA224;
                break;
        case MD5_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_MD5;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->offset = 0;
        ctx->sent = 0;
        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->sgfirst = req->src;
        ctx->nents = sg_nents(ctx->sg);

        err = img_hash_handle_queue(tctx->hdev, req);

        return err;
}

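/*
 * Allocate the software fallback and size the request context to hold the
 * fallback's request plus a bounce buffer of IMG_HASH_DMA_THRESHOLD bytes.
 */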
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
        struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        int err = -ENOMEM;

        ctx->fallback = crypto_alloc_ahash(alg_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("img_hash: Could not load fallback driver.\n");
                err = PTR_ERR(ctx->fallback);
                goto err;
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct img_hash_request_ctx) +
                                 crypto_ahash_reqsize(ctx->fallback) +
                                 IMG_HASH_DMA_THRESHOLD);

        return 0;

err:
        return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(tctx->fallback);
}

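/*
 * Interrupt handler: acknowledge the status bits and defer completion of the
 * current request to the done_task tasklet.
 */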
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
        struct img_hash_dev *hdev = dev_id;
        u32 reg;

        reg = img_hash_read(hdev, CR_INTSTAT);
        img_hash_write(hdev, CR_INTCLEAR, reg);

        if (reg & CR_INT_NEW_RESULTS_SET) {
                dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
                if (DRIVER_FLAGS_BUSY & hdev->flags) {
                        hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
                        if (!(DRIVER_FLAGS_CPU & hdev->flags))
                                hdev->flags |= DRIVER_FLAGS_DMA_READY;
                        tasklet_schedule(&hdev->done_task);
                } else {
                        dev_warn(hdev->dev,
                                 "HASH interrupt when no active requests.\n");
                }
        } else if (reg & CR_INT_RESULTS_AVAILABLE) {
                dev_warn(hdev->dev,
                         "IRQ triggered before the hash had completed\n");
        } else if (reg & CR_INT_RESULT_READ_ERR) {
                dev_warn(hdev->dev,
                         "Attempt to read from an empty result queue\n");
        } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
                dev_warn(hdev->dev,
                         "Data written before the hardware was configured\n");
        }
        return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct md5_state),
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "img-md5",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_md5_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct sha1_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "img-sha1",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha1_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "img-sha224",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha224_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "img-sha256",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha256_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};

static int img_register_algs(struct img_hash_dev *hdev)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
                err = crypto_register_ahash(&img_algs[i]);
                if (err)
                        goto err_reg;
        }
        return 0;

err_reg:
        for (; i--; )
                crypto_unregister_ahash(&img_algs[i]);

        return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++)
                crypto_unregister_ahash(&img_algs[i]);
        return 0;
}

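/*
 * Completion tasklet: depending on which ready flags the IRQ handler set,
 * either finish the current request or restart the queue.
 */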
static void img_hash_done_task(unsigned long data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        int err = 0;

        if (hdev->err == -EINVAL) {
                err = hdev->err;
                goto finish;
        }

        if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
                img_hash_handle_queue(hdev, NULL);
                return;
        }

        if (DRIVER_FLAGS_CPU & hdev->flags) {
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
                        goto finish;
                }
        } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
                if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
                        img_hash_write_via_dma_stop(hdev);
                        if (hdev->err) {
                                err = hdev->err;
                                goto finish;
                        }
                }
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
                                        DRIVER_FLAGS_OUTPUT_READY);
                        goto finish;
                }
        }
        return;

finish:
        img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
        { .compatible = "img,hash-accelerator" },
        {}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;
        struct device *dev = &pdev->dev;
        struct resource *hash_res;
        int irq;
        int err;

        hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
        if (hdev == NULL)
                return -ENOMEM;

        spin_lock_init(&hdev->lock);

        hdev->dev = dev;

        platform_set_drvdata(pdev, hdev);

        INIT_LIST_HEAD(&hdev->list);

        tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
        tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

        crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

        /* Register bank */
        hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hdev->io_base)) {
                err = PTR_ERR(hdev->io_base);
                dev_err(dev, "can't ioremap, returned %d\n", err);

                goto res_err;
        }

        /* Write port (DMA or CPU) */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
                dev_err(dev, "can't ioremap write port\n");
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
        }
        hdev->bus_addr = hash_res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                err = irq;
                goto res_err;
        }

        err = devm_request_irq(dev, irq, img_irq_handler, 0,
                               dev_name(dev), hdev);
        if (err) {
                dev_err(dev, "unable to request irq\n");
                goto res_err;
        }
        dev_dbg(dev, "using IRQ channel %d\n", irq);

        hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
        if (IS_ERR(hdev->hash_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->hash_clk);
                goto res_err;
        }

        hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(hdev->sys_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->sys_clk);
                goto res_err;
        }

        err = clk_prepare_enable(hdev->hash_clk);
        if (err)
                goto res_err;

        err = clk_prepare_enable(hdev->sys_clk);
        if (err)
                goto clk_err;

        err = img_hash_dma_init(hdev);
        if (err)
                goto dma_err;

        dev_dbg(dev, "using %s for DMA transfers\n",
                dma_chan_name(hdev->dma_lch));

        spin_lock(&img_hash.lock);
        list_add_tail(&hdev->list, &img_hash.dev_list);
        spin_unlock(&img_hash.lock);

        err = img_register_algs(hdev);
        if (err)
                goto err_algs;
        dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

        return 0;

err_algs:
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);
        dma_release_channel(hdev->dma_lch);
dma_err:
        clk_disable_unprepare(hdev->sys_clk);
clk_err:
        clk_disable_unprepare(hdev->hash_clk);
res_err:
        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;

        hdev = platform_get_drvdata(pdev);
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);

        img_unregister_algs(hdev);

        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        dma_release_channel(hdev->dma_lch);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

static int img_hash_resume(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(hdev->hash_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(hdev->sys_clk);
        if (ret) {
                clk_disable_unprepare(hdev->hash_clk);
                return ret;
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
        .probe          = img_hash_probe,
        .remove         = img_hash_remove,
        .driver         = {
                .name   = "img-hash-accelerator",
                .pm     = &img_hash_pm_ops,
                .of_match_table = of_match_ptr(img_hash_match),
        }
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");