linux/drivers/crypto/ux500/hash/hash_core.c
   1/*
   2 * Cryptographic API.
   3 * Support for Nomadik hardware crypto engine.
   4 *
   5 * Copyright (C) ST-Ericsson SA 2010
   6 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
   7 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
   8 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
   9 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
  10 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
  11 * License terms: GNU General Public License (GPL) version 2
  12 */
  13
  14#define pr_fmt(fmt) "hashX hashX: " fmt
  15
  16#include <linux/clk.h>
  17#include <linux/device.h>
  18#include <linux/err.h>
  19#include <linux/init.h>
  20#include <linux/io.h>
  21#include <linux/klist.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/platform_device.h>
  25#include <linux/crypto.h>
  26
  27#include <linux/regulator/consumer.h>
  28#include <linux/dmaengine.h>
  29#include <linux/bitops.h>
  30
  31#include <crypto/internal/hash.h>
  32#include <crypto/sha.h>
  33#include <crypto/scatterwalk.h>
  34#include <crypto/algapi.h>
  35
  36#include <linux/platform_data/crypto-ux500.h>
  37
  38#include "hash_alg.h"
  39
  40static int hash_mode;
  41module_param(hash_mode, int, 0);
  42MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
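
/*
 * Loading example (illustrative only; assumes the module binary is named
 * ux500_hash.ko, which this excerpt does not show):
 *
 *	insmod ux500_hash.ko hash_mode=1	# prefer DMA transfers
 */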
  43
  44/* HMAC-SHA1, no key */
  45static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
  46        0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
  47        0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
  48        0x70, 0x69, 0x0e, 0x1d
  49};
  50
  51/* HMAC-SHA256, no key */
  52static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
  53        0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
  54        0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
  55        0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
  56        0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
  57};
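
/*
 * Hedged self-check sketch (not part of the driver): the two tables above
 * can be cross-checked against the kernel's generic software "hmac(sha1)"
 * implementation, assuming it is available. Illustration only; nothing in
 * this file calls it.
 */
static int __maybe_unused hash_check_zero_hmac_sha1(void)
{
	static const u8 no_key;	/* zero-length key */
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	u8 out[SHA1_DIGEST_SIZE];
	int ret;

	tfm = crypto_alloc_shash("hmac(sha1)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	ret = crypto_shash_setkey(tfm, &no_key, 0);
	if (!ret)
		ret = crypto_shash_digest(desc, &no_key, 0, out);
	if (!ret && memcmp(out, zero_message_hmac_sha1, SHA1_DIGEST_SIZE))
		ret = -EINVAL;

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}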
  58
  59/**
  60 * struct hash_driver_data - data specific to the driver.
  61 *
  62 * @device_list:        A list of registered devices to choose from.
  63 * @device_allocation:  A semaphore initialized with number of devices.
  64 */
  65struct hash_driver_data {
  66        struct klist            device_list;
  67        struct semaphore        device_allocation;
  68};
  69
  70static struct hash_driver_data  driver_data;
  71
  72/* Declaration of functions */
  73/**
   74 * hash_messagepad - Pads a message and writes the nblw bits.
   75 * @device_data:        Structure for the hash device.
   76 * @message:            Last word of a message.
   77 * @index_bytes:        The number of bytes in the last message.
   78 *
   79 * This function manages the final part of the digest calculation, when less
   80 * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
  81 *
  82 */
  83static void hash_messagepad(struct hash_device_data *device_data,
  84                            const u32 *message, u8 index_bytes);
  85
  86/**
  87 * release_hash_device - Releases a previously allocated hash device.
  88 * @device_data:        Structure for the hash device.
  89 *
  90 */
  91static void release_hash_device(struct hash_device_data *device_data)
  92{
  93        spin_lock(&device_data->ctx_lock);
  94        device_data->current_ctx->device = NULL;
  95        device_data->current_ctx = NULL;
  96        spin_unlock(&device_data->ctx_lock);
  97
  98        /*
  99         * The down_interruptible part for this semaphore is called in
  100         * hash_get_device_data.
 101         */
 102        up(&driver_data.device_allocation);
 103}
 104
 105static void hash_dma_setup_channel(struct hash_device_data *device_data,
 106                                   struct device *dev)
 107{
 108        struct hash_platform_data *platform_data = dev->platform_data;
 109        struct dma_slave_config conf = {
 110                .direction = DMA_MEM_TO_DEV,
 111                .dst_addr = device_data->phybase + HASH_DMA_FIFO,
 112                .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 113                .dst_maxburst = 16,
 114        };
 115
 116        dma_cap_zero(device_data->dma.mask);
 117        dma_cap_set(DMA_SLAVE, device_data->dma.mask);
 118
 119        device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
 120        device_data->dma.chan_mem2hash =
 121                dma_request_channel(device_data->dma.mask,
 122                                    platform_data->dma_filter,
 123                                    device_data->dma.cfg_mem2hash);
 124
 125        dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
 126
 127        init_completion(&device_data->dma.complete);
 128}
 129
 130static void hash_dma_callback(void *data)
 131{
 132        struct hash_ctx *ctx = data;
 133
 134        complete(&ctx->device->dma.complete);
 135}
 136
 137static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
 138                                 int len, enum dma_data_direction direction)
 139{
 140        struct dma_async_tx_descriptor *desc = NULL;
 141        struct dma_chan *channel = NULL;
 142        dma_cookie_t cookie;
 143
 144        if (direction != DMA_TO_DEVICE) {
 145                dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
 146                        __func__);
 147                return -EFAULT;
 148        }
 149
 150        sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
 151
 152        channel = ctx->device->dma.chan_mem2hash;
 153        ctx->device->dma.sg = sg;
 154        ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
 155                        ctx->device->dma.sg, ctx->device->dma.nents,
 156                        direction);
 157
 158        if (!ctx->device->dma.sg_len) {
 159                dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
 160                        __func__);
 161                return -EFAULT;
 162        }
 163
 164        dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
 165                __func__);
 166        desc = dmaengine_prep_slave_sg(channel,
 167                        ctx->device->dma.sg, ctx->device->dma.sg_len,
 168                        direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 169        if (!desc) {
 170                dev_err(ctx->device->dev,
 171                        "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
 172                return -EFAULT;
 173        }
 174
 175        desc->callback = hash_dma_callback;
 176        desc->callback_param = ctx;
 177
 178        cookie = dmaengine_submit(desc);
 179        dma_async_issue_pending(channel);
 180
 181        return 0;
 182}
 183
 184static void hash_dma_done(struct hash_ctx *ctx)
 185{
 186        struct dma_chan *chan;
 187
 188        chan = ctx->device->dma.chan_mem2hash;
 189        dmaengine_terminate_all(chan);
 190        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
 191                     ctx->device->dma.sg_len, DMA_TO_DEVICE);
 192}
 193
 194static int hash_dma_write(struct hash_ctx *ctx,
 195                          struct scatterlist *sg, int len)
 196{
 197        int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
 198        if (error) {
 199                dev_dbg(ctx->device->dev,
 200                        "%s: hash_set_dma_transfer() failed\n", __func__);
 201                return error;
 202        }
 203
 204        return len;
 205}
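
/*
 * Usage sketch for the DMA write path above (illustration only; not called
 * by the driver): hash_dma_final() below performs this same
 * submit/wait/teardown sequence inline.
 */
static int __maybe_unused hash_dma_write_and_wait(struct hash_ctx *ctx,
						  struct scatterlist *sg,
						  int len)
{
	int bytes_written = hash_dma_write(ctx, sg, len);

	if (bytes_written != len)
		return bytes_written < 0 ? bytes_written : -EIO;

	/* hash_dma_callback() completes this from the dmaengine callback */
	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	return 0;
}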
 206
 207/**
 208 * get_empty_message_digest - Returns a pre-calculated digest for
 209 * the empty message.
 210 * @device_data:        Structure for the hash device.
 211 * @zero_hash:          Buffer to return the empty message digest.
 212 * @zero_hash_size:     Hash size of the empty message digest.
  213 * @zero_digest:        Set to true if a pre-calculated digest was returned.
 214 */
 215static int get_empty_message_digest(
 216                struct hash_device_data *device_data,
 217                u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
 218{
 219        int ret = 0;
 220        struct hash_ctx *ctx = device_data->current_ctx;
 221        *zero_digest = false;
 222
  223        /*
  224         * Caller responsible for ctx != NULL.
  225         */
 226
 227        if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
 228                if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
 229                        memcpy(zero_hash, &sha1_zero_message_hash[0],
 230                               SHA1_DIGEST_SIZE);
 231                        *zero_hash_size = SHA1_DIGEST_SIZE;
 232                        *zero_digest = true;
 233                } else if (HASH_ALGO_SHA256 ==
 234                                ctx->config.algorithm) {
 235                        memcpy(zero_hash, &sha256_zero_message_hash[0],
 236                               SHA256_DIGEST_SIZE);
 237                        *zero_hash_size = SHA256_DIGEST_SIZE;
 238                        *zero_digest = true;
 239                } else {
 240                        dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
 241                                __func__);
 242                        ret = -EINVAL;
 243                        goto out;
 244                }
 245        } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
 246                if (!ctx->keylen) {
 247                        if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
 248                                memcpy(zero_hash, &zero_message_hmac_sha1[0],
 249                                       SHA1_DIGEST_SIZE);
 250                                *zero_hash_size = SHA1_DIGEST_SIZE;
 251                                *zero_digest = true;
 252                        } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
 253                                memcpy(zero_hash, &zero_message_hmac_sha256[0],
 254                                       SHA256_DIGEST_SIZE);
 255                                *zero_hash_size = SHA256_DIGEST_SIZE;
 256                                *zero_digest = true;
 257                        } else {
 258                                dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
 259                                        __func__);
 260                                ret = -EINVAL;
 261                                goto out;
 262                        }
 263                } else {
 264                        dev_dbg(device_data->dev,
 265                                "%s: Continue hash calculation, since hmac key available\n",
 266                                __func__);
 267                }
 268        }
 269out:
 270
 271        return ret;
 272}
 273
 274/**
 275 * hash_disable_power - Request to disable power and clock.
 276 * @device_data:        Structure for the hash device.
 277 * @save_device_state:  If true, saves the current hw state.
 278 *
  279 * This function requests disabling of power (regulator) and clock,
  280 * and can also save the current hw state.
 281 */
 282static int hash_disable_power(struct hash_device_data *device_data,
 283                              bool save_device_state)
 284{
 285        int ret = 0;
 286        struct device *dev = device_data->dev;
 287
 288        spin_lock(&device_data->power_state_lock);
 289        if (!device_data->power_state)
 290                goto out;
 291
 292        if (save_device_state) {
 293                hash_save_state(device_data,
 294                                &device_data->state);
 295                device_data->restore_dev_state = true;
 296        }
 297
 298        clk_disable(device_data->clk);
 299        ret = regulator_disable(device_data->regulator);
 300        if (ret)
 301                dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
 302
 303        device_data->power_state = false;
 304
 305out:
 306        spin_unlock(&device_data->power_state_lock);
 307
 308        return ret;
 309}
 310
 311/**
 312 * hash_enable_power - Request to enable power and clock.
 313 * @device_data:                Structure for the hash device.
 314 * @restore_device_state:       If true, restores a previous saved hw state.
 315 *
  316 * This function requests enabling of power (regulator) and clock,
  317 * and can also restore a previously saved hw state.
 318 */
 319static int hash_enable_power(struct hash_device_data *device_data,
 320                             bool restore_device_state)
 321{
 322        int ret = 0;
 323        struct device *dev = device_data->dev;
 324
 325        spin_lock(&device_data->power_state_lock);
 326        if (!device_data->power_state) {
 327                ret = regulator_enable(device_data->regulator);
 328                if (ret) {
 329                        dev_err(dev, "%s: regulator_enable() failed!\n",
 330                                __func__);
 331                        goto out;
 332                }
 333                ret = clk_enable(device_data->clk);
 334                if (ret) {
 335                        dev_err(dev, "%s: clk_enable() failed!\n", __func__);
 336                        ret = regulator_disable(
 337                                        device_data->regulator);
 338                        goto out;
 339                }
 340                device_data->power_state = true;
 341        }
 342
 343        if (device_data->restore_dev_state) {
 344                if (restore_device_state) {
 345                        device_data->restore_dev_state = false;
 346                        hash_resume_state(device_data, &device_data->state);
 347                }
 348        }
 349out:
 350        spin_unlock(&device_data->power_state_lock);
 351
 352        return ret;
 353}
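
/*
 * Sketch of how the two power helpers pair up in a suspend/resume cycle
 * (illustration only; the driver's real PM callbacks are outside this
 * excerpt):
 */
static int __maybe_unused hash_power_cycle_example(
		struct hash_device_data *device_data)
{
	int ret;

	/* Suspend: save the hardware context, then cut clock and power */
	ret = hash_disable_power(device_data, true);
	if (ret)
		return ret;

	/* Resume: power up again and restore the saved context */
	return hash_enable_power(device_data, true);
}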
 354
 355/**
  356 * hash_get_device_data - Checks for an available hash device and returns it.
  357 * @ctx:                Structure for the hash context.
  358 * @device_data:        Structure for the hash device.
  359 *
  360 * This function checks for an available hash device and returns it to
  361 * the caller.
  362 * Note! The caller needs to release the device by calling up().
 363 */
 364static int hash_get_device_data(struct hash_ctx *ctx,
 365                                struct hash_device_data **device_data)
 366{
 367        int                     ret;
 368        struct klist_iter       device_iterator;
 369        struct klist_node       *device_node;
 370        struct hash_device_data *local_device_data = NULL;
 371
 372        /* Wait until a device is available */
 373        ret = down_interruptible(&driver_data.device_allocation);
 374        if (ret)
 375                return ret;  /* Interrupted */
 376
 377        /* Select a device */
 378        klist_iter_init(&driver_data.device_list, &device_iterator);
 379        device_node = klist_next(&device_iterator);
 380        while (device_node) {
 381                local_device_data = container_of(device_node,
 382                                           struct hash_device_data, list_node);
 383                spin_lock(&local_device_data->ctx_lock);
 384                /* current_ctx allocates a device, NULL = unallocated */
 385                if (local_device_data->current_ctx) {
 386                        device_node = klist_next(&device_iterator);
 387                } else {
 388                        local_device_data->current_ctx = ctx;
 389                        ctx->device = local_device_data;
 390                        spin_unlock(&local_device_data->ctx_lock);
 391                        break;
 392                }
 393                spin_unlock(&local_device_data->ctx_lock);
 394        }
 395        klist_iter_exit(&device_iterator);
 396
 397        if (!device_node) {
  398                /*
 399                 * No free device found.
 400                 * Since we allocated a device with down_interruptible, this
 401                 * should not be able to happen.
 402                 * Number of available devices, which are contained in
 403                 * device_allocation, is therefore decremented by not doing
 404                 * an up(device_allocation).
 405                 */
 406                return -EBUSY;
 407        }
 408
 409        *device_data = local_device_data;
 410
 411        return 0;
 412}
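
/*
 * Illustration (not part of the driver): every successful
 * hash_get_device_data() must be balanced by one release_hash_device(),
 * which ups the allocation semaphore again.
 */
static int __maybe_unused hash_with_device_example(struct hash_ctx *ctx)
{
	struct hash_device_data *device_data;
	int ret;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;	/* interrupted while waiting for a device */

	/* ... program the hardware through device_data ... */

	release_hash_device(device_data);

	return 0;
}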
 413
 414/**
  415 * hash_hw_write_key - Writes the key to the hardware registers.
  416 *
  417 * @device_data:        Structure for the hash device.
  418 * @key:                Key to be written.
  419 * @keylen:             The length of the key.
  420 *
  421 * Note! This function DOES NOT write to the NBLW register, even though
  422 * this is specified in the hw design spec. Either due to incorrect info in
  423 * the spec or due to a bug in the hw.
 424 */
 425static void hash_hw_write_key(struct hash_device_data *device_data,
 426                              const u8 *key, unsigned int keylen)
 427{
 428        u32 word = 0;
 429        int nwords = 1;
 430
 431        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
 432
 433        while (keylen >= 4) {
 434                u32 *key_word = (u32 *)key;
 435
 436                HASH_SET_DIN(key_word, nwords);
 437                keylen -= 4;
 438                key += 4;
 439        }
 440
 441        /* Take care of the remaining bytes in the last word */
 442        if (keylen) {
 443                word = 0;
 444                while (keylen) {
 445                        word |= (key[keylen - 1] << (8 * (keylen - 1)));
 446                        keylen--;
 447                }
 448
 449                HASH_SET_DIN(&word, nwords);
 450        }
 451
 452        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 453                cpu_relax();
 454
 455        HASH_SET_DCAL;
 456
 457        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 458                cpu_relax();
 459}
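
/*
 * The tail handling above packs the last 1-3 key bytes into a little-endian
 * word. The same logic as a standalone helper, shown for clarity
 * (illustration only):
 */
static u32 __maybe_unused hash_pack_tail_word(const u8 *tail,
					      unsigned int nbytes)
{
	u32 word = 0;

	/* byte i of the tail ends up in bits [8*i+7:8*i] of the word */
	while (nbytes) {
		word |= tail[nbytes - 1] << (8 * (nbytes - 1));
		nbytes--;
	}

	return word;
}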
 460
 461/**
 462 * init_hash_hw - Initialise the hash hardware for a new calculation.
 463 * @device_data:        Structure for the hash device.
 464 * @ctx:                The hash context.
 465 *
 466 * This function will enable the bits needed to clear and start a new
 467 * calculation.
 468 */
 469static int init_hash_hw(struct hash_device_data *device_data,
 470                        struct hash_ctx *ctx)
 471{
 472        int ret = 0;
 473
 474        ret = hash_setconfiguration(device_data, &ctx->config);
 475        if (ret) {
 476                dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
 477                        __func__);
 478                return ret;
 479        }
 480
 481        hash_begin(device_data, ctx);
 482
 483        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
 484                hash_hw_write_key(device_data, ctx->key, ctx->keylen);
 485
 486        return ret;
 487}
 488
 489/**
 490 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 491 *
 492 * @sg:         Scatterlist.
 493 * @size:       Size in bytes.
  494 * @aligned:    Set to true if the sg data is aligned enough for DMA.
 495 *
 496 */
 497static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
 498{
 499        int nents = 0;
 500        bool aligned_data = true;
 501
 502        while (size > 0 && sg) {
 503                nents++;
 504                size -= sg->length;
 505
 506                /* hash_set_dma_transfer will align last nent */
 507                if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
 508                    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
 509                        aligned_data = false;
 510
 511                sg = sg_next(sg);
 512        }
 513
 514        if (aligned)
 515                *aligned = aligned_data;
 516
 517        if (size != 0)
 518                return -EFAULT;
 519
 520        return nents;
 521}
 522
 523/**
  524 * hash_dma_valid_data - Checks whether the sg data is valid for DMA.
  525 * @sg:         Scatterlist.
  526 * @datasize:   Data size in bytes.
  527 *
  528 * NOTE! This function checks that the sg data is valid for DMA, since the
  529 * DMA engine only accepts data sizes that are a multiple of the word size.
 530 */
 531static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
 532{
 533        bool aligned;
 534
 535        /* Need to include at least one nent, else error */
 536        if (hash_get_nents(sg, datasize, &aligned) < 1)
 537                return false;
 538
 539        return aligned;
 540}
 541
 542/**
 543 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 544 * @req: The hash request for the job.
 545 *
 546 * Initialize structures.
 547 */
 548static int hash_init(struct ahash_request *req)
 549{
 550        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 551        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
 552        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
 553
 554        if (!ctx->key)
 555                ctx->keylen = 0;
 556
 557        memset(&req_ctx->state, 0, sizeof(struct hash_state));
 558        req_ctx->updated = 0;
 559        if (hash_mode == HASH_MODE_DMA) {
 560                if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
 561                        req_ctx->dma_mode = false; /* Don't use DMA */
 562
 563                        pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
 564                                 __func__, HASH_DMA_ALIGN_SIZE);
 565                } else {
 566                        if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
 567                            hash_dma_valid_data(req->src, req->nbytes)) {
 568                                req_ctx->dma_mode = true;
 569                        } else {
 570                                req_ctx->dma_mode = false;
 571                                pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
 572                                         __func__,
 573                                         HASH_DMA_PERFORMANCE_MIN_SIZE);
 574                        }
 575                }
 576        }
 577        return 0;
 578}
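
/*
 * Consumer-side sketch (illustration only): how a kernel user would reach
 * this driver through the ahash API. "sha256-ux500" is the cra_driver_name
 * registered at the bottom of this file; the data buffer must be
 * addressable through a scatterlist (i.e. not on the stack when the DMA
 * path is taken).
 */
static int __maybe_unused ux500_hash_digest_example(const u8 *data,
						    unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha256-ux500", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* Dispatches to ahash_sha256_digest(); completes synchronously */
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);

	return ret;
}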
 579
 580/**
 581 * hash_processblock - This function processes a single block of 512 bits (64
 582 *                     bytes), word aligned, starting at message.
 583 * @device_data:        Structure for the hash device.
  584 * @message:            Block (512 bits) of message to be written to
  585 *                      the HASH hardware.
  586 * @length:             The length of the block in bytes (one full block).
 587 */
 588static void hash_processblock(struct hash_device_data *device_data,
 589                              const u32 *message, int length)
 590{
 591        int len = length / HASH_BYTES_PER_WORD;
 592        /*
 593         * NBLW bits. Reset the number of bits in last word (NBLW).
 594         */
 595        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
 596
 597        /*
 598         * Write message data to the HASH_DIN register.
 599         */
 600        HASH_SET_DIN(message, len);
 601}
 602
 603/**
  604 * hash_messagepad - Pads a message and writes the nblw bits.
  605 * @device_data:        Structure for the hash device.
  606 * @message:            Last word of a message.
  607 * @index_bytes:        The number of bytes in the last message.
  608 *
  609 * This function manages the final part of the digest calculation, when less
  610 * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
 611 *
 612 */
 613static void hash_messagepad(struct hash_device_data *device_data,
 614                            const u32 *message, u8 index_bytes)
 615{
 616        int nwords = 1;
 617
 618        /*
 619         * Clear hash str register, only clear NBLW
 620         * since DCAL will be reset by hardware.
 621         */
 622        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
 623
 624        /* Main loop */
 625        while (index_bytes >= 4) {
 626                HASH_SET_DIN(message, nwords);
 627                index_bytes -= 4;
 628                message++;
 629        }
 630
 631        if (index_bytes)
 632                HASH_SET_DIN(message, nwords);
 633
 634        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 635                cpu_relax();
 636
 637        /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
 638        HASH_SET_NBLW(index_bytes * 8);
 639        dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
 640                __func__, readl_relaxed(&device_data->base->din),
 641                readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
 642        HASH_SET_DCAL;
 643        dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
 644                __func__, readl_relaxed(&device_data->base->din),
 645                readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
 646
 647        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 648                cpu_relax();
 649}
 650
 651/**
 652 * hash_incrementlength - Increments the length of the current message.
 653 * @ctx: Hash context
  654 * @incr: Number of bytes just processed
 655 *
 656 * Overflow cannot occur, because conditions for overflow are checked in
 657 * hash_hw_update.
 658 */
 659static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
 660{
 661        ctx->state.length.low_word += incr;
 662
 663        /* Check for wrap-around */
 664        if (ctx->state.length.low_word < incr)
 665                ctx->state.length.high_word++;
 666}
 667
 668/**
 669 * hash_setconfiguration - Sets the required configuration for the hash
 670 *                         hardware.
 671 * @device_data:        Structure for the hash device.
 672 * @config:             Pointer to a configuration structure.
 673 */
 674int hash_setconfiguration(struct hash_device_data *device_data,
 675                          struct hash_config *config)
 676{
 677        int ret = 0;
 678
 679        if (config->algorithm != HASH_ALGO_SHA1 &&
 680            config->algorithm != HASH_ALGO_SHA256)
 681                return -EPERM;
 682
 683        /*
  684         * DATAFORM bits. Set the DATAFORM bits to the configured data
  685         * format, which controls how data written to HASH_DIN is interpreted.
 686         */
 687        HASH_SET_DATA_FORMAT(config->data_format);
 688
 689        /*
 690         * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
 691         */
 692        switch (config->algorithm) {
 693        case HASH_ALGO_SHA1:
 694                HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
 695                break;
 696
 697        case HASH_ALGO_SHA256:
 698                HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
 699                break;
 700
 701        default:
 702                dev_err(device_data->dev, "%s: Incorrect algorithm\n",
 703                        __func__);
 704                return -EPERM;
 705        }
 706
 707        /*
 708         * MODE bit. This bit selects between HASH or HMAC mode for the
 709         * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
 710         */
 711        if (HASH_OPER_MODE_HASH == config->oper_mode)
 712                HASH_CLEAR_BITS(&device_data->base->cr,
 713                                HASH_CR_MODE_MASK);
 714        else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
 715                HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
 716                if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
 717                        /* Truncate key to blocksize */
 718                        dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
 719                        HASH_SET_BITS(&device_data->base->cr,
 720                                      HASH_CR_LKEY_MASK);
 721                } else {
 722                        dev_dbg(device_data->dev, "%s: LKEY cleared\n",
 723                                __func__);
 724                        HASH_CLEAR_BITS(&device_data->base->cr,
 725                                        HASH_CR_LKEY_MASK);
 726                }
 727        } else {        /* Wrong hash mode */
 728                ret = -EPERM;
 729                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
 730                        __func__);
 731        }
 732        return ret;
 733}
 734
 735/**
 736 * hash_begin - This routine resets some globals and initializes the hash
 737 *              hardware.
 738 * @device_data:        Structure for the hash device.
 739 * @ctx:                Hash context.
 740 */
 741void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
 742{
 743        /* HW and SW initializations */
 744        /* Note: there is no need to initialize buffer and digest members */
 745
 746        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 747                cpu_relax();
 748
 749        /*
 750         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
  751         * prepare the HASH accelerator to compute the message
 752         * digest of a new message.
 753         */
 754        HASH_INITIALIZE;
 755
 756        /*
 757         * NBLW bits. Reset the number of bits in last word (NBLW).
 758         */
 759        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
 760}
 761
 762static int hash_process_data(struct hash_device_data *device_data,
 763                             struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
 764                             int msg_length, u8 *data_buffer, u8 *buffer,
 765                             u8 *index)
 766{
 767        int ret = 0;
 768        u32 count;
 769
 770        do {
 771                if ((*index + msg_length) < HASH_BLOCK_SIZE) {
 772                        for (count = 0; count < msg_length; count++) {
 773                                buffer[*index + count] =
 774                                        *(data_buffer + count);
 775                        }
 776                        *index += msg_length;
 777                        msg_length = 0;
 778                } else {
 779                        if (req_ctx->updated) {
 780                                ret = hash_resume_state(device_data,
 781                                                &device_data->state);
 782                                memmove(req_ctx->state.buffer,
 783                                        device_data->state.buffer,
 784                                        HASH_BLOCK_SIZE);
 785                                if (ret) {
 786                                        dev_err(device_data->dev,
 787                                                "%s: hash_resume_state() failed!\n",
 788                                                __func__);
 789                                        goto out;
 790                                }
 791                        } else {
 792                                ret = init_hash_hw(device_data, ctx);
 793                                if (ret) {
 794                                        dev_err(device_data->dev,
 795                                                "%s: init_hash_hw() failed!\n",
 796                                                __func__);
 797                                        goto out;
 798                                }
 799                                req_ctx->updated = 1;
 800                        }
 801                        /*
 802                         * If 'data_buffer' is four byte aligned and
 803                         * local buffer does not have any data, we can
 804                         * write data directly from 'data_buffer' to
 805                         * HW peripheral, otherwise we first copy data
 806                         * to a local buffer
 807                         */
  808                        if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
  809                            *index == 0)
 810                                hash_processblock(device_data,
 811                                                  (const u32 *)data_buffer,
 812                                                  HASH_BLOCK_SIZE);
 813                        else {
 814                                for (count = 0;
 815                                     count < (u32)(HASH_BLOCK_SIZE - *index);
 816                                     count++) {
 817                                        buffer[*index + count] =
 818                                                *(data_buffer + count);
 819                                }
 820                                hash_processblock(device_data,
 821                                                  (const u32 *)buffer,
 822                                                  HASH_BLOCK_SIZE);
 823                        }
 824                        hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
 825                        data_buffer += (HASH_BLOCK_SIZE - *index);
 826
 827                        msg_length -= (HASH_BLOCK_SIZE - *index);
 828                        *index = 0;
 829
 830                        ret = hash_save_state(device_data,
 831                                        &device_data->state);
 832
 833                        memmove(device_data->state.buffer,
 834                                req_ctx->state.buffer,
 835                                HASH_BLOCK_SIZE);
 836                        if (ret) {
 837                                dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
 838                                        __func__);
 839                                goto out;
 840                        }
 841                }
 842        } while (msg_length != 0);
 843out:
 844
 845        return ret;
 846}
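
/*
 * A hardware-free model of the buffering done by hash_process_data()
 * (illustration only): partial input collects in 'buffer'; every time a
 * full HASH_BLOCK_SIZE block is available it is flushed, so at most
 * HASH_BLOCK_SIZE - 1 bytes ever stay buffered between calls.
 */
static u8 __maybe_unused hash_buffering_model(u8 *buffer, u8 index,
					      const u8 *data,
					      unsigned int msg_length)
{
	while (msg_length) {
		unsigned int room = HASH_BLOCK_SIZE - index;
		unsigned int n = min_t(unsigned int, room, msg_length);

		memcpy(buffer + index, data, n);
		index += n;
		data += n;
		msg_length -= n;

		if (index == HASH_BLOCK_SIZE)
			index = 0;	/* here the real code feeds the hw */
	}

	return index;	/* new value for req_ctx->state.index */
}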
 847
 848/**
 849 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 850 * @req:        The hash request for the job.
 851 */
 852static int hash_dma_final(struct ahash_request *req)
 853{
 854        int ret = 0;
 855        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 856        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
 857        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
 858        struct hash_device_data *device_data;
 859        u8 digest[SHA256_DIGEST_SIZE];
 860        int bytes_written = 0;
 861
 862        ret = hash_get_device_data(ctx, &device_data);
 863        if (ret)
 864                return ret;
 865
  866        dev_dbg(device_data->dev, "%s: (ctx=%p)!\n", __func__, ctx);
 867
 868        if (req_ctx->updated) {
 869                ret = hash_resume_state(device_data, &device_data->state);
 870
 871                if (ret) {
 872                        dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
 873                                __func__);
 874                        goto out;
 875                }
 876        }
 877
 878        if (!req_ctx->updated) {
 879                ret = hash_setconfiguration(device_data, &ctx->config);
 880                if (ret) {
 881                        dev_err(device_data->dev,
 882                                "%s: hash_setconfiguration() failed!\n",
 883                                __func__);
 884                        goto out;
 885                }
 886
 887                /* Enable DMA input */
 888                if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
 889                        HASH_CLEAR_BITS(&device_data->base->cr,
 890                                        HASH_CR_DMAE_MASK);
 891                } else {
 892                        HASH_SET_BITS(&device_data->base->cr,
 893                                      HASH_CR_DMAE_MASK);
 894                        HASH_SET_BITS(&device_data->base->cr,
 895                                      HASH_CR_PRIVN_MASK);
 896                }
 897
 898                HASH_INITIALIZE;
 899
 900                if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
 901                        hash_hw_write_key(device_data, ctx->key, ctx->keylen);
 902
 903                /* Number of bits in last word = (nbytes * 8) % 32 */
 904                HASH_SET_NBLW((req->nbytes * 8) % 32);
 905                req_ctx->updated = 1;
 906        }
 907
  908        /* Store the nents in the dma struct. */
  909        ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
  910        if (ctx->device->dma.nents < 1) {
  911                dev_err(device_data->dev, "%s: hash_get_nents() failed!\n",
  912                        __func__);
  913                ret = -EFAULT;
  914                goto out;
  915        }
 916
 917        bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
 918        if (bytes_written != req->nbytes) {
 919                dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
 920                        __func__);
 921                ret = bytes_written;
 922                goto out;
 923        }
 924
 925        wait_for_completion(&ctx->device->dma.complete);
 926        hash_dma_done(ctx);
 927
 928        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 929                cpu_relax();
 930
 931        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
 932                unsigned int keylen = ctx->keylen;
 933                u8 *key = ctx->key;
 934
 935                dev_dbg(device_data->dev, "%s: keylen: %d\n",
 936                        __func__, ctx->keylen);
 937                hash_hw_write_key(device_data, key, keylen);
 938        }
 939
 940        hash_get_digest(device_data, digest, ctx->config.algorithm);
 941        memcpy(req->result, digest, ctx->digestsize);
 942
 943out:
 944        release_hash_device(device_data);
 945
  946        /*
  947         * Allocated in setkey, and only used in HMAC. Clear the pointer
  948         * so a later request does not free the same buffer again.
  949         */
  950        kfree(ctx->key);
  951        ctx->key = NULL;
  952
  953        return ret;
 952}
 953
 954/**
 955 * hash_hw_final - The final hash calculation function
 956 * @req:        The hash request for the job.
 957 */
 958static int hash_hw_final(struct ahash_request *req)
 959{
 960        int ret = 0;
 961        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 962        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
 963        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
 964        struct hash_device_data *device_data;
 965        u8 digest[SHA256_DIGEST_SIZE];
 966
 967        ret = hash_get_device_data(ctx, &device_data);
 968        if (ret)
 969                return ret;
 970
  971        dev_dbg(device_data->dev, "%s: (ctx=%p)!\n", __func__, ctx);
 972
 973        if (req_ctx->updated) {
 974                ret = hash_resume_state(device_data, &device_data->state);
 975
 976                if (ret) {
 977                        dev_err(device_data->dev,
 978                                "%s: hash_resume_state() failed!\n", __func__);
 979                        goto out;
 980                }
 981        } else if (req->nbytes == 0 && ctx->keylen == 0) {
 982                u8 zero_hash[SHA256_DIGEST_SIZE];
 983                u32 zero_hash_size = 0;
 984                bool zero_digest = false;
  985                /*
  986                 * Use a pre-calculated empty message digest
  987                 * (workaround since hw returns zeroes, hw bug!?)
  988                 */
 989                ret = get_empty_message_digest(device_data, &zero_hash[0],
 990                                &zero_hash_size, &zero_digest);
 991                if (!ret && likely(zero_hash_size == ctx->digestsize) &&
 992                    zero_digest) {
 993                        memcpy(req->result, &zero_hash[0], ctx->digestsize);
 994                        goto out;
 995                } else if (!ret && !zero_digest) {
 996                        dev_dbg(device_data->dev,
 997                                "%s: HMAC zero msg with key, continue...\n",
 998                                __func__);
  999                } else {
 1000                        dev_err(device_data->dev,
 1001                                "%s: ret=%d, or wrong digest size? %s\n",
 1002                                __func__, ret,
 1003                                zero_hash_size == ctx->digestsize ?
 1004                                "true" : "false");
 1005                        /* Make sure an error is returned */
 1006                        if (!ret)
 1007                                ret = -EINVAL;
 1008                        goto out;
 1009                }
 1008        } else if (req->nbytes == 0 && ctx->keylen > 0) {
 1009                ret = -EPERM;
 1010                dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
 1011                        __func__);
 1012                goto out;
 1013        }
1013
1014        if (!req_ctx->updated) {
1015                ret = init_hash_hw(device_data, ctx);
1016                if (ret) {
1017                        dev_err(device_data->dev,
1018                                "%s: init_hash_hw() failed!\n", __func__);
1019                        goto out;
1020                }
1021        }
1022
1023        if (req_ctx->state.index) {
1024                hash_messagepad(device_data, req_ctx->state.buffer,
1025                                req_ctx->state.index);
1026        } else {
1027                HASH_SET_DCAL;
1028                while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1029                        cpu_relax();
1030        }
1031
1032        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
1033                unsigned int keylen = ctx->keylen;
1034                u8 *key = ctx->key;
1035
1036                dev_dbg(device_data->dev, "%s: keylen: %d\n",
1037                        __func__, ctx->keylen);
1038                hash_hw_write_key(device_data, key, keylen);
1039        }
1040
1041        hash_get_digest(device_data, digest, ctx->config.algorithm);
1042        memcpy(req->result, digest, ctx->digestsize);
1043
1044out:
1045        release_hash_device(device_data);
1046
 1047        /*
 1048         * Allocated in setkey, and only used in HMAC. Clear the pointer
 1049         * so a later request does not free the same buffer again.
 1050         */
 1051        kfree(ctx->key);
 1052        ctx->key = NULL;
 1053
 1054        return ret;
1053}
1054
1055/**
1056 * hash_hw_update - Updates current HASH computation hashing another part of
1057 *                  the message.
 1058 * @req:        The hash request holding the message to be hashed (caller
 1059 *              allocated).
1060 */
1061int hash_hw_update(struct ahash_request *req)
1062{
1063        int ret = 0;
1064        u8 index = 0;
1065        u8 *buffer;
1066        struct hash_device_data *device_data;
1067        u8 *data_buffer;
1068        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1069        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1070        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1071        struct crypto_hash_walk walk;
1072        int msg_length = crypto_hash_walk_first(req, &walk);
1073
1074        /* Empty message ("") is correct indata */
1075        if (msg_length == 0)
1076                return ret;
1077
1078        index = req_ctx->state.index;
1079        buffer = (u8 *)req_ctx->state.buffer;
1080
 1081        /* Check if ctx->state.length + msg_length overflows */
1083        if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
1084            HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
1085                pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
1086                return -EPERM;
1087        }
1088
1089        ret = hash_get_device_data(ctx, &device_data);
1090        if (ret)
1091                return ret;
1092
1093        /* Main loop */
1094        while (0 != msg_length) {
1095                data_buffer = walk.data;
1096                ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
1097                                data_buffer, buffer, &index);
1098
1099                if (ret) {
 1100                        dev_err(device_data->dev, "%s: hash_process_data() failed!\n",
 1101                                __func__);
1102                        goto out;
1103                }
1104
1105                msg_length = crypto_hash_walk_done(&walk, 0);
1106        }
1107
1108        req_ctx->state.index = index;
1109        dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
1110                __func__, req_ctx->state.index, req_ctx->state.bit_index);
1111
1112out:
1113        release_hash_device(device_data);
1114
1115        return ret;
1116}
1117
1118/**
 1119 * hash_resume_state - Function that resumes the state of a calculation.
1120 * @device_data:        Pointer to the device structure.
1121 * @device_state:       The state to be restored in the hash hardware
1122 */
1123int hash_resume_state(struct hash_device_data *device_data,
1124                      const struct hash_state *device_state)
1125{
1126        u32 temp_cr;
1127        s32 count;
1128        int hash_mode = HASH_OPER_MODE_HASH;
1129
 1130        if (!device_state) {
1131                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1132                        __func__);
1133                return -EPERM;
1134        }
1135
1136        /* Check correctness of index and length members */
1137        if (device_state->index > HASH_BLOCK_SIZE ||
1138            (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1139                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1140                        __func__);
1141                return -EPERM;
1142        }
1143
1144        /*
1145         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
 1146         * prepare the HASH accelerator to compute the message
1147         * digest of a new message.
1148         */
1149        HASH_INITIALIZE;
1150
1151        temp_cr = device_state->temp_cr;
1152        writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1153
1154        if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1155                hash_mode = HASH_OPER_MODE_HMAC;
1156        else
1157                hash_mode = HASH_OPER_MODE_HASH;
1158
1159        for (count = 0; count < HASH_CSR_COUNT; count++) {
1160                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1161                        break;
1162
1163                writel_relaxed(device_state->csr[count],
1164                               &device_data->base->csrx[count]);
1165        }
1166
1167        writel_relaxed(device_state->csfull, &device_data->base->csfull);
1168        writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
1169
1170        writel_relaxed(device_state->str_reg, &device_data->base->str);
1171        writel_relaxed(temp_cr, &device_data->base->cr);
1172
1173        return 0;
1174}
1175
1176/**
1177 * hash_save_state - Function that saves the state of hardware.
1178 * @device_data:        Pointer to the device structure.
 1179 * @device_state:       The structure where the hardware state should be saved.
1180 */
1181int hash_save_state(struct hash_device_data *device_data,
1182                    struct hash_state *device_state)
1183{
1184        u32 temp_cr;
1185        u32 count;
1186        int hash_mode = HASH_OPER_MODE_HASH;
1187
 1188        if (!device_state) {
1189                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1190                        __func__);
1191                return -ENOTSUPP;
1192        }
1193
1194        /* Write dummy value to force digest intermediate calculation. This
1195         * actually makes sure that there isn't any ongoing calculation in the
1196         * hardware.
1197         */
1198        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1199                cpu_relax();
1200
1201        temp_cr = readl_relaxed(&device_data->base->cr);
1202
1203        device_state->str_reg = readl_relaxed(&device_data->base->str);
1204
1205        device_state->din_reg = readl_relaxed(&device_data->base->din);
1206
1207        if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1208                hash_mode = HASH_OPER_MODE_HMAC;
1209        else
1210                hash_mode = HASH_OPER_MODE_HASH;
1211
1212        for (count = 0; count < HASH_CSR_COUNT; count++) {
1213                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1214                        break;
1215
1216                device_state->csr[count] =
1217                        readl_relaxed(&device_data->base->csrx[count]);
1218        }
1219
1220        device_state->csfull = readl_relaxed(&device_data->base->csfull);
1221        device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
1222
1223        device_state->temp_cr = temp_cr;
1224
1225        return 0;
1226}
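
/*
 * The save/resume pair above is what lets one hardware instance be shared
 * between contexts. A context switch boils down to this (illustration
 * only):
 */
static int __maybe_unused hash_context_switch_example(
		struct hash_device_data *device_data,
		struct hash_state *old_state,
		const struct hash_state *new_state)
{
	int ret;

	ret = hash_save_state(device_data, old_state);
	if (ret)
		return ret;

	return hash_resume_state(device_data, new_state);
}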
1227
1228/**
 1229 * hash_check_hw - This routine checks the peripheral IDs and PrimeCell IDs.
 1230 * @device_data:        Structure for the hash device.
 1231 *
1232 */
1233int hash_check_hw(struct hash_device_data *device_data)
1234{
1235        /* Checking Peripheral Ids  */
1236        if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
1237            HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
1238            HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
1239            HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
1240            HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
1241            HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
1242            HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
1243            HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
1244                return 0;
1245        }
1246
1247        dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
1248        return -ENOTSUPP;
1249}
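
/*
 * The eight ID registers checked above follow the standard PrimeCell
 * layout: one ID byte per register. A sketch of how the 32-bit peripheral
 * ID would be assembled from them (illustration only; the driver just
 * compares the bytes individually):
 */
static u32 __maybe_unused hash_read_periphid(struct hash_device_data *device_data)
{
	return (readl_relaxed(&device_data->base->periphid0) & 0xff) |
	       ((readl_relaxed(&device_data->base->periphid1) & 0xff) << 8) |
	       ((readl_relaxed(&device_data->base->periphid2) & 0xff) << 16) |
	       ((readl_relaxed(&device_data->base->periphid3) & 0xff) << 24);
}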
1250
1251/**
1252 * hash_get_digest - Gets the digest.
1253 * @device_data:        Pointer to the device structure.
1254 * @digest:             User allocated byte array for the calculated digest.
1255 * @algorithm:          The algorithm in use.
1256 */
1257void hash_get_digest(struct hash_device_data *device_data,
1258                     u8 *digest, int algorithm)
1259{
1260        u32 temp_hx_val, count;
1261        int loop_ctr;
1262
1263        if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1264                dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
1265                        __func__, algorithm);
1266                return;
1267        }
1268
1269        if (algorithm == HASH_ALGO_SHA1)
1270                loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
1271        else
1272                loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1273
 1274        dev_dbg(device_data->dev, "%s: digest array: %p\n",
 1275                __func__, digest);
1276
1277        /* Copy result into digest array */
1278        for (count = 0; count < loop_ctr; count++) {
1279                temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
1280                digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
1281                digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
1282                digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
1283                digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
1284        }
1285}
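
/*
 * The extraction loop above is a manual big-endian store of each 32-bit hx
 * register. With <asm/unaligned.h> (not included in this file) the body
 * could be written as (illustration only):
 *
 *	put_unaligned_be32(readl_relaxed(&device_data->base->hx[count]),
 *			   digest + count * sizeof(u32));
 */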
1286
1287/**
 1288 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
1289 * @req: The hash request for the job.
1290 */
1291static int ahash_update(struct ahash_request *req)
1292{
1293        int ret = 0;
1294        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1295
1296        if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
1297                ret = hash_hw_update(req);
1298        /* Skip update for DMA, all data will be passed to DMA in final */
1299
 1300        if (ret)
 1301                pr_err("%s: hash_hw_update() failed!\n", __func__);
1303
1304        return ret;
1305}
1306
1307/**
 1308 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
1309 * @req:        The hash request for the job.
1310 */
1311static int ahash_final(struct ahash_request *req)
1312{
1313        int ret = 0;
1314        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1315
1316        pr_debug("%s: data size: %d\n", __func__, req->nbytes);
1317
1318        if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1319                ret = hash_dma_final(req);
1320        else
1321                ret = hash_hw_final(req);
1322
 1323        if (ret)
 1324                pr_err("%s: hash_hw/dma_final() failed\n", __func__);
1326
1327        return ret;
1328}
1329
1330static int hash_setkey(struct crypto_ahash *tfm,
1331                       const u8 *key, unsigned int keylen, int alg)
1332{
1333        int ret = 0;
1334        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1335
 1336        /*
 1337         * Freed in final.
 1338         */
1339        ctx->key = kmemdup(key, keylen, GFP_KERNEL);
1340        if (!ctx->key) {
1341                pr_err("%s: Failed to allocate ctx->key for %d\n",
1342                       __func__, alg);
1343                return -ENOMEM;
1344        }
1345        ctx->keylen = keylen;
1346
1347        return ret;
1348}
1349
1350static int ahash_sha1_init(struct ahash_request *req)
1351{
1352        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1353        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1354
1355        ctx->config.data_format = HASH_DATA_8_BITS;
1356        ctx->config.algorithm = HASH_ALGO_SHA1;
1357        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1358        ctx->digestsize = SHA1_DIGEST_SIZE;
1359
1360        return hash_init(req);
1361}
1362
1363static int ahash_sha256_init(struct ahash_request *req)
1364{
1365        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1366        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1367
1368        ctx->config.data_format = HASH_DATA_8_BITS;
1369        ctx->config.algorithm = HASH_ALGO_SHA256;
1370        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1371        ctx->digestsize = SHA256_DIGEST_SIZE;
1372
1373        return hash_init(req);
1374}
1375
1376static int ahash_sha1_digest(struct ahash_request *req)
1377{
 1378        int ret1, ret2 = 0;
1379
1380        ret1 = ahash_sha1_init(req);
1381        if (ret1)
1382                goto out;
1383
1384        ret1 = ahash_update(req);
1385        ret2 = ahash_final(req);
1386
1387out:
1388        return ret1 ? ret1 : ret2;
1389}
1390
1391static int ahash_sha256_digest(struct ahash_request *req)
1392{
 1393        int ret1, ret2 = 0;
1394
1395        ret1 = ahash_sha256_init(req);
1396        if (ret1)
1397                goto out;
1398
1399        ret1 = ahash_update(req);
1400        ret2 = ahash_final(req);
1401
1402out:
1403        return ret1 ? ret1 : ret2;
1404}
1405
1406static int hmac_sha1_init(struct ahash_request *req)
1407{
1408        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1409        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1410
1411        ctx->config.data_format = HASH_DATA_8_BITS;
1412        ctx->config.algorithm   = HASH_ALGO_SHA1;
1413        ctx->config.oper_mode   = HASH_OPER_MODE_HMAC;
1414        ctx->digestsize         = SHA1_DIGEST_SIZE;
1415
1416        return hash_init(req);
1417}
1418
1419static int hmac_sha256_init(struct ahash_request *req)
1420{
1421        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1422        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1423
1424        ctx->config.data_format = HASH_DATA_8_BITS;
1425        ctx->config.algorithm   = HASH_ALGO_SHA256;
1426        ctx->config.oper_mode   = HASH_OPER_MODE_HMAC;
1427        ctx->digestsize         = SHA256_DIGEST_SIZE;
1428
1429        return hash_init(req);
1430}
1431
1432static int hmac_sha1_digest(struct ahash_request *req)
1433{
 1434        int ret1, ret2 = 0;
1435
1436        ret1 = hmac_sha1_init(req);
1437        if (ret1)
1438                goto out;
1439
1440        ret1 = ahash_update(req);
1441        ret2 = ahash_final(req);
1442
1443out:
1444        return ret1 ? ret1 : ret2;
1445}
1446
1447static int hmac_sha256_digest(struct ahash_request *req)
1448{
 1449        int ret1, ret2 = 0;
1450
1451        ret1 = hmac_sha256_init(req);
1452        if (ret1)
1453                goto out;
1454
1455        ret1 = ahash_update(req);
1456        ret2 = ahash_final(req);
1457
1458out:
1459        return ret1 ? ret1 : ret2;
1460}
1461
1462static int hmac_sha1_setkey(struct crypto_ahash *tfm,
1463                            const u8 *key, unsigned int keylen)
1464{
1465        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
1466}
1467
1468static int hmac_sha256_setkey(struct crypto_ahash *tfm,
1469                              const u8 *key, unsigned int keylen)
1470{
1471        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
1472}
1473
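    /**
     * struct hash_algo_template - Algorithm template for this driver.
     * @conf:       Hash configuration (algorithm and operating mode).
     * @hash:       The ahash algorithm to register with the crypto API.
     */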
1474struct hash_algo_template {
1475        struct hash_config conf;
1476        struct ahash_alg hash;
1477};
1478
1479static int hash_cra_init(struct crypto_tfm *tfm)
1480{
1481        struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
1482        struct crypto_alg *alg = tfm->__crt_alg;
1483        struct hash_algo_template *hash_alg;
1484
1485        hash_alg = container_of(__crypto_ahash_alg(alg),
1486                        struct hash_algo_template,
1487                        hash);
1488
1489        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1490                                 sizeof(struct hash_req_ctx));
1491
1492        ctx->config.data_format = HASH_DATA_8_BITS;
1493        ctx->config.algorithm = hash_alg->conf.algorithm;
1494        ctx->config.oper_mode = hash_alg->conf.oper_mode;
1495
1496        ctx->digestsize = hash_alg->hash.halg.digestsize;
1497
1498        return 0;
1499}
1500
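    /*
     * Algorithm templates registered with the crypto API: plain SHA-1 and
     * SHA-256 plus their HMAC variants, all backed by the same engine.
     */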
1501static struct hash_algo_template hash_algs[] = {
1502        {
1503                .conf.algorithm = HASH_ALGO_SHA1,
1504                .conf.oper_mode = HASH_OPER_MODE_HASH,
1505                .hash = {
1506                        .init = hash_init,
1507                        .update = ahash_update,
1508                        .final = ahash_final,
1509                        .digest = ahash_sha1_digest,
1510                        .halg.digestsize = SHA1_DIGEST_SIZE,
1511                        .halg.statesize = sizeof(struct hash_ctx),
1512                        .halg.base = {
1513                                .cra_name = "sha1",
1514                                .cra_driver_name = "sha1-ux500",
1515                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1516                                              CRYPTO_ALG_ASYNC),
1517                                .cra_blocksize = SHA1_BLOCK_SIZE,
1518                                .cra_ctxsize = sizeof(struct hash_ctx),
                                    .cra_type = &crypto_ahash_type,
1519                                .cra_init = hash_cra_init,
1520                                .cra_module = THIS_MODULE,
1521                        }
1522                }
1523        },
1524        {
1525                .conf.algorithm = HASH_ALGO_SHA256,
1526                .conf.oper_mode = HASH_OPER_MODE_HASH,
1527                .hash = {
1528                        .init = hash_init,
1529                        .update = ahash_update,
1530                        .final = ahash_final,
1531                        .digest = ahash_sha256_digest,
1532                        .halg.digestsize = SHA256_DIGEST_SIZE,
1533                        .halg.statesize = sizeof(struct hash_ctx),
1534                        .halg.base = {
1535                                .cra_name = "sha256",
1536                                .cra_driver_name = "sha256-ux500",
1537                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1538                                              CRYPTO_ALG_ASYNC),
1539                                .cra_blocksize = SHA256_BLOCK_SIZE,
1540                                .cra_ctxsize = sizeof(struct hash_ctx),
1541                                .cra_type = &crypto_ahash_type,
1542                                .cra_init = hash_cra_init,
1543                                .cra_module = THIS_MODULE,
1544                        }
1545                }
1546        },
1547        {
1548                .conf.algorithm = HASH_ALGO_SHA1,
1549                .conf.oper_mode = HASH_OPER_MODE_HMAC,
1550                .hash = {
1551                        .init = hash_init,
1552                        .update = ahash_update,
1553                        .final = ahash_final,
1554                        .digest = hmac_sha1_digest,
1555                        .setkey = hmac_sha1_setkey,
1556                        .halg.digestsize = SHA1_DIGEST_SIZE,
1557                        .halg.statesize = sizeof(struct hash_ctx),
1558                        .halg.base = {
1559                                .cra_name = "hmac(sha1)",
1560                                .cra_driver_name = "hmac-sha1-ux500",
1561                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1562                                              CRYPTO_ALG_ASYNC),
1563                                .cra_blocksize = SHA1_BLOCK_SIZE,
1564                                .cra_ctxsize = sizeof(struct hash_ctx),
1565                                .cra_type = &crypto_ahash_type,
1566                                .cra_init = hash_cra_init,
1567                                .cra_module = THIS_MODULE,
1568                        }
1569                }
1570        },
1571        {
1572                .conf.algorithm = HASH_ALGO_SHA256,
1573                .conf.oper_mode = HASH_OPER_MODE_HMAC,
1574                .hash = {
1575                        .init = hash_init,
1576                        .update = ahash_update,
1577                        .final = ahash_final,
1578                        .digest = hmac_sha256_digest,
1579                        .setkey = hmac_sha256_setkey,
1580                        .halg.digestsize = SHA256_DIGEST_SIZE,
1581                        .halg.statesize = sizeof(struct hash_ctx),
1582                        .halg.base = {
1583                                .cra_name = "hmac(sha256)",
1584                                .cra_driver_name = "hmac-sha256-ux500",
1585                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1586                                              CRYPTO_ALG_ASYNC),
1587                                .cra_blocksize = SHA256_BLOCK_SIZE,
1588                                .cra_ctxsize = sizeof(struct hash_ctx),
1589                                .cra_type = &crypto_ahash_type,
1590                                .cra_init = hash_cra_init,
1591                                .cra_module = THIS_MODULE,
1592                        }
1593                }
1594        }
1595};
1596
1597/**
1598 * ahash_algs_register_all - Register the supported hash algorithms.
     * @device_data:        Structure for the hash device.
1599 */
1600static int ahash_algs_register_all(struct hash_device_data *device_data)
1601{
1602        int ret;
1603        int i;
1604        int count;
1605
1606        for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
1607                ret = crypto_register_ahash(&hash_algs[i].hash);
1608                if (ret) {
1609                        count = i;
1610                        dev_err(device_data->dev, "%s: alg registration failed\n",
1611                                hash_algs[i].hash.halg.base.cra_driver_name);
1612                        goto unreg;
1613                }
1614        }
1615        return 0;
1616unreg:
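            /* Roll back any algorithms registered before the failure. */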
1617        for (i = 0; i < count; i++)
1618                crypto_unregister_ahash(&hash_algs[i].hash);
1619        return ret;
1620}
1621
1622/**
1623 * ahash_algs_unregister_all - Unregister the algorithms registered at probe.
     * @device_data:        Structure for the hash device.
1624 */
1625static void ahash_algs_unregister_all(struct hash_device_data *device_data)
1626{
1627        int i;
1628
1629        for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
1630                crypto_unregister_ahash(&hash_algs[i].hash);
1631}
1632
1633/**
1634 * ux500_hash_probe - Function that probes the hash hardware.
1635 * @pdev: The platform device.
1636 */
1637static int ux500_hash_probe(struct platform_device *pdev)
1638{
1639        int                     ret = 0;
1640        struct resource         *res = NULL;
1641        struct hash_device_data *device_data;
1642        struct device           *dev = &pdev->dev;
1643
1644        device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
1645        if (!device_data) {
1646                ret = -ENOMEM;
1647                goto out;
1648        }
1649
1650        device_data->dev = dev;
1651        device_data->current_ctx = NULL;
1652
1653        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1654        if (!res) {
1655                dev_err(dev, "%s: platform_get_resource() failed!\n", __func__);
1656                ret = -ENODEV;
1657                goto out;
1658        }
1659
1660        device_data->phybase = res->start;
1661        device_data->base = devm_ioremap_resource(dev, res);
1662        if (IS_ERR(device_data->base)) {
1663                dev_err(dev, "%s: devm_ioremap_resource() failed!\n",
                            __func__);
1664                ret = PTR_ERR(device_data->base);
1665                goto out;
1666        }
1667        spin_lock_init(&device_data->ctx_lock);
1668        spin_lock_init(&device_data->power_state_lock);
1669
1670        /* Enable power for HASH1 hardware block */
1671        device_data->regulator = regulator_get(dev, "v-ape");
1672        if (IS_ERR(device_data->regulator)) {
1673                dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1674                ret = PTR_ERR(device_data->regulator);
1675                device_data->regulator = NULL;
1676                goto out;
1677        }
1678
1679        /* Enable the clock for HASH1 hardware block */
1680        device_data->clk = devm_clk_get(dev, NULL);
1681        if (IS_ERR(device_data->clk)) {
1682                dev_err(dev, "%s: clk_get() failed!\n", __func__);
1683                ret = PTR_ERR(device_data->clk);
1684                goto out_regulator;
1685        }
1686
1687        ret = clk_prepare(device_data->clk);
1688        if (ret) {
1689                dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1690                goto out_regulator;
1691        }
1692
1693        /* Enable device power (and clock) */
1694        ret = hash_enable_power(device_data, false);
1695        if (ret) {
1696                dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1697                goto out_clk_unprepare;
1698        }
1699
1700        ret = hash_check_hw(device_data);
1701        if (ret) {
1702                dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
1703                goto out_power;
1704        }
1705
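            /* Set up the DMA channels only when DMA mode was requested. */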
1706        if (hash_mode == HASH_MODE_DMA)
1707                hash_dma_setup_channel(device_data, dev);
1708
1709        platform_set_drvdata(pdev, device_data);
1710
1711        /* Put the new device into the device list... */
1712        klist_add_tail(&device_data->list_node, &driver_data.device_list);
1713        /* ... and signal that a new device is available. */
1714        up(&driver_data.device_allocation);
1715
1716        ret = ahash_algs_register_all(device_data);
1717        if (ret) {
1718                dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
1719                        __func__);
1720                goto out_list;
1721        }
1722
1723        dev_info(dev, "successfully registered\n");
1724        return 0;
1725
    out_list:
            /* Remove the device from the available pool before powering down. */
            down(&driver_data.device_allocation);
            klist_remove(&device_data->list_node);

1726out_power:
1727        hash_disable_power(device_data, false);
1728
1729out_clk_unprepare:
1730        clk_unprepare(device_data->clk);
1731
1732out_regulator:
1733        regulator_put(device_data->regulator);
1734
1735out:
1736        return ret;
1737}
1738
1739/**
1740 * ux500_hash_remove - Function that removes the hash device from the platform.
1741 * @pdev: The platform device.
1742 */
1743static int ux500_hash_remove(struct platform_device *pdev)
1744{
1745        struct hash_device_data *device_data;
1746        struct device           *dev = &pdev->dev;
1747
1748        device_data = platform_get_drvdata(pdev);
1749        if (!device_data) {
1750                dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1751                return -ENODEV;
1752        }
1753
1754        /* Try to decrease the number of available devices. */
1755        if (down_trylock(&driver_data.device_allocation))
1756                return -EBUSY;
1757
1758        /* Check that the device is free */
1759        spin_lock(&device_data->ctx_lock);
1760        /* current_ctx allocates a device, NULL = unallocated */
1761        if (device_data->current_ctx) {
1762                /* The device is busy */
1763                spin_unlock(&device_data->ctx_lock);
1764                /* Return the device to the pool. */
1765                up(&driver_data.device_allocation);
1766                return -EBUSY;
1767        }
1768
1769        spin_unlock(&device_data->ctx_lock);
1770
1771        /* Remove the device from the list */
1772        if (klist_node_attached(&device_data->list_node))
1773                klist_remove(&device_data->list_node);
1774
1775        /* If this was the last device, remove the services */
1776        if (list_empty(&driver_data.device_list.k_list))
1777                ahash_algs_unregister_all(device_data);
1778
1779        if (hash_disable_power(device_data, false))
1780                dev_err(dev, "%s: hash_disable_power() failed\n",
1781                        __func__);
1782
1783        clk_unprepare(device_data->clk);
1784        regulator_put(device_data->regulator);
1785
1786        return 0;
1787}
1788
1789/**
1790 * ux500_hash_shutdown - Function that shutdown the hash device.
1791 * @pdev: The platform device
1792 */
1793static void ux500_hash_shutdown(struct platform_device *pdev)
1794{
1795        struct hash_device_data *device_data;
1796
1797        device_data = platform_get_drvdata(pdev);
1798        if (!device_data) {
1799                dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
1800                        __func__);
1801                return;
1802        }
1803
1804        /* Check that the device is free */
1805        spin_lock(&device_data->ctx_lock);
1806        /* current_ctx allocates a device, NULL = unallocated */
1807        if (!device_data->current_ctx) {
1808                if (down_trylock(&driver_data.device_allocation))
1809                        dev_dbg(&pdev->dev, "%s: Hash still in use! Shutting down anyway...\n",
1810                                __func__);
1811                /*
1812                 * (Allocate the device.) current_ctx must be set to a
1813                 * non-NULL dummy value so that no other context can
1814                 * claim the device while it is shutting down.
1815                 */
1816                device_data->current_ctx++;
1817        }
1818        spin_unlock(&device_data->ctx_lock);
1819
1820        /* Remove the device from the list */
1821        if (klist_node_attached(&device_data->list_node))
1822                klist_remove(&device_data->list_node);
1823
1824        /* If this was the last device, remove the services */
1825        if (list_empty(&driver_data.device_list.k_list))
1826                ahash_algs_unregister_all(device_data);
1827
1828        if (hash_disable_power(device_data, false))
1829                dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
1830                        __func__);
1831}
1832
1833#ifdef CONFIG_PM_SLEEP
1834/**
1835 * ux500_hash_suspend - Function that suspends the hash device.
1836 * @dev:        Device to suspend.
1837 */
1838static int ux500_hash_suspend(struct device *dev)
1839{
1840        int ret;
1841        struct hash_device_data *device_data;
1842        struct hash_ctx *temp_ctx = NULL;
1843
1844        device_data = dev_get_drvdata(dev);
1845        if (!device_data) {
1846                dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
1847                return -ENODEV;
1848        }
1849
1850        spin_lock(&device_data->ctx_lock);
1851        if (!device_data->current_ctx)
1852                device_data->current_ctx++;
1853        spin_unlock(&device_data->ctx_lock);
1854
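            /*
             * temp_ctx starts out NULL, so ++temp_ctx yields the same dummy
             * non-NULL value that an idle device's current_ctx was bumped to
             * above: equality means the device was free and is now claimed.
             */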
1855        if (device_data->current_ctx == ++temp_ctx) {
1856                if (down_interruptible(&driver_data.device_allocation))
1857                        dev_dbg(dev, "%s: down_interruptible() failed\n",
1858                                __func__);
1859                ret = hash_disable_power(device_data, false);
1860
1861        } else {
1862                ret = hash_disable_power(device_data, true);
1863        }
1864
1865        if (ret)
1866                dev_err(dev, "%s: hash_disable_power() failed!\n", __func__);
1867
1868        return ret;
1869}
1870
1871/**
1872 * ux500_hash_resume - Function that resume the hash device.
1873 * @dev:        Device to resume.
1874 */
1875static int ux500_hash_resume(struct device *dev)
1876{
1877        int ret = 0;
1878        struct hash_device_data *device_data;
1879        struct hash_ctx *temp_ctx = NULL;
1880
1881        device_data = dev_get_drvdata(dev);
1882        if (!device_data) {
1883                dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
1884                return -ENODEV;
1885        }
1886
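            /* Release the dummy allocation made in ux500_hash_suspend(), if present. */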
1887        spin_lock(&device_data->ctx_lock);
1888        if (device_data->current_ctx == ++temp_ctx)
1889                device_data->current_ctx = NULL;
1890        spin_unlock(&device_data->ctx_lock);
1891
1892        if (!device_data->current_ctx)
1893                up(&driver_data.device_allocation);
1894        else
1895                ret = hash_enable_power(device_data, true);
1896
1897        if (ret)
1898                dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1899
1900        return ret;
1901}
1902#endif
1903
1904static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
1905
1906static const struct of_device_id ux500_hash_match[] = {
1907        { .compatible = "stericsson,ux500-hash" },
1908        { },
1909};
1910MODULE_DEVICE_TABLE(of, ux500_hash_match);
1911
1912static struct platform_driver hash_driver = {
1913        .probe  = ux500_hash_probe,
1914        .remove = ux500_hash_remove,
1915        .shutdown = ux500_hash_shutdown,
1916        .driver = {
1917                .name  = "hash1",
1918                .of_match_table = ux500_hash_match,
1919                .pm    = &ux500_hash_pm,
1920        }
1921};
1922
1923/**
1924 * ux500_hash_mod_init - The kernel module init function.
1925 */
1926static int __init ux500_hash_mod_init(void)
1927{
1928        klist_init(&driver_data.device_list, NULL, NULL);
1929        /* Initialize the semaphore to 0 devices (locked state) */
1930        sema_init(&driver_data.device_allocation, 0);
1931
1932        return platform_driver_register(&hash_driver);
1933}
1934
1935/**
1936 * ux500_hash_mod_fini - The kernel module exit function.
1937 */
1938static void __exit ux500_hash_mod_fini(void)
1939{
1940        platform_driver_unregister(&hash_driver);
1941}
1942
1943module_init(ux500_hash_mod_init);
1944module_exit(ux500_hash_mod_fini);
1945
1946MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
1947MODULE_LICENSE("GPL");
1948
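    /* Aliases used by the crypto core when autoloading this module. */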
1949MODULE_ALIAS_CRYPTO("sha1-all");
1950MODULE_ALIAS_CRYPTO("sha256-all");
1951MODULE_ALIAS_CRYPTO("hmac-sha1-all");
1952MODULE_ALIAS_CRYPTO("hmac-sha256-all");
1953