linux/drivers/crypto/nx/nx-sha512.c
/**
 * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

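/*
 * Per-transform setup: allocate the CSB/CPB through the common NX SHA
 * init path, select the SHA function code, and point the algorithm
 * properties and digest size at their SHA-512 values.
 */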
static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        int err;

        err = nx_crypto_ctx_sha_init(tfm);
        if (err)
                return err;

        nx_ctx_init(nx_ctx, HCOP_FC_SHA);

        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];

        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);

        return 0;
}

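/*
 * Seed the generic sha512_state with the standard SHA-512 initial hash
 * values. They are stored big-endian because the state buffer is copied
 * verbatim into the coprocessor's CPB in nx_sha512_update().
 */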
static int nx_sha512_init(struct shash_desc *desc)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memset(sctx, 0, sizeof *sctx);

        sctx->state[0] = __cpu_to_be64(SHA512_H0);
        sctx->state[1] = __cpu_to_be64(SHA512_H1);
        sctx->state[2] = __cpu_to_be64(SHA512_H2);
        sctx->state[3] = __cpu_to_be64(SHA512_H3);
        sctx->state[4] = __cpu_to_be64(SHA512_H4);
        sctx->state[5] = __cpu_to_be64(SHA512_H5);
        sctx->state[6] = __cpu_to_be64(SHA512_H6);
        sctx->state[7] = __cpu_to_be64(SHA512_H7);
        sctx->count[0] = 0;

        return 0;
}

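/*
 * Hash as much block-aligned data as the scatter/gather limits allow and
 * buffer the remainder in sctx->buf; partial blocks are carried over until
 * the next update or the final call.
 */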
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
        u32 max_sg_len;
        u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        /* 2 cases for total data len:
         *  1: < SHA512_BLOCK_SIZE: copy into state, return 0
         *  2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
         */
        total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
        if (total < SHA512_BLOCK_SIZE) {
                memcpy(sctx->buf + buf_len, data, len);
                sctx->count[0] += len;
                goto out;
        }

        memcpy(csbcpb->cpb.sha512.message_digest, sctx->state,
               SHA512_DIGEST_SIZE);
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len / sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen / NX_PAGE_SIZE);

        data_len = SHA512_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
                                  &data_len, max_sg_len);
        /* a negative outlen tells the hcall that out points to an sg list */
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (data_len != SHA512_DIGEST_SIZE) {
                rc = -EINVAL;
                goto out;
        }

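        /*
         * Walk the input in block-aligned chunks sized to what the
         * scatter/gather list can hold, feeding the partial digest from
         * each coprocessor call into the next as input_partial_digest.
         */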
        do {
                int used_sgs = 0;
                struct nx_sg *in_sg = nx_ctx->in_sg;

                if (buf_len) {
                        data_len = buf_len;
                        in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len, max_sg_len);

                        if (data_len != buf_len) {
                                rc = -EINVAL;
                                goto out;
                        }
                        used_sgs = in_sg - nx_ctx->in_sg;
                }

                /* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
                 * processed in this iteration. It is restricted by the
                 * sg list limits and by the number of sg entries already
                 * used for the leftover data (see above). Ideally we
                 * could process NX_PAGE_SIZE * max_sg_len bytes, but the
                 * data may not be page-aligned, so account for that too.
                 */
                to_process = min_t(u64, total,
                                   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
                to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);

                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);

                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

                if (data_len != (to_process - buf_len)) {
                        rc = -EINVAL;
                        goto out;
                }

                to_process = data_len + buf_len;
                leftover = total - to_process;

                /*
                 * we've hit the nx chip previously and we're updating
                 * again, so copy over the partial digest.
                 */
                memcpy(csbcpb->cpb.sha512.input_partial_digest,
                       csbcpb->cpb.sha512.message_digest,
                       SHA512_DIGEST_SIZE);

                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
                        rc = -EINVAL;
                        goto out;
                }

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                atomic_inc(&(nx_ctx->stats->sha512_ops));

                total -= to_process;
                data += to_process - buf_len;
                buf_len = 0;

        } while (leftover >= SHA512_BLOCK_SIZE);

        /* copy the leftover back into the state struct */
        if (leftover)
                memcpy(sctx->buf, data, leftover);
        sctx->count[0] += len;
        memcpy(sctx->state, csbcpb->cpb.sha512.message_digest,
               SHA512_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

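/*
 * Finalize the hash: push any buffered tail through the coprocessor with
 * NX_FDM_INTERMEDIATE cleared so the hardware applies the SHA-512 padding,
 * then copy the finished digest out of the CPB.
 */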
static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        u32 max_sg_len;
        u64 count0;
        unsigned long irq_flags;
        int rc = 0;
        int len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len / sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen / NX_PAGE_SIZE);

        /* final is represented by continuing the operation and indicating
         * that this is not an intermediate operation */
        if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
                memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
                       SHA512_DIGEST_SIZE);
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        }

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

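        /* The coprocessor generates the final SHA-512 padding itself, but
         * it needs the total message length in bits to do so; only the low
         * 64 bits of the 128-bit length field are tracked by this driver. */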
        count0 = sctx->count[0] * 8;

        csbcpb->cpb.sha512.message_bit_length_lo = count0;

        len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
        in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
                                 max_sg_len);

        if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
                rc = -EINVAL;
                goto out;
        }

        len = SHA512_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  max_sg_len);

        /* negative lengths tell the hcall these addresses are sg lists */
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
        }

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->sha512_ops));
        atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

        memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

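/*
 * Export/import simply copy the generic sha512_state, which is all the
 * context this driver keeps between calls, so a partially-hashed request
 * can be resumed by any implementation using the same state layout.
 */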
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}

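/*
 * A cra_priority of 300 makes this implementation preferred over the
 * generic software sha512 when NX hardware is present. Callers reach it
 * through the usual shash API, e.g.:
 *
 *      struct crypto_shash *tfm = crypto_alloc_shash("sha512", 0, 0);
 */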
struct shash_alg nx_shash_sha512_alg = {
        .digestsize = SHA512_DIGEST_SIZE,
        .init       = nx_sha512_init,
        .update     = nx_sha512_update,
        .final      = nx_sha512_final,
        .export     = nx_sha512_export,
        .import     = nx_sha512_import,
        .descsize   = sizeof(struct sha512_state),
        .statesize  = sizeof(struct sha512_state),
        .base       = {
                .cra_name        = "sha512",
                .cra_driver_name = "sha512-nx",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize   = SHA512_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_init        = nx_crypto_ctx_sha512_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
};