linux/drivers/crypto/nx/nx-aes-xcbc.c
/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

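/*
 * Per-request XCBC state: "state" receives the running CV/MAC from the NX
 * unit, "buffer" holds the trailing partial (or deliberately withheld
 * final) block between calls, and "count" is the number of bytes currently
 * buffered.
 */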
struct xcbc_state {
        u8 state[AES_BLOCK_SIZE];
        unsigned int count;
        u8 buffer[AES_BLOCK_SIZE];
};

static int nx_xcbc_set_key(struct crypto_shash *tfm,
                           const u8            *in_key,
                           unsigned int         key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;

        switch (key_len) {
        case AES_KEYSIZE_128:
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        default:
                return -EINVAL;
        }

        memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);

        return 0;
}
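
/*
 * Note: only the base key K lands in the CPB. The RFC 3566 subkeys
 * K1 = E(K, 0x01...01), K2 = E(K, 0x02...02) and K3 = E(K, 0x03...03)
 * are derived by the coprocessor itself; nx_xcbc_empty() below recreates
 * K1 and K3 in software only because the hardware rejects 0 byte
 * operations.
 */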

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        u8 keys[2][AES_BLOCK_SIZE];
        u8 key[32];
        int rc = 0;
        int len;

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
        memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
        NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

        /* K1 and K3 base patterns */
        memset(keys[0], 0x01, sizeof(keys[0]));
        memset(keys[1], 0x03, sizeof(keys[1]));

        len = sizeof(keys);
        /* Generate K1 and K3 by encrypting the patterns */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
                                 nx_ctx->ap->sglen);
        if (len != sizeof(keys)) {
                rc = -EINVAL;
                goto out;
        }

        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
                                  nx_ctx->ap->sglen);
        if (len != sizeof(keys)) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* XOR K3 with the padding for a 0 length message */
        keys[1][0] ^= 0x80;

        len = sizeof(keys[1]);

        /* Encrypt the final result */
        memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
                                 nx_ctx->ap->sglen);
        if (len != sizeof(keys[1])) {
                rc = -EINVAL;
                goto out;
        }

        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  nx_ctx->ap->sglen);
        if (len != AES_BLOCK_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

out:
        /* Restore XCBC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
        memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        return rc;
}
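
/*
 * Reference check for the path above: RFC 3566 test case #1 feeds
 * K = 000102030405060708090a0b0c0d0e0f and an empty message, and expects
 * AES-XCBC-MAC = 75f0251d528ac01c4573dfd584d79f29.
 */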

static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        int err;

        err = nx_crypto_ctx_aes_xcbc_init(tfm);
        if (err)
                return err;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
        csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

        return 0;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
        struct xcbc_state *sctx = shash_desc_ctx(desc);

        memset(sctx, 0, sizeof *sctx);

        return 0;
}

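/*
 * Buffer message bytes and offload full blocks to the NX unit, always
 * keeping at least one final block back: the hardware rejects 0 byte
 * operations and XCBC needs the last block processed with the finalize
 * semantics in nx_xcbc_final().
 */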
static int nx_xcbc_update(struct shash_desc *desc,
                          const u8          *data,
                          unsigned int       len)
{
        struct xcbc_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u32 to_process = 0, leftover, total;
        unsigned int max_sg_len;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        total = sctx->count + len;

        /* 2 cases for total data len:
         *  1: <= AES_BLOCK_SIZE: copy into state, return 0
         *  2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
         */
        if (total <= AES_BLOCK_SIZE) {
                memcpy(sctx->buffer + sctx->count, data, len);
                sctx->count += len;
                goto out;
        }

        in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                                nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                                nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        data_len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
                                  &data_len, nx_ctx->ap->sglen);
        if (data_len != AES_BLOCK_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        do {
                to_process = total - to_process;
                to_process = to_process & ~(AES_BLOCK_SIZE - 1);

                leftover = total - to_process;

                /* the hardware will not accept a 0 byte operation for this
                 * algorithm and the operation MUST be finalized to be correct.
                 * So if we happen to get an update that falls on a block sized
                 * boundary, we must save off the last block to finalize with
                 * later. */
                if (!leftover) {
                        to_process -= AES_BLOCK_SIZE;
                        leftover = AES_BLOCK_SIZE;
                }
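                /* e.g. sctx->count = 10, len = 38: total = 48, to_process
                 * first rounds to 48 and leftover hits 0, so one block is
                 * held back: to_process = 32, leftover = 16, and those 16
                 * bytes wait in sctx->buffer for nx_xcbc_final(). */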

                if (sctx->count) {
                        data_len = sctx->count;
                        in_sg = nx_build_sg_list(nx_ctx->in_sg,
                                                (u8 *) sctx->buffer,
                                                &data_len,
                                                max_sg_len);
                        if (data_len != sctx->count) {
                                rc = -EINVAL;
                                goto out;
                        }
                }

                data_len = to_process - sctx->count;
                in_sg = nx_build_sg_list(in_sg,
                                        (u8 *) data,
                                        &data_len,
                                        max_sg_len);

                if (data_len != to_process - sctx->count) {
                        rc = -EINVAL;
                        goto out;
                }

                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);

                /* we've hit the nx chip previously and we're updating again,
                 * so copy over the partial digest */
                if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                        memcpy(csbcpb->cpb.aes_xcbc.cv,
                                csbcpb->cpb.aes_xcbc.out_cv_mac,
                                AES_BLOCK_SIZE);
                }

                NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
                        rc = -EINVAL;
                        goto out;
                }

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                atomic_inc(&(nx_ctx->stats->aes_ops));

                /* everything after the first update is continuation */
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                total -= to_process;
                data += to_process - sctx->count;
                sctx->count = 0;
                in_sg = nx_ctx->in_sg;
        } while (leftover > AES_BLOCK_SIZE);

        /* copy the leftover back into the state struct */
        memcpy(sctx->buffer, data, leftover);
        sctx->count = leftover;

out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
        struct xcbc_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        unsigned long irq_flags;
        int rc = 0;
        int len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
                memcpy(csbcpb->cpb.aes_xcbc.cv,
                       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
        } else if (sctx->count == 0) {
                /*
                 * we've never seen an update, so this is a 0 byte op. The
                 * hardware cannot handle a 0 byte op, so just ECB to
                 * generate the hash.
                 */
                rc = nx_xcbc_empty(desc, out);
                goto out;
        }

        /* final is represented by continuing the operation and indicating that
         * this is not an intermediate operation */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

        len = sctx->count;
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
                                 &len, nx_ctx->ap->sglen);

        if (len != sctx->count) {
                rc = -EINVAL;
                goto out;
        }

        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  nx_ctx->ap->sglen);

        if (len != AES_BLOCK_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
        }

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->aes_ops));

        memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

struct shash_alg nx_shash_aes_xcbc_alg = {
        .digestsize = AES_BLOCK_SIZE,
        .init       = nx_xcbc_init,
        .update     = nx_xcbc_update,
        .final      = nx_xcbc_final,
        .setkey     = nx_xcbc_set_key,
        .descsize   = sizeof(struct xcbc_state),
        .statesize  = sizeof(struct xcbc_state),
        .base       = {
                .cra_name        = "xcbc(aes)",
                .cra_driver_name = "xcbc-aes-nx",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize   = AES_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_init        = nx_crypto_ctx_aes_xcbc_init2,
                .cra_exit        = nx_crypto_ctx_exit,
        }
};
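
/*
 * A minimal sketch of in-kernel usage, assuming the alg registered
 * successfully; "key", "msg", "msg_len" and "mac" are illustrative only
 * (error handling elided):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
 *	struct shash_desc *desc;
 *
 *	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
 *		       GFP_KERNEL);
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_digest(desc, msg, msg_len, mac);
 *	kfree(desc);
 *	crypto_free_shash(tfm);
 *
 * crypto_shash_digest() drives nx_xcbc_init/update/final above; the
 * cra_priority of 300 makes this implementation preferred over the
 * generic "xcbc(aes)" template when the NX unit is available.
 */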