linux/drivers/crypto/ccp/ccp-crypto-aes-xts.c
/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

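/* Name and driver-name pair for each XTS algorithm this module registers */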
struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-ccp",
	},
};

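/* Map a request length to the CCP's XTS unit-size code.  Sizes whose
 * value is CCP_XTS_AES_UNIT_SIZE__LAST are not handled by the hardware
 * and are routed to the software fallback instead.
 */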
struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

static struct ccp_unit_size_map unit_size_map[] = {
	{
		.size	= 4096,
		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
	},
	{
		.size	= 2048,
		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size	= 1024,
		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size	= 512,
		.value	= CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size	= 256,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 128,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 64,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 32,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 16,
		.value	= CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size	= 1,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
};

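/* Completion callback: propagate the IV used for the operation back
 * into the original request.
 */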
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

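/* Only a 256-bit combined key (128-bit AES key plus 128-bit tweak key)
 * is retained for the CCP; the key is always programmed into the
 * fallback tfm as well so that other key sizes can be serviced there.
 */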
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	/* Only support 128-bit AES key with a 128-bit Tweak key,
	 * otherwise use the fallback
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}

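/* Common encrypt/decrypt path: validate the request, pick a hardware
 * unit size, defer to the fallback for unsupported unit or key sizes,
 * and otherwise build an XTS command and queue it to the CCP.
 */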
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int unit;
	u32 unit_size;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
	if (req->nbytes <= unit_size_map[0].size) {
		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
				unit_size = unit_size_map[unit].value;
				break;
			}
		}
	}

	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);

		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

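/* Allocate the async skcipher fallback when the tfm is created; it is
 * released again in ccp_aes_xts_cra_exit().
 */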
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_skcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver xts(aes)\n");
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_skcipher = fallback_tfm;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}

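/* Build and register the ablkcipher algorithm for one name/driver-name
 * pair.  The key size limits are doubled because an XTS key carries
 * both the AES key and the tweak key.
 */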
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

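/* Register every algorithm described in aes_xts_algs[] and track the
 * registrations on the caller's list for later teardown.
 */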
int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
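
/* Illustrative only -- not part of this driver.  A minimal sketch of how a
 * kernel caller might exercise "xts(aes)" through the generic skcipher API;
 * when this module is loaded and its priority wins, the request is serviced
 * by the CCP (or by its fallback for unsupported sizes).  The function name,
 * the 512-byte unit, and the zero key/IV below are hypothetical, and the
 * sketch assumes a kernel that provides DECLARE_CRYPTO_WAIT()/crypto_wait_req().
 */
static int __maybe_unused example_xts_encrypt_one_unit(void)
{
	static u8 buf[512];			/* one 512-byte data unit, in place */
	u8 key[AES_KEYSIZE_128 * 2] = { 0 };	/* AES key followed by tweak key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* data unit number / tweak */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* Encrypt in place and wait for the (possibly asynchronous) result */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}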