linux/drivers/crypto/ccp/ccp-crypto-aes-xts.c
/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-ccp",
	},
};

struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

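/* Lookup table mapping a request length to the CCP's XTS unit size
 * selector. Entries must stay in the same order as the
 * CCP_XTS_AES_UNIT_SIZE_* enumeration: the lookup in
 * ccp_aes_xts_crypt() uses the matching array index directly as the
 * unit size value.
 */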
static struct ccp_unit_size_map xts_unit_sizes[] = {
	{
		.size	= 16,
		.value	= CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size	= 512,
		.value	= CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size	= 1024,
		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size	= 2048,
		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size	= 4096,
		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
	},
};

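/* Completion callback: on success, copy the final IV produced by the
 * CCP back into the request so the caller sees the updated value.
 */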
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

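/* Set the XTS key, which is two equal-size AES keys concatenated
 * together, so key_len / 2 is cached as the per-key length. The key is
 * also programmed into the fallback skcipher so that requests the CCP
 * cannot handle (see ccp_aes_xts_crypt()) can still be serviced. A key
 * the device cannot use (e.g. a 256-bit key on a version 3 CCP) is
 * deliberately not rejected here; such requests are routed to the
 * fallback at request time.
 */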
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
	struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
	unsigned int ccpversion = ccp_version();
	int ret;

	ret = xts_check_key(xfm, key, key_len);
	if (ret)
		return ret;

	/* Version 3 devices support 128-bit keys; version 5 devices can
	 * accommodate 128- and 256-bit keys.
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	case AES_KEYSIZE_256 * 2:
		if (ccpversion > CCP_VERSION(3, 0))
			memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}

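/* Encrypt or decrypt a request. If the CCP cannot satisfy it
 * (unsupported unit size or key size), hand it to the software
 * fallback; otherwise build a CCP command and queue it for
 * asynchronous processing.
 */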
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int ccpversion = ccp_version();
	unsigned int fallback = 0;
	unsigned int unit;
	u32 unit_size;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

	/* Check conditions under which the CCP can fulfill a request. The
	 * device can handle input plaintext of a length that is a multiple
	 * of the unit_size, but the crypto implementation only supports
	 * the unit_size being equal to the input length. This limits the
	 * number of scenarios we can handle.
	 */
	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
	for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
		if (req->nbytes == xts_unit_sizes[unit].size) {
			unit_size = unit;
			break;
		}
	}
	/* The CCP has restrictions on block sizes. Also, a version 3 device
	 * only supports AES-128 operations; version 5 CCPs support both
	 * AES-128 and -256 operations.
	 */
	if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
		fallback = 1;
	if ((ccpversion < CCP_VERSION(5, 0)) &&
	    (ctx->u.aes.key_len != AES_KEYSIZE_128))
		fallback = 1;
	if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
	    (ctx->u.aes.key_len != AES_KEYSIZE_256))
		fallback = 1;
	if (fallback) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);

		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

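/* Initialize the transform: allocate the software fallback cipher.
 * The allocation mask requests a synchronous implementation
 * (CRYPTO_ALG_ASYNC clear) that does not itself require a fallback
 * (CRYPTO_ALG_NEED_FALLBACK clear).
 */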
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_skcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver xts(aes)\n");
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_skcipher = fallback_tfm;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}

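/* Allocate and register a single XTS-AES algorithm instance with the
 * crypto API, advertising asynchronous operation with a fallback. The
 * min/max keysizes are doubled because an XTS key is a pair of AES
 * keys.
 */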
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}