linux/drivers/crypto/ccp/ccp-crypto-rsa.c
/*
 * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static inline struct akcipher_request *akcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct akcipher_request, base);
}

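/* Copy a key component into a freshly allocated buffer, skipping any
 * leading zero bytes so that only the significant part is kept.
 */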
static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
					    const u8 *buf, size_t sz)
{
	int nskip;

	for (nskip = 0; nskip < sz; nskip++)
		if (buf[nskip])
			break;
	*kplen = sz - nskip;
	*kpbuf = kzalloc(*kplen, GFP_KERNEL);
	if (!*kpbuf)
		return -ENOMEM;
	memcpy(*kpbuf, buf + nskip, *kplen);

	return 0;
}

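/* Completion callback: on success, report the output length as the
 * key (modulus) size in bytes.
 */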
static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
{
	struct akcipher_request *req = akcipher_request_cast(async_req);
	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);

	if (ret)
		return ret;

	req->dst_len = rctx->cmd.u.rsa.key_size >> 3;

	return 0;
}

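/* The maximum output size is the byte length of the modulus. */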
static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
{
	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->u.rsa.n_len;
}

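/* Build a CCP RSA command for the request and queue it for the
 * coprocessor: encryption uses the public exponent (e), decryption the
 * private exponent (d); both use the modulus (n) cached at setkey time.
 */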
static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
	int ret = 0;

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_RSA;

	rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
	if (encrypt) {
		rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
		rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
	} else {
		rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
		rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
	}
	rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
	rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
	rctx->cmd.u.rsa.src = req->src;
	rctx->cmd.u.rsa.src_len = req->src_len;
	rctx->cmd.u.rsa.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_rsa_encrypt(struct akcipher_request *req)
{
	return ccp_rsa_crypt(req, true);
}

static int ccp_rsa_decrypt(struct akcipher_request *req)
{
	return ccp_rsa_crypt(req, false);
}

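/* Reject key sizes (in bits) outside the range handled here. */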
static int ccp_check_key_length(unsigned int len)
{
	/* In bits */
	if (len < 8 || len > 4096)
		return -EINVAL;
	return 0;
}

static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
{
	/* Clean up old key data */
	kzfree(ctx->u.rsa.e_buf);
	ctx->u.rsa.e_buf = NULL;
	ctx->u.rsa.e_len = 0;
	kzfree(ctx->u.rsa.n_buf);
	ctx->u.rsa.n_buf = NULL;
	ctx->u.rsa.n_len = 0;
	kzfree(ctx->u.rsa.d_buf);
	ctx->u.rsa.d_buf = NULL;
	ctx->u.rsa.d_len = 0;
}

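/* Parse a BER-encoded RSA key with the kernel RSA helpers and cache the
 * significant bytes of n, e (and d for private keys) as scatterlists
 * for use by later crypt operations.
 */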
static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key;
	int ret;

	ccp_rsa_free_key_bufs(ctx);
	memset(&raw_key, 0, sizeof(raw_key));

	/* Code borrowed from crypto/rsa.c */
	if (private)
		ret = rsa_parse_priv_key(&raw_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		goto n_key;

	ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len,
					raw_key.n, raw_key.n_sz);
	if (ret)
		goto key_err;
	sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);

	ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */
	if (ccp_check_key_length(ctx->u.rsa.key_len)) {
		ret = -EINVAL;
		goto key_err;
	}

	ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len,
					raw_key.e, raw_key.e_sz);
	if (ret)
		goto key_err;
	sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);

	if (private) {
		ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf,
						&ctx->u.rsa.d_len,
						raw_key.d, raw_key.d_sz);
		if (ret)
			goto key_err;
		sg_init_one(&ctx->u.rsa.d_sg,
			    ctx->u.rsa.d_buf, ctx->u.rsa.d_len);
	}

	return 0;

key_err:
	ccp_rsa_free_key_bufs(ctx);

n_key:
	return ret;
}

static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return ccp_rsa_setkey(tfm, key, keylen, true);
}

static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return ccp_rsa_setkey(tfm, key, keylen, false);
}

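/* Per-transform setup and teardown: set the request context size and
 * completion callback, and free any cached key material on exit.
 */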
static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);

	akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
	ctx->complete = ccp_rsa_complete;

	return 0;
}

static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	ccp_rsa_free_key_bufs(ctx);
}

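/* Default akcipher operations; sign and verify reuse the raw
 * decrypt/encrypt primitives.
 */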
static struct akcipher_alg ccp_rsa_defaults = {
	.encrypt = ccp_rsa_encrypt,
	.decrypt = ccp_rsa_decrypt,
	.sign = ccp_rsa_decrypt,
	.verify = ccp_rsa_encrypt,
	.set_pub_key = ccp_rsa_setpubkey,
	.set_priv_key = ccp_rsa_setprivkey,
	.max_size = ccp_rsa_maxsize,
	.init = ccp_rsa_init_tfm,
	.exit = ccp_rsa_exit_tfm,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-ccp",
		.cra_priority = CCP_CRA_PRIORITY,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = 2 * sizeof(struct ccp_ctx),
	},
};

struct ccp_rsa_def {
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int reqsize;
	struct akcipher_alg *alg_defaults;
};

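/* Supported RSA implementations, keyed by the minimum CCP version. */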
static struct ccp_rsa_def rsa_algs[] = {
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "rsa",
		.driver_name	= "rsa-ccp",
		.reqsize	= sizeof(struct ccp_rsa_req_ctx),
		.alg_defaults	= &ccp_rsa_defaults,
	}
};

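/* Instantiate one algorithm from its defaults, register it with the
 * crypto API and track it for later unregistration.
 */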
static int ccp_register_rsa_alg(struct list_head *head,
				const struct ccp_rsa_def *def)
{
	struct ccp_crypto_akcipher_alg *ccp_alg;
	struct akcipher_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;
	*alg = *def->alg_defaults;
	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->driver_name);
	ret = crypto_register_akcipher(alg);
	if (ret) {
		pr_err("%s akcipher algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_rsa_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	/* Register the RSA algorithm in standard mode
	 * This works for CCP v3 and later
	 */
	for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
		if (rsa_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}