linux/drivers/crypto/virtio/virtio_crypto_algs.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_skcipher_ctx {
        struct crypto_engine_ctx enginectx;
        struct virtio_crypto *vcrypto;
        struct crypto_skcipher *tfm;

        struct virtio_crypto_sym_session_info enc_sess_info;
        struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
        struct virtio_crypto_request base;

        /* Cipher or aead */
        uint32_t type;
        struct virtio_crypto_skcipher_ctx *skcipher_ctx;
        struct skcipher_request *skcipher_req;
        uint8_t *iv;
        /* Encryption? */
        bool encrypt;
};

struct virtio_crypto_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
        struct skcipher_alg algo;
};

/*
 * The algs_lock protects the global virtio_crypto_algs array below
 * (its active_devs counters) and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct skcipher_request *req,
        int err);

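/*
 * Data virtqueue callback: translate the device status code into a
 * Linux errno and finalize the symmetric (cipher) request.
 */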
static void virtio_crypto_dataq_sym_callback
                (struct virtio_crypto_request *vc_req, int len)
{
        struct virtio_crypto_sym_request *vc_sym_req =
                container_of(vc_req, struct virtio_crypto_sym_request, base);
        struct skcipher_request *ablk_req;
        int error;

        /* Finish the encrypt or decrypt process */
        if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
                switch (vc_req->status) {
                case VIRTIO_CRYPTO_OK:
                        error = 0;
                        break;
                case VIRTIO_CRYPTO_INVSESS:
                case VIRTIO_CRYPTO_ERR:
                        error = -EINVAL;
                        break;
                case VIRTIO_CRYPTO_BADMSG:
                        error = -EBADMSG;
                        break;
                default:
                        error = -EIO;
                        break;
                }
                ablk_req = vc_sym_req->skcipher_req;
                virtio_crypto_skcipher_finalize_req(vc_sym_req,
                                                        ablk_req, error);
        }
}

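/* Sum the byte lengths of all entries in a scatterlist. */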
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
        u64 total = 0;

        for (; sg; sg = sg_next(sg))
                total += sg->length;

        return total;
}

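/*
 * Map a valid AES key length (128/192/256 bits) to the virtio-crypto
 * algorithm number; only AES-CBC is supported here.
 */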
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
        switch (key_len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

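/*
 * Create one cipher session (encrypt or decrypt direction) on the
 * device via the control virtqueue and stash the returned session id
 * in @ctx. The call is synchronous: we busy-wait for the host reply.
 */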
static int virtio_crypto_alg_skcipher_init_session(
                struct virtio_crypto_skcipher_ctx *ctx,
                uint32_t alg, const uint8_t *key,
                unsigned int keylen,
                int encrypt)
{
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
        unsigned int tmp;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;

        /*
         * Avoid DMA from the stack: use a dynamically-allocated
         * buffer for the key.
         */
        uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

        if (!cipher_key)
                return -ENOMEM;

        spin_lock(&vcrypto->ctrl_lock);
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
        vcrypto->ctrl.header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Fill in the cipher parameters */
        vcrypto->ctrl.u.sym_create_session.op_type =
                cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
                vcrypto->ctrl.header.algo;
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
                cpu_to_le32(keylen);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
                cpu_to_le32(op);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Set key */
        sg_init_one(&key_sg, cipher_key, keylen);
        sgs[num_out++] = &key_sg;

        /* Return status and session id back */
        sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
        sgs[num_out + num_in++] = &inhdr;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                                num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                kfree_sensitive(cipher_key);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        /*
         * We trap into the hypervisor, so the request should be
         * handled immediately.
         */
        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Create session failed status: %u\n",
                        le32_to_cpu(vcrypto->input.status));
                kfree_sensitive(cipher_key);
                return -EINVAL;
        }

        if (encrypt)
                ctx->enc_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);
        else
                ctx->dec_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);

        spin_unlock(&vcrypto->ctrl_lock);

        kfree_sensitive(cipher_key);
        return 0;
}

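/*
 * Destroy the encrypt or decrypt session whose id is stored in @ctx,
 * again synchronously over the control virtqueue.
 */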
static int virtio_crypto_alg_skcipher_close_session(
                struct virtio_crypto_skcipher_ctx *ctx,
                int encrypt)
{
        struct scatterlist outhdr, status_sg, *sgs[2];
        unsigned int tmp;
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;

        spin_lock(&vcrypto->ctrl_lock);
        vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        destroy_session = &vcrypto->ctrl.u.destroy_session;

        if (encrypt)
                destroy_session->session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
        else
                destroy_session->session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Return status back */
        sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
                sizeof(vcrypto->ctrl_status.status));
        sgs[num_out + num_in++] = &status_sg;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                        num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                        vcrypto->ctrl_status.status,
                        le64_to_cpu(destroy_session->session_id));

                return -EINVAL;
        }
        spin_unlock(&vcrypto->ctrl_lock);

        return 0;
}

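/*
 * Create both the encryption and the decryption session for @key.
 * If the second one fails, the first is torn down so no half-open
 * session pair is left behind.
 */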
static int virtio_crypto_alg_skcipher_init_sessions(
                struct virtio_crypto_skcipher_ctx *ctx,
                const uint8_t *key, unsigned int keylen)
{
        uint32_t alg;
        int ret;
        struct virtio_crypto *vcrypto = ctx->vcrypto;

        if (keylen > vcrypto->max_cipher_key_len) {
                pr_err("virtio_crypto: the key is too long\n");
                return -EINVAL;
        }

        if (virtio_crypto_alg_validate_key(keylen, &alg))
                return -EINVAL;

        /* Create encryption session */
        ret = virtio_crypto_alg_skcipher_init_session(ctx,
                        alg, key, keylen, 1);
        if (ret)
                return ret;
        /* Create decryption session */
        ret = virtio_crypto_alg_skcipher_init_session(ctx,
                        alg, key, keylen, 0);
        if (ret) {
                virtio_crypto_alg_skcipher_close_session(ctx, 1);
                return ret;
        }
        return 0;
}

/* Note: this is the kernel crypto API entry point (skcipher .setkey) */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
                                         const uint8_t *key,
                                         unsigned int keylen)
{
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        uint32_t alg;
        int ret;

        ret = virtio_crypto_alg_validate_key(keylen, &alg);
        if (ret)
                return ret;

        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
                                      virtcrypto_get_dev_node(node,
                                      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                /* Rekeying: close the previously created sessions first */
                virtio_crypto_alg_skcipher_close_session(ctx, 1);
                virtio_crypto_alg_skcipher_close_session(ctx, 0);
        }

        ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
        if (ret) {
                virtcrypto_dev_put(ctx->vcrypto);
                ctx->vcrypto = NULL;

                return ret;
        }

        return 0;
}

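/*
 * Build the virtio request for one skcipher operation and post it on
 * the data virtqueue: an out sg each for the request header and the
 * IV, then the source buffers, followed by the destination buffers
 * and a status byte as in sgs. Completion is reported asynchronously
 * through virtio_crypto_dataq_sym_callback().
 */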
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
                struct skcipher_request *req,
                struct data_queue *data_vq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data;
        int src_nents, dst_nents;
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;
        struct scatterlist *sg;

        src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (src_nents < 0) {
                pr_err("Invalid number of src SG.\n");
                return src_nents;
        }

        dst_nents = sg_nents(req->dst);

        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
                        src_nents, dst_nents);

        /* Why 3?  outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
        sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;

        req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
                return -ENOMEM;
        }

        vc_req->req_data = req_data;
        vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        /* Head of operation */
        if (vc_sym_req->encrypt) {
                req_data->header.session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
        } else {
                req_data->header.session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
        }
        req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
        req_data->u.sym_req.u.cipher.para.src_data_len =
                        cpu_to_le32(req->cryptlen);

        dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
        if (unlikely(dst_len > U32_MAX)) {
                pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
                err = -EINVAL;
                goto free;
        }

        dst_len = min_t(unsigned int, req->cryptlen, dst_len);
        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                        req->cryptlen, dst_len);

        if (unlikely(req->cryptlen + dst_len + ivsize +
                sizeof(vc_req->status) > vcrypto->max_size)) {
                pr_err("virtio_crypto: The length is too big\n");
                err = -EINVAL;
                goto free;
        }

        req_data->u.sym_req.u.cipher.para.dst_data_len =
                        cpu_to_le32((uint32_t)dst_len);

        /* Outhdr */
        sg_init_one(&outhdr, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr;

        /* IV */

        /*
         * Avoid DMA from the stack: use a dynamically-allocated
         * buffer for the IV.
         */
        iv = kzalloc_node(ivsize, GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!iv) {
                err = -ENOMEM;
                goto free;
        }
        memcpy(iv, req->iv, ivsize);
        /*
         * For decryption, save the last ciphertext block into req->iv
         * now (the source may be overwritten in place); it becomes the
         * output IV required for CBC chaining.
         */
        if (!vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->iv, req->src,
                                         req->cryptlen - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);

        sg_init_one(&iv_sg, iv, ivsize);
        sgs[num_out++] = &iv_sg;
        vc_sym_req->iv = iv;

        /* Source data */
        for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
                sgs[num_out++] = sg;

        /* Destination data */
        for (sg = req->dst; sg; sg = sg_next(sg))
                sgs[num_out + num_in++] = sg;

        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &status_sg;

        vc_req->sgs = sgs;

        spin_lock_irqsave(&data_vq->lock, flags);
        err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
                                num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (unlikely(err < 0))
                goto free_iv;

        return 0;

free_iv:
        kfree_sensitive(iv);
free:
        kfree_sensitive(req_data);
        kfree(sgs);
        return err;
}

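/*
 * skcipher .encrypt entry point: validate the length (CBC requires a
 * multiple of the block size) and hand the request to the crypto
 * engine bound to the data virtqueue.
 */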
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                skcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->cryptlen)
                return 0;
        if (req->cryptlen % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->skcipher_ctx = ctx;
        vc_sym_req->skcipher_req = req;
        vc_sym_req->encrypt = true;

        return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

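/* skcipher .decrypt entry point; mirrors the encrypt path above. */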
static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                skcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->cryptlen)
                return 0;
        if (req->cryptlen % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->skcipher_ctx = ctx;
        vc_sym_req->skcipher_req = req;
        vc_sym_req->encrypt = false;

        return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

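/*
 * Per-tfm setup: reserve room for our per-request context in every
 * skcipher_request and wire up the crypto-engine callbacks.
 */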
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
        ctx->tfm = tfm;

        ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
        ctx->enginectx.op.prepare_request = NULL;
        ctx->enginectx.op.unprepare_request = NULL;
        return 0;
}

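/* Per-tfm teardown: close both sessions and drop the device reference. */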
static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        if (!ctx->vcrypto)
                return;

        virtio_crypto_alg_skcipher_close_session(ctx, 1);
        virtio_crypto_alg_skcipher_close_session(ctx, 0);
        virtcrypto_dev_put(ctx->vcrypto);
        ctx->vcrypto = NULL;
}

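/*
 * crypto-engine do_one_request hook: issue the request on its data
 * virtqueue and kick the device.
 */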
int virtio_crypto_skcipher_crypt_req(
        struct crypto_engine *engine, void *vreq)
{
        struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
        struct virtio_crypto_sym_request *vc_sym_req =
                                skcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct data_queue *data_vq = vc_req->dataq;
        int ret;

        ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
        if (ret < 0)
                return ret;

        virtqueue_kick(data_vq->vq);

        return 0;
}

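/*
 * Common completion path: update req->iv for CBC chaining on encrypt,
 * scrub the IV copy, release request resources and tell the crypto
 * engine the request is done.
 */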
static void virtio_crypto_skcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct skcipher_request *req,
        int err)
{
        /* For encryption, the last ciphertext block is the output IV */
        if (vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->cryptlen - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);
        kfree_sensitive(vc_sym_req->iv);
        virtcrypto_clear_request(&vc_sym_req->base);

        crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
                                           req, err);
}

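/* All algorithms this driver can expose; only AES-CBC today. */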
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
        .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
        .algo = {
                .base.cra_name          = "cbc(aes)",
                .base.cra_driver_name   = "virtio_crypto_aes_cbc",
                .base.cra_priority      = 150,
                .base.cra_flags         = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_ALLOCATES_MEMORY,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct virtio_crypto_skcipher_ctx),
                .base.cra_module        = THIS_MODULE,
                .init                   = virtio_crypto_skcipher_init,
                .exit                   = virtio_crypto_skcipher_exit,
                .setkey                 = virtio_crypto_skcipher_setkey,
                .decrypt                = virtio_crypto_skcipher_decrypt,
                .encrypt                = virtio_crypto_skcipher_encrypt,
                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
        },
} };

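/*
 * Register with the crypto API every algorithm this device supports.
 * The skcipher itself is registered only once, for the first device;
 * active_devs counts how many devices currently back it.
 */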
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
        int ret = 0;
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 0) {
                        ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
                        if (ret)
                                goto unlock;
                }

                virtio_crypto_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
                         virtio_crypto_algs[i].algo.base.cra_name);
        }

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

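/*
 * Reverse of virtio_crypto_algs_register(): drop this device's
 * reference and unregister an algorithm once no device backs it.
 */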
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (virtio_crypto_algs[i].active_devs == 0 ||
                    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 1)
                        crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

                virtio_crypto_algs[i].active_devs--;
        }

        mutex_unlock(&algs_lock);
}
 670