linux/drivers/crypto/virtio/virtio_crypto_core.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2 /* Driver for Virtio crypto device.
   3  *
   4  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
   5  */
   6
   7#include <linux/err.h>
   8#include <linux/module.h>
   9#include <linux/virtio_config.h>
  10#include <linux/cpu.h>
  11
  12#include <uapi/linux/virtio_crypto.h>
  13#include "virtio_crypto_common.h"
  14
  15
  16void
  17virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
  18{
  19        if (vc_req) {
  20                kfree_sensitive(vc_req->req_data);
  21                kfree(vc_req->sgs);
  22        }
  23}
  24
/*
 * Completion callback for a data virtqueue: drain every finished request
 * and invoke its algorithm-specific completion handler (alg_cb).
 * Runs with interrupts disabled while the per-queue lock is held.
 */
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_request *vc_req;
	unsigned long flags;
	unsigned int len;
	unsigned int qid = vq->index;

	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			/*
			 * Drop the queue lock across the completion handler
			 * so alg_cb runs without it held (it may interact
			 * with this queue again).
			 */
			spin_unlock_irqrestore(
				&vcrypto->data_vq[qid].lock, flags);
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
			spin_lock_irqsave(
				&vcrypto->data_vq[qid].lock, flags);
		}
	/* Loop again if buffers arrived while callbacks were disabled */
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
  47
  48static int virtcrypto_find_vqs(struct virtio_crypto *vi)
  49{
  50        vq_callback_t **callbacks;
  51        struct virtqueue **vqs;
  52        int ret = -ENOMEM;
  53        int i, total_vqs;
  54        const char **names;
  55        struct device *dev = &vi->vdev->dev;
  56
  57        /*
  58         * We expect 1 data virtqueue, followed by
  59         * possible N-1 data queues used in multiqueue mode,
  60         * followed by control vq.
  61         */
  62        total_vqs = vi->max_data_queues + 1;
  63
  64        /* Allocate space for find_vqs parameters */
  65        vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
  66        if (!vqs)
  67                goto err_vq;
  68        callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
  69        if (!callbacks)
  70                goto err_callback;
  71        names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
  72        if (!names)
  73                goto err_names;
  74
  75        /* Parameters for control virtqueue */
  76        callbacks[total_vqs - 1] = NULL;
  77        names[total_vqs - 1] = "controlq";
  78
  79        /* Allocate/initialize parameters for data virtqueues */
  80        for (i = 0; i < vi->max_data_queues; i++) {
  81                callbacks[i] = virtcrypto_dataq_callback;
  82                snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
  83                                "dataq.%d", i);
  84                names[i] = vi->data_vq[i].name;
  85        }
  86
  87        ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
  88        if (ret)
  89                goto err_find;
  90
  91        vi->ctrl_vq = vqs[total_vqs - 1];
  92
  93        for (i = 0; i < vi->max_data_queues; i++) {
  94                spin_lock_init(&vi->data_vq[i].lock);
  95                vi->data_vq[i].vq = vqs[i];
  96                /* Initialize crypto engine */
  97                vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
  98                if (!vi->data_vq[i].engine) {
  99                        ret = -ENOMEM;
 100                        goto err_engine;
 101                }
 102        }
 103
 104        kfree(names);
 105        kfree(callbacks);
 106        kfree(vqs);
 107
 108        return 0;
 109
 110err_engine:
 111err_find:
 112        kfree(names);
 113err_names:
 114        kfree(callbacks);
 115err_callback:
 116        kfree(vqs);
 117err_vq:
 118        return ret;
 119}
 120
 121static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
 122{
 123        vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
 124                                GFP_KERNEL);
 125        if (!vi->data_vq)
 126                return -ENOMEM;
 127
 128        return 0;
 129}
 130
 131static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
 132{
 133        int i;
 134
 135        if (vi->affinity_hint_set) {
 136                for (i = 0; i < vi->max_data_queues; i++)
 137                        virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
 138
 139                vi->affinity_hint_set = false;
 140        }
 141}
 142
 143static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
 144{
 145        int i = 0;
 146        int cpu;
 147
 148        /*
 149         * In single queue mode, we don't set the cpu affinity.
 150         */
 151        if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
 152                virtcrypto_clean_affinity(vcrypto, -1);
 153                return;
 154        }
 155
 156        /*
 157         * In multiqueue mode, we let the queue to be private to one cpu
 158         * by setting the affinity hint to eliminate the contention.
 159         *
 160         * TODO: adds cpu hotplug support by register cpu notifier.
 161         *
 162         */
 163        for_each_online_cpu(cpu) {
 164                virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
 165                if (++i >= vcrypto->max_data_queues)
 166                        break;
 167        }
 168
 169        vcrypto->affinity_hint_set = true;
 170}
 171
/* Free the per-queue array allocated by virtcrypto_alloc_queues(). */
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}
 176
 177static int virtcrypto_init_vqs(struct virtio_crypto *vi)
 178{
 179        int ret;
 180
 181        /* Allocate send & receive queues */
 182        ret = virtcrypto_alloc_queues(vi);
 183        if (ret)
 184                goto err;
 185
 186        ret = virtcrypto_find_vqs(vi);
 187        if (ret)
 188                goto err_free;
 189
 190        get_online_cpus();
 191        virtcrypto_set_affinity(vi);
 192        put_online_cpus();
 193
 194        return 0;
 195
 196err_free:
 197        virtcrypto_free_queues(vi);
 198err:
 199        return ret;
 200}
 201
/*
 * Re-read the device status field and start or stop the accelerator to
 * match it.  Returns 0 on success (or when the status is unchanged),
 * -EPERM if unknown status bits are set or the device fails to start.
 */
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
				"Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	/* Nothing changed since the last read: no transition needed */
	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}
 243
 244static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
 245{
 246        int32_t i;
 247        int ret;
 248
 249        for (i = 0; i < vcrypto->max_data_queues; i++) {
 250                if (vcrypto->data_vq[i].engine) {
 251                        ret = crypto_engine_start(vcrypto->data_vq[i].engine);
 252                        if (ret)
 253                                goto err;
 254                }
 255        }
 256
 257        return 0;
 258
 259err:
 260        while (--i >= 0)
 261                if (vcrypto->data_vq[i].engine)
 262                        crypto_engine_exit(vcrypto->data_vq[i].engine);
 263
 264        return ret;
 265}
 266
 267static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
 268{
 269        u32 i;
 270
 271        for (i = 0; i < vcrypto->max_data_queues; i++)
 272                if (vcrypto->data_vq[i].engine)
 273                        crypto_engine_exit(vcrypto->data_vq[i].engine);
 274}
 275
/*
 * Tear down all virtqueues: clear CPU affinity hints, ask the transport
 * to delete the queues, then free the per-queue bookkeeping array.
 */
static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}
 286
 287static int virtcrypto_probe(struct virtio_device *vdev)
 288{
 289        int err = -EFAULT;
 290        struct virtio_crypto *vcrypto;
 291        u32 max_data_queues = 0, max_cipher_key_len = 0;
 292        u32 max_auth_key_len = 0;
 293        u64 max_size = 0;
 294        u32 cipher_algo_l = 0;
 295        u32 cipher_algo_h = 0;
 296        u32 hash_algo = 0;
 297        u32 mac_algo_l = 0;
 298        u32 mac_algo_h = 0;
 299        u32 aead_algo = 0;
 300        u32 crypto_services = 0;
 301
 302        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 303                return -ENODEV;
 304
 305        if (!vdev->config->get) {
 306                dev_err(&vdev->dev, "%s failure: config access disabled\n",
 307                        __func__);
 308                return -EINVAL;
 309        }
 310
 311        if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
 312                /*
 313                 * If the accelerator is connected to a node with no memory
 314                 * there is no point in using the accelerator since the remote
 315                 * memory transaction will be very slow.
 316                 */
 317                dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
 318                return -EINVAL;
 319        }
 320
 321        vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
 322                                        dev_to_node(&vdev->dev));
 323        if (!vcrypto)
 324                return -ENOMEM;
 325
 326        virtio_cread_le(vdev, struct virtio_crypto_config,
 327                        max_dataqueues, &max_data_queues);
 328        if (max_data_queues < 1)
 329                max_data_queues = 1;
 330
 331        virtio_cread_le(vdev, struct virtio_crypto_config,
 332                        max_cipher_key_len, &max_cipher_key_len);
 333        virtio_cread_le(vdev, struct virtio_crypto_config,
 334                        max_auth_key_len, &max_auth_key_len);
 335        virtio_cread_le(vdev, struct virtio_crypto_config,
 336                        max_size, &max_size);
 337        virtio_cread_le(vdev, struct virtio_crypto_config,
 338                        crypto_services, &crypto_services);
 339        virtio_cread_le(vdev, struct virtio_crypto_config,
 340                        cipher_algo_l, &cipher_algo_l);
 341        virtio_cread_le(vdev, struct virtio_crypto_config,
 342                        cipher_algo_h, &cipher_algo_h);
 343        virtio_cread_le(vdev, struct virtio_crypto_config,
 344                        hash_algo, &hash_algo);
 345        virtio_cread_le(vdev, struct virtio_crypto_config,
 346                        mac_algo_l, &mac_algo_l);
 347        virtio_cread_le(vdev, struct virtio_crypto_config,
 348                        mac_algo_h, &mac_algo_h);
 349        virtio_cread_le(vdev, struct virtio_crypto_config,
 350                        aead_algo, &aead_algo);
 351
 352        /* Add virtio crypto device to global table */
 353        err = virtcrypto_devmgr_add_dev(vcrypto);
 354        if (err) {
 355                dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
 356                goto free;
 357        }
 358        vcrypto->owner = THIS_MODULE;
 359        vcrypto = vdev->priv = vcrypto;
 360        vcrypto->vdev = vdev;
 361
 362        spin_lock_init(&vcrypto->ctrl_lock);
 363
 364        /* Use single data queue as default */
 365        vcrypto->curr_queue = 1;
 366        vcrypto->max_data_queues = max_data_queues;
 367        vcrypto->max_cipher_key_len = max_cipher_key_len;
 368        vcrypto->max_auth_key_len = max_auth_key_len;
 369        vcrypto->max_size = max_size;
 370        vcrypto->crypto_services = crypto_services;
 371        vcrypto->cipher_algo_l = cipher_algo_l;
 372        vcrypto->cipher_algo_h = cipher_algo_h;
 373        vcrypto->mac_algo_l = mac_algo_l;
 374        vcrypto->mac_algo_h = mac_algo_h;
 375        vcrypto->hash_algo = hash_algo;
 376        vcrypto->aead_algo = aead_algo;
 377
 378
 379        dev_info(&vdev->dev,
 380                "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
 381                vcrypto->max_data_queues,
 382                vcrypto->max_cipher_key_len,
 383                vcrypto->max_auth_key_len,
 384                vcrypto->max_size);
 385
 386        err = virtcrypto_init_vqs(vcrypto);
 387        if (err) {
 388                dev_err(&vdev->dev, "Failed to initialize vqs.\n");
 389                goto free_dev;
 390        }
 391
 392        err = virtcrypto_start_crypto_engines(vcrypto);
 393        if (err)
 394                goto free_vqs;
 395
 396        virtio_device_ready(vdev);
 397
 398        err = virtcrypto_update_status(vcrypto);
 399        if (err)
 400                goto free_engines;
 401
 402        return 0;
 403
 404free_engines:
 405        virtcrypto_clear_crypto_engines(vcrypto);
 406free_vqs:
 407        vcrypto->vdev->config->reset(vdev);
 408        virtcrypto_del_vqs(vcrypto);
 409free_dev:
 410        virtcrypto_devmgr_rm_dev(vcrypto);
 411free:
 412        kfree(vcrypto);
 413        return err;
 414}
 415
 416static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
 417{
 418        struct virtio_crypto_request *vc_req;
 419        int i;
 420        struct virtqueue *vq;
 421
 422        for (i = 0; i < vcrypto->max_data_queues; i++) {
 423                vq = vcrypto->data_vq[i].vq;
 424                while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
 425                        kfree(vc_req->req_data);
 426                        kfree(vc_req->sgs);
 427                }
 428        }
 429}
 430
/*
 * Device removal: stop the accelerator, quiesce the device, reclaim
 * outstanding buffers and per-queue resources, then unregister and
 * free the device state.
 */
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	/* Reset first so the device no longer touches the queues */
	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}
 446
/* Config-space change notification: re-evaluate the device status. */
static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	virtcrypto_update_status(vcrypto);
}
 453
 454#ifdef CONFIG_PM_SLEEP
/*
 * Suspend: reset the device, reclaim unused buffers, and tear down
 * the engines and virtqueues; virtcrypto_restore() rebuilds them.
 */
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	/* Reset first so the device stops using the queues */
	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}
 468
/*
 * Resume: rebuild the virtqueues and crypto engines torn down by
 * virtcrypto_freeze(), then restart the device.  The error unwind
 * mirrors the one in virtcrypto_probe().
 */
static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	/* Reset before deleting the queues so the device stops using them */
	vcrypto->vdev->config->reset(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
 499#endif
 500
/* No optional feature bits beyond the VIRTIO_F_VERSION_1 baseline */
static const unsigned int features[] = {
	/* none */
};

/* Match any virtio crypto device, regardless of vendor ID */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
 509
 510static struct virtio_driver virtio_crypto_driver = {
 511        .driver.name         = KBUILD_MODNAME,
 512        .driver.owner        = THIS_MODULE,
 513        .feature_table       = features,
 514        .feature_table_size  = ARRAY_SIZE(features),
 515        .id_table            = id_table,
 516        .probe               = virtcrypto_probe,
 517        .remove              = virtcrypto_remove,
 518        .config_changed = virtcrypto_config_changed,
 519#ifdef CONFIG_PM_SLEEP
 520        .freeze = virtcrypto_freeze,
 521        .restore = virtcrypto_restore,
 522#endif
 523};
 524
/* Standard module registration and metadata boilerplate */
module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
 531