/* linux/drivers/crypto/virtio/virtio_crypto_core.c */
   1 /* Driver for Virtio crypto device.
   2  *
   3  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
   4  *
   5  * This program is free software; you can redistribute it and/or modify
   6  * it under the terms of the GNU General Public License as published by
   7  * the Free Software Foundation; either version 2 of the License, or
   8  * (at your option) any later version.
   9  *
  10  * This program is distributed in the hope that it will be useful,
  11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13  * GNU General Public License for more details.
  14  *
  15  * You should have received a copy of the GNU General Public License
  16  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  17  */
  18
  19#include <linux/err.h>
  20#include <linux/module.h>
  21#include <linux/virtio_config.h>
  22#include <linux/cpu.h>
  23
  24#include <uapi/linux/virtio_crypto.h>
  25#include "virtio_crypto_common.h"
  26
  27
  28void
  29virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
  30{
  31        if (vc_req) {
  32                kzfree(vc_req->req_data);
  33                kfree(vc_req->sgs);
  34        }
  35}
  36
/*
 * Completion callback for a data virtqueue.
 *
 * Drains every completed request from @vq and invokes each request's
 * algorithm-specific callback (->alg_cb) with the length reported by
 * virtqueue_get_buf().  The per-queue lock is dropped around the
 * callback so that ->alg_cb may submit new requests on the same queue
 * without deadlocking.  The disable_cb/enable_cb loop re-scans the ring
 * to close the race where the device completes a buffer just as
 * callbacks are being re-enabled.
 */
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct virtio_crypto_request *vc_req;
        unsigned long flags;
        unsigned int len;
        unsigned int qid = vq->index;

        spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        /* Drop the lock: ->alg_cb may requeue on this vq. */
                        spin_unlock_irqrestore(
                                &vcrypto->data_vq[qid].lock, flags);
                        if (vc_req->alg_cb)
                                vc_req->alg_cb(vc_req, len);
                        spin_lock_irqsave(
                                &vcrypto->data_vq[qid].lock, flags);
                }
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
  59
  60static int virtcrypto_find_vqs(struct virtio_crypto *vi)
  61{
  62        vq_callback_t **callbacks;
  63        struct virtqueue **vqs;
  64        int ret = -ENOMEM;
  65        int i, total_vqs;
  66        const char **names;
  67        struct device *dev = &vi->vdev->dev;
  68
  69        /*
  70         * We expect 1 data virtqueue, followed by
  71         * possible N-1 data queues used in multiqueue mode,
  72         * followed by control vq.
  73         */
  74        total_vqs = vi->max_data_queues + 1;
  75
  76        /* Allocate space for find_vqs parameters */
  77        vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
  78        if (!vqs)
  79                goto err_vq;
  80        callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
  81        if (!callbacks)
  82                goto err_callback;
  83        names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
  84        if (!names)
  85                goto err_names;
  86
  87        /* Parameters for control virtqueue */
  88        callbacks[total_vqs - 1] = NULL;
  89        names[total_vqs - 1] = "controlq";
  90
  91        /* Allocate/initialize parameters for data virtqueues */
  92        for (i = 0; i < vi->max_data_queues; i++) {
  93                callbacks[i] = virtcrypto_dataq_callback;
  94                snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
  95                                "dataq.%d", i);
  96                names[i] = vi->data_vq[i].name;
  97        }
  98
  99        ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
 100        if (ret)
 101                goto err_find;
 102
 103        vi->ctrl_vq = vqs[total_vqs - 1];
 104
 105        for (i = 0; i < vi->max_data_queues; i++) {
 106                spin_lock_init(&vi->data_vq[i].lock);
 107                vi->data_vq[i].vq = vqs[i];
 108                /* Initialize crypto engine */
 109                vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
 110                if (!vi->data_vq[i].engine) {
 111                        ret = -ENOMEM;
 112                        goto err_engine;
 113                }
 114        }
 115
 116        kfree(names);
 117        kfree(callbacks);
 118        kfree(vqs);
 119
 120        return 0;
 121
 122err_engine:
 123err_find:
 124        kfree(names);
 125err_names:
 126        kfree(callbacks);
 127err_callback:
 128        kfree(vqs);
 129err_vq:
 130        return ret;
 131}
 132
 133static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
 134{
 135        vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
 136                                GFP_KERNEL);
 137        if (!vi->data_vq)
 138                return -ENOMEM;
 139
 140        return 0;
 141}
 142
 143static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
 144{
 145        int i;
 146
 147        if (vi->affinity_hint_set) {
 148                for (i = 0; i < vi->max_data_queues; i++)
 149                        virtqueue_set_affinity(vi->data_vq[i].vq, -1);
 150
 151                vi->affinity_hint_set = false;
 152        }
 153}
 154
 155static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
 156{
 157        int i = 0;
 158        int cpu;
 159
 160        /*
 161         * In single queue mode, we don't set the cpu affinity.
 162         */
 163        if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
 164                virtcrypto_clean_affinity(vcrypto, -1);
 165                return;
 166        }
 167
 168        /*
 169         * In multiqueue mode, we let the queue to be private to one cpu
 170         * by setting the affinity hint to eliminate the contention.
 171         *
 172         * TODO: adds cpu hotplug support by register cpu notifier.
 173         *
 174         */
 175        for_each_online_cpu(cpu) {
 176                virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
 177                if (++i >= vcrypto->max_data_queues)
 178                        break;
 179        }
 180
 181        vcrypto->affinity_hint_set = true;
 182}
 183
/* Counterpart of virtcrypto_alloc_queues(): release the data_vq array. */
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
        kfree(vi->data_vq);
}
 188
 189static int virtcrypto_init_vqs(struct virtio_crypto *vi)
 190{
 191        int ret;
 192
 193        /* Allocate send & receive queues */
 194        ret = virtcrypto_alloc_queues(vi);
 195        if (ret)
 196                goto err;
 197
 198        ret = virtcrypto_find_vqs(vi);
 199        if (ret)
 200                goto err_free;
 201
 202        get_online_cpus();
 203        virtcrypto_set_affinity(vi);
 204        put_online_cpus();
 205
 206        return 0;
 207
 208err_free:
 209        virtcrypto_free_queues(vi);
 210err:
 211        return ret;
 212}
 213
/*
 * Re-read the device status from config space and bring the driver in
 * or out of service to match.
 *
 * Any status bit other than VIRTIO_CRYPTO_S_HW_READY is treated as a
 * host error: the device is marked broken and -EPERM is returned.
 * When HW_READY transitions on, the device is started via
 * virtcrypto_dev_start(); when it transitions off, the device is
 * stopped.  Returns 0 on success or when the status is unchanged.
 */
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
        u32 status;
        int err;

        virtio_cread(vcrypto->vdev,
            struct virtio_crypto_config, status, &status);

        /*
         * Unknown status bits would be a host error and the driver
         * should consider the device to be broken.
         */
        if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
                dev_warn(&vcrypto->vdev->dev,
                                "Unknown status bits: 0x%x\n", status);

                virtio_break_device(vcrypto->vdev);
                return -EPERM;
        }

        /* No transition: nothing to do. */
        if (vcrypto->status == status)
                return 0;

        vcrypto->status = status;

        if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
                err = virtcrypto_dev_start(vcrypto);
                if (err) {
                        dev_err(&vcrypto->vdev->dev,
                                "Failed to start virtio crypto device.\n");

                        return -EPERM;
                }
                dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
        } else {
                virtcrypto_dev_stop(vcrypto);
                dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
        }

        return 0;
}
 255
 256static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
 257{
 258        int32_t i;
 259        int ret;
 260
 261        for (i = 0; i < vcrypto->max_data_queues; i++) {
 262                if (vcrypto->data_vq[i].engine) {
 263                        ret = crypto_engine_start(vcrypto->data_vq[i].engine);
 264                        if (ret)
 265                                goto err;
 266                }
 267        }
 268
 269        return 0;
 270
 271err:
 272        while (--i >= 0)
 273                if (vcrypto->data_vq[i].engine)
 274                        crypto_engine_exit(vcrypto->data_vq[i].engine);
 275
 276        return ret;
 277}
 278
 279static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
 280{
 281        u32 i;
 282
 283        for (i = 0; i < vcrypto->max_data_queues; i++)
 284                if (vcrypto->data_vq[i].engine)
 285                        crypto_engine_exit(vcrypto->data_vq[i].engine);
 286}
 287
 288static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
 289{
 290        struct virtio_device *vdev = vcrypto->vdev;
 291
 292        virtcrypto_clean_affinity(vcrypto, -1);
 293
 294        vdev->config->del_vqs(vdev);
 295
 296        virtcrypto_free_queues(vcrypto);
 297}
 298
 299static int virtcrypto_probe(struct virtio_device *vdev)
 300{
 301        int err = -EFAULT;
 302        struct virtio_crypto *vcrypto;
 303        u32 max_data_queues = 0, max_cipher_key_len = 0;
 304        u32 max_auth_key_len = 0;
 305        u64 max_size = 0;
 306
 307        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 308                return -ENODEV;
 309
 310        if (!vdev->config->get) {
 311                dev_err(&vdev->dev, "%s failure: config access disabled\n",
 312                        __func__);
 313                return -EINVAL;
 314        }
 315
 316        if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
 317                /*
 318                 * If the accelerator is connected to a node with no memory
 319                 * there is no point in using the accelerator since the remote
 320                 * memory transaction will be very slow.
 321                 */
 322                dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
 323                return -EINVAL;
 324        }
 325
 326        vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
 327                                        dev_to_node(&vdev->dev));
 328        if (!vcrypto)
 329                return -ENOMEM;
 330
 331        virtio_cread(vdev, struct virtio_crypto_config,
 332                        max_dataqueues, &max_data_queues);
 333        if (max_data_queues < 1)
 334                max_data_queues = 1;
 335
 336        virtio_cread(vdev, struct virtio_crypto_config,
 337                max_cipher_key_len, &max_cipher_key_len);
 338        virtio_cread(vdev, struct virtio_crypto_config,
 339                max_auth_key_len, &max_auth_key_len);
 340        virtio_cread(vdev, struct virtio_crypto_config,
 341                max_size, &max_size);
 342
 343        /* Add virtio crypto device to global table */
 344        err = virtcrypto_devmgr_add_dev(vcrypto);
 345        if (err) {
 346                dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
 347                goto free;
 348        }
 349        vcrypto->owner = THIS_MODULE;
 350        vcrypto = vdev->priv = vcrypto;
 351        vcrypto->vdev = vdev;
 352
 353        spin_lock_init(&vcrypto->ctrl_lock);
 354
 355        /* Use single data queue as default */
 356        vcrypto->curr_queue = 1;
 357        vcrypto->max_data_queues = max_data_queues;
 358        vcrypto->max_cipher_key_len = max_cipher_key_len;
 359        vcrypto->max_auth_key_len = max_auth_key_len;
 360        vcrypto->max_size = max_size;
 361
 362        dev_info(&vdev->dev,
 363                "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
 364                vcrypto->max_data_queues,
 365                vcrypto->max_cipher_key_len,
 366                vcrypto->max_auth_key_len,
 367                vcrypto->max_size);
 368
 369        err = virtcrypto_init_vqs(vcrypto);
 370        if (err) {
 371                dev_err(&vdev->dev, "Failed to initialize vqs.\n");
 372                goto free_dev;
 373        }
 374
 375        err = virtcrypto_start_crypto_engines(vcrypto);
 376        if (err)
 377                goto free_vqs;
 378
 379        virtio_device_ready(vdev);
 380
 381        err = virtcrypto_update_status(vcrypto);
 382        if (err)
 383                goto free_engines;
 384
 385        return 0;
 386
 387free_engines:
 388        virtcrypto_clear_crypto_engines(vcrypto);
 389free_vqs:
 390        vcrypto->vdev->config->reset(vdev);
 391        virtcrypto_del_vqs(vcrypto);
 392free_dev:
 393        virtcrypto_devmgr_rm_dev(vcrypto);
 394free:
 395        kfree(vcrypto);
 396        return err;
 397}
 398
 399static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
 400{
 401        struct virtio_crypto_request *vc_req;
 402        int i;
 403        struct virtqueue *vq;
 404
 405        for (i = 0; i < vcrypto->max_data_queues; i++) {
 406                vq = vcrypto->data_vq[i].vq;
 407                while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
 408                        kfree(vc_req->req_data);
 409                        kfree(vc_req->sgs);
 410                }
 411        }
 412}
 413
/*
 * Device removal: quiesce and tear everything down in reverse order of
 * probe.  The device is stopped and reset before buffers and queues are
 * reclaimed, so the host can no longer touch them.
 */
static void virtcrypto_remove(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

        /* Unregister algorithms first so no new requests can arrive. */
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);
        vdev->config->reset(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        virtcrypto_devmgr_rm_dev(vcrypto);
        kfree(vcrypto);
}
 429
 430static void virtcrypto_config_changed(struct virtio_device *vdev)
 431{
 432        struct virtio_crypto *vcrypto = vdev->priv;
 433
 434        virtcrypto_update_status(vcrypto);
 435}
 436
 437#ifdef CONFIG_PM_SLEEP
/*
 * PM freeze: reset the device, reclaim unused buffers, stop the device
 * if running, and release engines and virtqueues.  virtcrypto_restore()
 * rebuilds everything on resume.
 *
 * NOTE(review): unlike virtcrypto_remove(), the reset happens BEFORE
 * virtcrypto_dev_stop() here — confirm this ordering is intentional.
 */
static int virtcrypto_freeze(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        vdev->config->reset(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);

        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        return 0;
}
 451
 452static int virtcrypto_restore(struct virtio_device *vdev)
 453{
 454        struct virtio_crypto *vcrypto = vdev->priv;
 455        int err;
 456
 457        err = virtcrypto_init_vqs(vcrypto);
 458        if (err)
 459                return err;
 460
 461        err = virtcrypto_start_crypto_engines(vcrypto);
 462        if (err)
 463                goto free_vqs;
 464
 465        virtio_device_ready(vdev);
 466
 467        err = virtcrypto_dev_start(vcrypto);
 468        if (err) {
 469                dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
 470                goto free_engines;
 471        }
 472
 473        return 0;
 474
 475free_engines:
 476        virtcrypto_clear_crypto_engines(vcrypto);
 477free_vqs:
 478        vcrypto->vdev->config->reset(vdev);
 479        virtcrypto_del_vqs(vcrypto);
 480        return err;
 481}
 482#endif
 483
/* This driver negotiates no optional virtio feature bits. */
static unsigned int features[] = {
        /* none */
};

/* Match any virtio crypto device, regardless of vendor. */
static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static struct virtio_driver virtio_crypto_driver = {
        .driver.name         = KBUILD_MODNAME,
        .driver.owner        = THIS_MODULE,
        .feature_table       = features,
        .feature_table_size  = ARRAY_SIZE(features),
        .id_table            = id_table,
        .probe               = virtcrypto_probe,
        .remove              = virtcrypto_remove,
        .config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtcrypto_freeze,
        .restore = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
 514